1 /*
   2  * mr_sas.c: source for mr_sas driver
   3  *
   4  * Solaris MegaRAID device driver for SAS2.0 controllers
   5  * Copyright (c) 2008-2012, LSI Logic Corporation.
   6  * All rights reserved.
   7  *
   8  * Version:
   9  * Author:
  10  *              Swaminathan K S
  11  *              Arun Chandrashekhar
  12  *              Manju R
  13  *              Rasheed
  14  *              Shakeel Bukhari
  15  *
  16  * Redistribution and use in source and binary forms, with or without
  17  * modification, are permitted provided that the following conditions are met:
  18  *
  19  * 1. Redistributions of source code must retain the above copyright notice,
  20  *    this list of conditions and the following disclaimer.
  21  *
  22  * 2. Redistributions in binary form must reproduce the above copyright notice,
  23  *    this list of conditions and the following disclaimer in the documentation
  24  *    and/or other materials provided with the distribution.
  25  *
  26  * 3. Neither the name of the author nor the names of its contributors may be
  27  *    used to endorse or promote products derived from this software without
  28  *    specific prior written permission.
  29  *
  30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  31  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  32  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  33  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  34  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  35  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  36  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
  37  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  38  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  39  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  40  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  41  * DAMAGE.
  42  */
  43 
  44 /*
  45  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  46  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
  47  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  48  * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
  49  * Copyright 2015 Garrett D'Amore <garrett@damore.org>
  50  */
  51 
  52 #include <sys/types.h>
  53 #include <sys/param.h>
  54 #include <sys/file.h>
  55 #include <sys/errno.h>
  56 #include <sys/open.h>
  57 #include <sys/cred.h>
  58 #include <sys/modctl.h>
  59 #include <sys/conf.h>
  60 #include <sys/devops.h>
  61 #include <sys/cmn_err.h>
  62 #include <sys/kmem.h>
  63 #include <sys/stat.h>
  64 #include <sys/mkdev.h>
  65 #include <sys/pci.h>
  66 #include <sys/scsi/scsi.h>
  67 #include <sys/ddi.h>
  68 #include <sys/sunddi.h>
  69 #include <sys/atomic.h>
  70 #include <sys/signal.h>
  71 #include <sys/byteorder.h>
  72 #include <sys/sdt.h>
  73 #include <sys/fs/dv_node.h>       /* devfs_clean */
  74 
  75 #include "mr_sas.h"
  76 
  77 /*
  78  * FMA header files
  79  */
  80 #include <sys/ddifm.h>
  81 #include <sys/fm/protocol.h>
  82 #include <sys/fm/util.h>
  83 #include <sys/fm/io/ddi.h>
  84 
  85 /* Macros to help Skinny and stock 2108/MFI live together. */
  86 #define WR_IB_PICK_QPORT(addr, instance) \
  87         if ((instance)->skinny) { \
  88                 WR_IB_LOW_QPORT((addr), (instance)); \
  89                 WR_IB_HIGH_QPORT(0, (instance)); \
  90         } else { \
  91                 WR_IB_QPORT((addr), (instance)); \
  92         }
  93 
  94 /*
  95  * Local static data
  96  */
  97 static void     *mrsas_state = NULL;
  98 static volatile boolean_t       mrsas_relaxed_ordering = B_TRUE;
  99 volatile int    debug_level_g = CL_NONE;
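/*
 * msi_enable and ctio_enable are tunables: either can be turned off at
 * attach time through the "mrsas-enable-msi" and "mrsas-enable-ctio"
 * driver properties (see mrsas_attach()).
 */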
 100 static volatile int     msi_enable = 1;
 101 static volatile int     ctio_enable = 1;
 102 
 103 /* Default Timeout value to issue online controller reset */
 104 volatile int  debug_timeout_g  = 0xF0;          /* 0xB4; */
/* Simulate consecutive firmware faults */
 106 static volatile int  debug_fw_faults_after_ocr_g  = 0;
 107 #ifdef OCRDEBUG
/* Simulate three consecutive timeouts for an I/O */
 109 static volatile int  debug_consecutive_timeout_after_ocr_g  = 0;
 110 #endif
 111 
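/*
 * The scsi_hba framework routines below are declared weak so the module
 * still loads when they are not provided; callers such as mrsas_open()
 * and mrsas_close() test the symbols for NULL before using them.
 */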
 112 #pragma weak scsi_hba_open
 113 #pragma weak scsi_hba_close
 114 #pragma weak scsi_hba_ioctl
 115 
 116 /* Local static prototypes. */
 117 static int      mrsas_getinfo(dev_info_t *, ddi_info_cmd_t,  void *, void **);
 118 static int      mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
 119 #ifdef __sparc
 120 static int      mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
 121 #else
 122 static int      mrsas_quiesce(dev_info_t *);
 123 #endif
 124 static int      mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
 125 static int      mrsas_open(dev_t *, int, int, cred_t *);
 126 static int      mrsas_close(dev_t, int, int, cred_t *);
 127 static int      mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
 128 
 129 static int      mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
 130                     scsi_hba_tran_t *, struct scsi_device *);
 131 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
 132                     struct scsi_pkt *, struct buf *, int, int, int, int,
 133                     int (*)(), caddr_t);
 134 static int      mrsas_tran_start(struct scsi_address *,
 135                     register struct scsi_pkt *);
 136 static int      mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
 137 static int      mrsas_tran_reset(struct scsi_address *, int);
 138 static int      mrsas_tran_getcap(struct scsi_address *, char *, int);
 139 static int      mrsas_tran_setcap(struct scsi_address *, char *, int, int);
 140 static void     mrsas_tran_destroy_pkt(struct scsi_address *,
 141                     struct scsi_pkt *);
 142 static void     mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
 143 static void     mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
 144 static int      mrsas_tran_quiesce(dev_info_t *dip);
 145 static int      mrsas_tran_unquiesce(dev_info_t *dip);
 146 static uint_t   mrsas_isr();
 147 static uint_t   mrsas_softintr();
 148 static void     mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
 149 
 150 static void     free_space_for_mfi(struct mrsas_instance *);
 151 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
 152 static void     issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
 153 static int      issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
 154                     struct mrsas_cmd *);
 155 static int      issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
 156                     struct mrsas_cmd *);
 157 static void     enable_intr_ppc(struct mrsas_instance *);
 158 static void     disable_intr_ppc(struct mrsas_instance *);
 159 static int      intr_ack_ppc(struct mrsas_instance *);
 160 static void     flush_cache(struct mrsas_instance *instance);
 161 void    display_scsi_inquiry(caddr_t);
 162 static int      start_mfi_aen(struct mrsas_instance *instance);
 163 static int      handle_drv_ioctl(struct mrsas_instance *instance,
 164                     struct mrsas_ioctl *ioctl, int mode);
 165 static int      handle_mfi_ioctl(struct mrsas_instance *instance,
 166                     struct mrsas_ioctl *ioctl, int mode);
 167 static int      handle_mfi_aen(struct mrsas_instance *instance,
 168                     struct mrsas_aen *aen);
 169 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
 170     struct scsi_address *, struct scsi_pkt *, uchar_t *);
 171 static int      alloc_additional_dma_buffer(struct mrsas_instance *);
 172 static void     complete_cmd_in_sync_mode(struct mrsas_instance *,
 173                 struct mrsas_cmd *);
 174 static int      mrsas_kill_adapter(struct mrsas_instance *);
 175 static int      mrsas_issue_init_mfi(struct mrsas_instance *);
 176 static int      mrsas_reset_ppc(struct mrsas_instance *);
 177 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
 178 static int      wait_for_outstanding(struct mrsas_instance *instance);
 179 static int      register_mfi_aen(struct mrsas_instance *instance,
 180                     uint32_t seq_num, uint32_t class_locale_word);
 181 static int      issue_mfi_pthru(struct mrsas_instance *instance, struct
 182                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 183 static int      issue_mfi_dcmd(struct mrsas_instance *instance, struct
 184                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 185 static int      issue_mfi_smp(struct mrsas_instance *instance, struct
 186                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 187 static int      issue_mfi_stp(struct mrsas_instance *instance, struct
 188                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 189 static int      abort_aen_cmd(struct mrsas_instance *instance,
 190                     struct mrsas_cmd *cmd_to_abort);
 191 
 192 static void     mrsas_rem_intrs(struct mrsas_instance *instance);
 193 static int      mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
 194 
 195 static void     mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
 196                     scsi_hba_tran_t *, struct scsi_device *);
 197 static int      mrsas_tran_bus_config(dev_info_t *, uint_t,
 198                     ddi_bus_config_op_t, void *, dev_info_t **);
 199 static int      mrsas_parse_devname(char *, int *, int *);
 200 static int      mrsas_config_all_devices(struct mrsas_instance *);
 201 static int      mrsas_config_ld(struct mrsas_instance *, uint16_t,
 202                         uint8_t, dev_info_t **);
 203 static int      mrsas_name_node(dev_info_t *, char *, int);
 204 static void     mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
 205 static void     free_additional_dma_buffer(struct mrsas_instance *);
 206 static void io_timeout_checker(void *);
 207 static void mrsas_fm_init(struct mrsas_instance *);
 208 static void mrsas_fm_fini(struct mrsas_instance *);
 209 
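/*
 * Per-controller-family operation vectors.  mrsas_attach() selects one of
 * these based on the PCI device ID: the "ppc" template drives the 2108
 * (Liberator) and Skinny controllers, while the "fusion" template drives
 * the Thunderbolt and Gen3 (Invader/Fury/Intruder/Cutlass) controllers.
 */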
 210 static struct mrsas_function_template mrsas_function_template_ppc = {
 211         .read_fw_status_reg = read_fw_status_reg_ppc,
 212         .issue_cmd = issue_cmd_ppc,
 213         .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
 214         .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
 215         .enable_intr = enable_intr_ppc,
 216         .disable_intr = disable_intr_ppc,
 217         .intr_ack = intr_ack_ppc,
 218         .init_adapter = mrsas_init_adapter_ppc
 219 };
 220 
 221 
 222 static struct mrsas_function_template mrsas_function_template_fusion = {
 223         .read_fw_status_reg = tbolt_read_fw_status_reg,
 224         .issue_cmd = tbolt_issue_cmd,
 225         .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
 226         .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
 227         .enable_intr = tbolt_enable_intr,
 228         .disable_intr = tbolt_disable_intr,
 229         .intr_ack = tbolt_intr_ack,
 230         .init_adapter = mrsas_init_adapter_tbolt
 231 };
 232 
 233 
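/*
 * Generic DMA attributes.  mrsas_attach() copies this template, overrides
 * dma_attr_sgllen with the per-instance max_num_sge, and may set
 * DDI_DMA_RELAXED_ORDERING before handing it to scsi_hba_attach_setup().
 */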
 234 ddi_dma_attr_t mrsas_generic_dma_attr = {
 235         DMA_ATTR_V0,            /* dma_attr_version */
 236         0,                      /* low DMA address range */
 237         0xFFFFFFFFU,            /* high DMA address range */
 238         0xFFFFFFFFU,            /* DMA counter register  */
 239         8,                      /* DMA address alignment */
 240         0x07,                   /* DMA burstsizes  */
 241         1,                      /* min DMA size */
 242         0xFFFFFFFFU,            /* max DMA size */
 243         0xFFFFFFFFU,            /* segment boundary */
 244         MRSAS_MAX_SGE_CNT,      /* dma_attr_sglen */
 245         512,                    /* granularity of device */
 246         0                       /* bus specific DMA flags */
 247 };
 248 
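/*
 * Transfer-size caps used for the maxxfer capability: 16 MB (0x1000000)
 * here; Thunderbolt-class controllers are limited to 256 KB below.
 */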
 249 int32_t mrsas_max_cap_maxxfer = 0x1000000;
 250 
 251 /*
 * Fix for Thunderbolt controller I/O timeouts seen when the I/O write size
 * is 1 MB: limit the maximum transfer size to 256 KB.
 254  */
 255 uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
 256 
 257 /*
 258  * cb_ops contains base level routines
 259  */
 260 static struct cb_ops mrsas_cb_ops = {
 261         mrsas_open,             /* open */
 262         mrsas_close,            /* close */
 263         nodev,                  /* strategy */
 264         nodev,                  /* print */
 265         nodev,                  /* dump */
 266         nodev,                  /* read */
 267         nodev,                  /* write */
 268         mrsas_ioctl,            /* ioctl */
 269         nodev,                  /* devmap */
 270         nodev,                  /* mmap */
 271         nodev,                  /* segmap */
 272         nochpoll,               /* poll */
 273         nodev,                  /* cb_prop_op */
 274         0,                      /* streamtab  */
 275         D_NEW | D_HOTPLUG,      /* cb_flag */
 276         CB_REV,                 /* cb_rev */
 277         nodev,                  /* cb_aread */
 278         nodev                   /* cb_awrite */
 279 };
 280 
 281 /*
 282  * dev_ops contains configuration routines
 283  */
 284 static struct dev_ops mrsas_ops = {
 285         DEVO_REV,               /* rev, */
 286         0,                      /* refcnt */
 287         mrsas_getinfo,          /* getinfo */
 288         nulldev,                /* identify */
 289         nulldev,                /* probe */
 290         mrsas_attach,           /* attach */
 291         mrsas_detach,           /* detach */
 292 #ifdef  __sparc
 293         mrsas_reset,            /* reset */
 294 #else   /* __sparc */
 295         nodev,
 296 #endif  /* __sparc */
 297         &mrsas_cb_ops,              /* char/block ops */
 298         NULL,                   /* bus ops */
 299         NULL,                   /* power */
 300 #ifdef __sparc
 301         ddi_quiesce_not_needed
 302 #else   /* __sparc */
 303         mrsas_quiesce   /* quiesce */
 304 #endif  /* __sparc */
 305 };
 306 
 307 static struct modldrv modldrv = {
 308         &mod_driverops,             /* module type - driver */
 309         MRSAS_VERSION,
 310         &mrsas_ops,         /* driver ops */
 311 };
 312 
 313 static struct modlinkage modlinkage = {
 314         MODREV_1,       /* ml_rev - must be MODREV_1 */
 315         &modldrv,   /* ml_linkage */
 316         NULL            /* end of driver linkage */
 317 };
 318 
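/*
 * Access attributes for mapping the controller registers (see the
 * ddi_regs_map_setup() call in mrsas_attach()): little-endian,
 * strictly ordered accesses.
 */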
 319 static struct ddi_device_acc_attr endian_attr = {
 320         DDI_DEVICE_ATTR_V1,
 321         DDI_STRUCTURE_LE_ACC,
 322         DDI_STRICTORDER_ACC,
 323         DDI_DEFAULT_ACC
 324 };
 325 
 326 /* Use the LSI Fast Path for the 2208 (tbolt) commands. */
 327 unsigned int enable_fp = 1;
 328 
 329 
 330 /*
 331  * ************************************************************************** *
 332  *                                                                            *
 333  *         common entry points - for loadable kernel modules                  *
 334  *                                                                            *
 335  * ************************************************************************** *
 336  */
 337 
 338 /*
 339  * _init - initialize a loadable module
 340  * @void
 341  *
 342  * The driver should perform any one-time resource allocation or data
 343  * initialization during driver loading in _init(). For example, the driver
 344  * should initialize any mutexes global to the driver in this routine.
 345  * The driver should not, however, use _init() to allocate or initialize
 346  * anything that has to do with a particular instance of the device.
 347  * Per-instance initialization must be done in attach().
 348  */
 349 int
 350 _init(void)
 351 {
 352         int ret;
 353 
 354         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 355 
 356         ret = ddi_soft_state_init(&mrsas_state,
 357             sizeof (struct mrsas_instance), 0);
 358 
 359         if (ret != DDI_SUCCESS) {
 360                 cmn_err(CE_WARN, "mr_sas: could not init state");
 361                 return (ret);
 362         }
 363 
 364         if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
 365                 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
 366                 ddi_soft_state_fini(&mrsas_state);
 367                 return (ret);
 368         }
 369 
 370         ret = mod_install(&modlinkage);
 371 
 372         if (ret != DDI_SUCCESS) {
 373                 cmn_err(CE_WARN, "mr_sas: mod_install failed");
 374                 scsi_hba_fini(&modlinkage);
 375                 ddi_soft_state_fini(&mrsas_state);
 376         }
 377 
 378         return (ret);
 379 }
 380 
 381 /*
 382  * _info - returns information about a loadable module.
 383  * @void
 384  *
 385  * _info() is called to return module information. This is a typical entry
 * point with a predefined role: it simply calls mod_info().
 387  */
 388 int
 389 _info(struct modinfo *modinfop)
 390 {
 391         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 392 
 393         return (mod_info(&modlinkage, modinfop));
 394 }
 395 
 396 /*
 397  * _fini - prepare a loadable module for unloading
 398  * @void
 399  *
 400  * In _fini(), the driver should release any resources that were allocated in
 401  * _init(). The driver must remove itself from the system module list.
 402  */
 403 int
 404 _fini(void)
 405 {
 406         int ret;
 407 
 408         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 409 
 410         if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
 411                 con_log(CL_ANN1,
 412                     (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
 413                 return (ret);
 414         }
 415 
 416         scsi_hba_fini(&modlinkage);
 417         con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
 418 
 419         ddi_soft_state_fini(&mrsas_state);
 420         con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
 421 
 422         return (ret);
 423 }
 424 
 425 
 426 /*
 427  * ************************************************************************** *
 428  *                                                                            *
 429  *               common entry points - for autoconfiguration                  *
 430  *                                                                            *
 431  * ************************************************************************** *
 432  */
 433 /*
 434  * attach - adds a device to the system as part of initialization
 435  * @dip:
 436  * @cmd:
 437  *
 438  * The kernel calls a driver's attach() entry point to attach an instance of
 * a device (for MegaRAID, an instance of a controller) or to resume
 * operation for an instance of a device that has been suspended or has been
 * shut down by the power management framework.
 442  * The attach() entry point typically includes the following types of
 443  * processing:
 444  * - allocate a soft-state structure for the device instance (for MegaRAID,
 445  *   controller instance)
 446  * - initialize per-instance mutexes
 447  * - initialize condition variables
 448  * - register the device's interrupts (for MegaRAID, controller's interrupts)
 449  * - map the registers and memory of the device instance (for MegaRAID,
 450  *   controller instance)
 451  * - create minor device nodes for the device instance (for MegaRAID,
 452  *   controller instance)
 453  * - report that the device instance (for MegaRAID, controller instance) has
 454  *   attached
 455  */
 456 static int
 457 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 458 {
 459         int             instance_no;
 460         int             nregs;
 461         int             i = 0;
 462         uint8_t         irq;
 463         uint16_t        vendor_id;
 464         uint16_t        device_id;
 465         uint16_t        subsysvid;
 466         uint16_t        subsysid;
 467         uint16_t        command;
 468         off_t           reglength = 0;
 469         int             intr_types = 0;
 470         char            *data;
 471 
 472         scsi_hba_tran_t         *tran;
 473         ddi_dma_attr_t  tran_dma_attr;
 474         struct mrsas_instance   *instance;
 475 
 476         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 477 
 478         /* CONSTCOND */
 479         ASSERT(NO_COMPETING_THREADS);
 480 
 481         instance_no = ddi_get_instance(dip);
 482 
 483         /*
 484          * check to see whether this device is in a DMA-capable slot.
 485          */
 486         if (ddi_slaveonly(dip) == DDI_SUCCESS) {
 487                 dev_err(dip, CE_WARN, "Device in slave-only slot, unused");
 488                 return (DDI_FAILURE);
 489         }
 490 
 491         switch (cmd) {
 492         case DDI_ATTACH:
 493                 /* allocate the soft state for the instance */
 494                 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
 495                     != DDI_SUCCESS) {
 496                         dev_err(dip, CE_WARN, "Failed to allocate soft state");
 497                         return (DDI_FAILURE);
 498                 }
 499 
 500                 instance = (struct mrsas_instance *)ddi_get_soft_state
 501                     (mrsas_state, instance_no);
 502 
 503                 if (instance == NULL) {
 504                         dev_err(dip, CE_WARN, "Bad soft state");
 505                         ddi_soft_state_free(mrsas_state, instance_no);
 506                         return (DDI_FAILURE);
 507                 }
 508 
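                /*
                 * The unroll flags record which resources have been set up
                 * so far, so that the fail_attach path and
                 * mrsas_undo_resources() tear down only what was actually
                 * allocated.
                 */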
 509                 instance->unroll.softs       = 1;
 510 
 511                 /* Setup the PCI configuration space handles */
 512                 if (pci_config_setup(dip, &instance->pci_handle) !=
 513                     DDI_SUCCESS) {
 514                         dev_err(dip, CE_WARN, "pci config setup failed");
 515 
 516                         ddi_soft_state_free(mrsas_state, instance_no);
 517                         return (DDI_FAILURE);
 518                 }
 519 
 520                 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
 521                         dev_err(dip, CE_WARN, "Failed to get registers");
 522 
 523                         pci_config_teardown(&instance->pci_handle);
 524                         ddi_soft_state_free(mrsas_state, instance_no);
 525                         return (DDI_FAILURE);
 526                 }
 527 
 528                 vendor_id = pci_config_get16(instance->pci_handle,
 529                     PCI_CONF_VENID);
 530                 device_id = pci_config_get16(instance->pci_handle,
 531                     PCI_CONF_DEVID);
 532 
 533                 subsysvid = pci_config_get16(instance->pci_handle,
 534                     PCI_CONF_SUBVENID);
 535                 subsysid = pci_config_get16(instance->pci_handle,
 536                     PCI_CONF_SUBSYSID);
 537 
 538                 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
 539                     (pci_config_get16(instance->pci_handle,
 540                     PCI_CONF_COMM) | PCI_COMM_ME));
 541                 irq = pci_config_get8(instance->pci_handle,
 542                     PCI_CONF_ILINE);
 543 
 544                 dev_err(dip, CE_CONT,
 545                     "?0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
 546                     vendor_id, device_id, subsysvid,
 547                     subsysid, irq, MRSAS_VERSION);
 548 
 549                 /* enable bus-mastering */
 550                 command = pci_config_get16(instance->pci_handle,
 551                     PCI_CONF_COMM);
 552 
 553                 if (!(command & PCI_COMM_ME)) {
 554                         command |= PCI_COMM_ME;
 555 
 556                         pci_config_put16(instance->pci_handle,
 557                             PCI_CONF_COMM, command);
 558 
 559                         con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
 560                             "enable bus-mastering", instance_no));
 561                 } else {
 562                         con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
 563                             "bus-mastering already set", instance_no));
 564                 }
 565 
 566                 /* initialize function pointers */
 567                 switch (device_id) {
 568                 case PCI_DEVICE_ID_LSI_INVADER:
 569                 case PCI_DEVICE_ID_LSI_FURY:
 570                 case PCI_DEVICE_ID_LSI_INTRUDER:
 571                 case PCI_DEVICE_ID_LSI_INTRUDER_24:
 572                 case PCI_DEVICE_ID_LSI_CUTLASS_52:
 573                 case PCI_DEVICE_ID_LSI_CUTLASS_53:
 574                         dev_err(dip, CE_CONT, "?Gen3 device detected\n");
 575                         instance->gen3 = 1;
 576                         /* FALLTHROUGH */
 577                 case PCI_DEVICE_ID_LSI_TBOLT:
 578                         dev_err(dip, CE_CONT, "?TBOLT device detected\n");
 579 
 580                         instance->func_ptr =
 581                             &mrsas_function_template_fusion;
 582                         instance->tbolt = 1;
 583                         break;
 584 
 585                 case PCI_DEVICE_ID_LSI_SKINNY:
 586                 case PCI_DEVICE_ID_LSI_SKINNY_NEW:
 587                         /*
 588                          * FALLTHRU to PPC-style functions, but mark this
 589                          * instance as Skinny, because the register set is
 590                          * slightly different (See WR_IB_PICK_QPORT), and
 591                          * certain other features are available to a Skinny
 592                          * HBA.
 593                          */
 594                         dev_err(dip, CE_CONT, "?Skinny device detected\n");
 595                         instance->skinny = 1;
 596                         /* FALLTHRU */
 597 
 598                 case PCI_DEVICE_ID_LSI_2108VDE:
 599                 case PCI_DEVICE_ID_LSI_2108V:
 600                         dev_err(dip, CE_CONT,
 601                             "?2108 Liberator device detected\n");
 602 
 603                         instance->func_ptr =
 604                             &mrsas_function_template_ppc;
 605                         break;
 606 
 607                 default:
 608                         dev_err(dip, CE_WARN, "Invalid device detected");
 609 
 610                         pci_config_teardown(&instance->pci_handle);
 611                         ddi_soft_state_free(mrsas_state, instance_no);
 612                         return (DDI_FAILURE);
 613                 }
 614 
 615                 instance->baseaddress = pci_config_get32(
 616                     instance->pci_handle, PCI_CONF_BASE0);
 617                 instance->baseaddress &= 0x0fffc;
 618 
 619                 instance->dip                = dip;
 620                 instance->vendor_id  = vendor_id;
 621                 instance->device_id  = device_id;
 622                 instance->subsysvid  = subsysvid;
 623                 instance->subsysid   = subsysid;
 624                 instance->instance   = instance_no;
 625 
 626                 /* Initialize FMA */
 627                 instance->fm_capabilities = ddi_prop_get_int(
 628                     DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
 629                     "fm-capable", DDI_FM_EREPORT_CAPABLE |
 630                     DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
 631                     | DDI_FM_ERRCB_CAPABLE);
 632 
 633                 mrsas_fm_init(instance);
 634 
 635                 /* Setup register map */
 636                 if ((ddi_dev_regsize(instance->dip,
                    REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
 638                     reglength < MINIMUM_MFI_MEM_SZ) {
 639                         goto fail_attach;
 640                 }
 641                 if (reglength > DEFAULT_MFI_MEM_SZ) {
 642                         reglength = DEFAULT_MFI_MEM_SZ;
 643                         con_log(CL_DLEVEL1, (CE_NOTE,
 644                             "mr_sas: register length to map is 0x%lx bytes",
 645                             reglength));
 646                 }
 647                 if (ddi_regs_map_setup(instance->dip,
 648                     REGISTER_SET_IO_2108, &instance->regmap, 0,
 649                     reglength, &endian_attr, &instance->regmap_handle)
 650                     != DDI_SUCCESS) {
 651                         dev_err(dip, CE_WARN, "couldn't map control registers");
 652                         goto fail_attach;
 653                 }
 654 
 655                 instance->unroll.regs = 1;
 656 
 657                 /*
 658                  * Disable Interrupt Now.
 659                  * Setup Software interrupt
 660                  */
 661                 instance->func_ptr->disable_intr(instance);
 662 
 663                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
 664                     "mrsas-enable-msi", &data) == DDI_SUCCESS) {
 665                         if (strncmp(data, "no", 3) == 0) {
 666                                 msi_enable = 0;
 667                                 con_log(CL_ANN1, (CE_WARN,
 668                                     "msi_enable = %d disabled", msi_enable));
 669                         }
 670                         ddi_prop_free(data);
 671                 }
 672 
 673                 dev_err(dip, CE_CONT, "?msi_enable = %d\n", msi_enable);
 674 
 675                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
 676                     "mrsas-enable-fp", &data) == DDI_SUCCESS) {
 677                         if (strncmp(data, "no", 3) == 0) {
 678                                 enable_fp = 0;
 679                                 dev_err(dip, CE_NOTE,
 680                                     "enable_fp = %d, Fast-Path disabled.\n",
 681                                     enable_fp);
 682                         }
 683 
 684                         ddi_prop_free(data);
 685                 }
 686 
 687                 dev_err(dip, CE_CONT, "?enable_fp = %d\n", enable_fp);
 688 
 689                 /* Check for all supported interrupt types */
 690                 if (ddi_intr_get_supported_types(
 691                     dip, &intr_types) != DDI_SUCCESS) {
 692                         dev_err(dip, CE_WARN,
 693                             "ddi_intr_get_supported_types() failed");
 694                         goto fail_attach;
 695                 }
 696 
 697                 con_log(CL_DLEVEL1, (CE_NOTE,
 698                     "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
 699 
 700                 /* Initialize and Setup Interrupt handler */
 701                 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
 702                         if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
 703                             DDI_SUCCESS) {
 704                                 dev_err(dip, CE_WARN,
 705                                     "MSIX interrupt query failed");
 706                                 goto fail_attach;
 707                         }
 708                         instance->intr_type = DDI_INTR_TYPE_MSIX;
 709                 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
 710                         if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
 711                             DDI_SUCCESS) {
 712                                 dev_err(dip, CE_WARN,
 713                                     "MSI interrupt query failed");
 714                                 goto fail_attach;
 715                         }
 716                         instance->intr_type = DDI_INTR_TYPE_MSI;
 717                 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
 718                         msi_enable = 0;
 719                         if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
 720                             DDI_SUCCESS) {
 721                                 dev_err(dip, CE_WARN,
 722                                     "FIXED interrupt query failed");
 723                                 goto fail_attach;
 724                         }
 725                         instance->intr_type = DDI_INTR_TYPE_FIXED;
 726                 } else {
 727                         dev_err(dip, CE_WARN, "Device cannot "
                            "support either FIXED or MSI/X "
 729                             "interrupts");
 730                         goto fail_attach;
 731                 }
 732 
 733                 instance->unroll.intr = 1;
 734 
 735                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
 736                     "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
 737                         if (strncmp(data, "no", 3) == 0) {
 738                                 ctio_enable = 0;
 739                                 con_log(CL_ANN1, (CE_WARN,
 740                                     "ctio_enable = %d disabled", ctio_enable));
 741                         }
 742                         ddi_prop_free(data);
 743                 }
 744 
 745                 dev_err(dip, CE_CONT, "?ctio_enable = %d\n", ctio_enable);
 746 
 747                 /* setup the mfi based low level driver */
 748                 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
 749                         dev_err(dip, CE_WARN,
 750                             "could not initialize the low level driver");
 751 
 752                         goto fail_attach;
 753                 }
 754 
 755                 /* Initialize all Mutex */
 756                 INIT_LIST_HEAD(&instance->completed_pool_list);
 757                 mutex_init(&instance->completed_pool_mtx, NULL,
 758                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 759 
 760                 mutex_init(&instance->sync_map_mtx, NULL,
 761                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 762 
 763                 mutex_init(&instance->app_cmd_pool_mtx, NULL,
 764                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 765 
 766                 mutex_init(&instance->config_dev_mtx, NULL,
 767                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 768 
 769                 mutex_init(&instance->cmd_pend_mtx, NULL,
 770                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 771 
 772                 mutex_init(&instance->ocr_flags_mtx, NULL,
 773                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 774 
 775                 mutex_init(&instance->int_cmd_mtx, NULL,
 776                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 777                 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
 778 
 779                 mutex_init(&instance->cmd_pool_mtx, NULL,
 780                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 781 
 782                 mutex_init(&instance->reg_write_mtx, NULL,
 783                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 784 
 785                 if (instance->tbolt) {
 786                         mutex_init(&instance->cmd_app_pool_mtx, NULL,
 787                             MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 788 
 789                         mutex_init(&instance->chip_mtx, NULL,
 790                             MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 791 
 792                 }
 793 
 794                 instance->unroll.mutexs = 1;
 795 
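                /*
                 * A timeout_id of -1 marks the watchdog timeout as not
                 * armed; mrsas_detach() and mrsas_undo_resources() check
                 * for this sentinel before calling untimeout().
                 */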
 796                 instance->timeout_id = (timeout_id_t)-1;
 797 
 798                 /* Register our soft-isr for highlevel interrupts. */
 799                 instance->isr_level = instance->intr_pri;
 800                 if (!(instance->tbolt)) {
 801                         if (instance->isr_level == HIGH_LEVEL_INTR) {
 802                                 if (ddi_add_softintr(dip,
 803                                     DDI_SOFTINT_HIGH,
 804                                     &instance->soft_intr_id, NULL, NULL,
 805                                     mrsas_softintr, (caddr_t)instance) !=
 806                                     DDI_SUCCESS) {
 807                                         dev_err(dip, CE_WARN,
 808                                             "Software ISR did not register");
 809 
 810                                         goto fail_attach;
 811                                 }
 812 
 813                                 instance->unroll.soft_isr = 1;
 814 
 815                         }
 816                 }
 817 
 818                 instance->softint_running = 0;
 819 
 820                 /* Allocate a transport structure */
 821                 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
 822 
 823                 if (tran == NULL) {
 824                         dev_err(dip, CE_WARN,
 825                             "scsi_hba_tran_alloc failed");
 826                         goto fail_attach;
 827                 }
 828 
 829                 instance->tran = tran;
 830                 instance->unroll.tran = 1;
 831 
 832                 tran->tran_hba_private       = instance;
 833                 tran->tran_tgt_init  = mrsas_tran_tgt_init;
 834                 tran->tran_tgt_probe = scsi_hba_probe;
 835                 tran->tran_tgt_free  = mrsas_tran_tgt_free;
 836                 tran->tran_init_pkt  = mrsas_tran_init_pkt;
 837                 if (instance->tbolt)
 838                         tran->tran_start = mrsas_tbolt_tran_start;
 839                 else
 840                         tran->tran_start = mrsas_tran_start;
 841                 tran->tran_abort     = mrsas_tran_abort;
 842                 tran->tran_reset     = mrsas_tran_reset;
 843                 tran->tran_getcap    = mrsas_tran_getcap;
 844                 tran->tran_setcap    = mrsas_tran_setcap;
 845                 tran->tran_destroy_pkt       = mrsas_tran_destroy_pkt;
 846                 tran->tran_dmafree   = mrsas_tran_dmafree;
 847                 tran->tran_sync_pkt  = mrsas_tran_sync_pkt;
 848                 tran->tran_quiesce   = mrsas_tran_quiesce;
 849                 tran->tran_unquiesce = mrsas_tran_unquiesce;
 850                 tran->tran_bus_config        = mrsas_tran_bus_config;
 851 
 852                 if (mrsas_relaxed_ordering)
 853                         mrsas_generic_dma_attr.dma_attr_flags |=
 854                             DDI_DMA_RELAXED_ORDERING;
 855 
 856 
 857                 tran_dma_attr = mrsas_generic_dma_attr;
 858                 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
 859 
 860                 /* Attach this instance of the hba */
 861                 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
 862                     != DDI_SUCCESS) {
 863                         dev_err(dip, CE_WARN,
 864                             "scsi_hba_attach failed");
 865 
 866                         goto fail_attach;
 867                 }
 868                 instance->unroll.tranSetup = 1;
 869                 con_log(CL_ANN1,
 870                     (CE_CONT, "scsi_hba_attach_setup()  done."));
 871 
 872                 /* create devctl node for cfgadm command */
 873                 if (ddi_create_minor_node(dip, "devctl",
 874                     S_IFCHR, INST2DEVCTL(instance_no),
 875                     DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
 876                         dev_err(dip, CE_WARN, "failed to create devctl node.");
 877 
 878                         goto fail_attach;
 879                 }
 880 
 881                 instance->unroll.devctl = 1;
 882 
 883                 /* create scsi node for cfgadm command */
 884                 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
 885                     INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
 886                     DDI_FAILURE) {
 887                         dev_err(dip, CE_WARN, "failed to create scsi node.");
 888 
 889                         goto fail_attach;
 890                 }
 891 
 892                 instance->unroll.scsictl = 1;
 893 
 894                 (void) snprintf(instance->iocnode, sizeof (instance->iocnode),
 895                     "%d:lsirdctl", instance_no);
 896 
 897                 /*
 898                  * Create a node for applications
 899                  * for issuing ioctl to the driver.
 900                  */
 901                 if (ddi_create_minor_node(dip, instance->iocnode,
 902                     S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
 903                     DDI_FAILURE) {
 904                         dev_err(dip, CE_WARN, "failed to create ioctl node.");
 905 
 906                         goto fail_attach;
 907                 }
 908 
 909                 instance->unroll.ioctl = 1;
 910 
 911                 /* Create a taskq to handle dr events */
 912                 if ((instance->taskq = ddi_taskq_create(dip,
 913                     "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
 914                         dev_err(dip, CE_WARN, "failed to create taskq.");
 915                         instance->taskq = NULL;
 916                         goto fail_attach;
 917                 }
 918                 instance->unroll.taskq = 1;
 919                 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
 920 
 921                 /* enable interrupt */
 922                 instance->func_ptr->enable_intr(instance);
 923 
 924                 /* initiate AEN */
 925                 if (start_mfi_aen(instance)) {
 926                         dev_err(dip, CE_WARN, "failed to initiate AEN.");
 927                         goto fail_attach;
 928                 }
 929                 instance->unroll.aenPend = 1;
 930                 con_log(CL_ANN1,
 931                     (CE_CONT, "AEN started for instance %d.", instance_no));
 932 
 933                 /* Finally! We are on the air.  */
 934                 ddi_report_dev(dip);
 935 
 936                 /* FMA handle checking. */
 937                 if (mrsas_check_acc_handle(instance->regmap_handle) !=
 938                     DDI_SUCCESS) {
 939                         goto fail_attach;
 940                 }
 941                 if (mrsas_check_acc_handle(instance->pci_handle) !=
 942                     DDI_SUCCESS) {
 943                         goto fail_attach;
 944                 }
 945 
 946                 instance->mr_ld_list =
 947                     kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
 948                     KM_SLEEP);
 949                 instance->unroll.ldlist_buff = 1;
 950 
 951                 if (instance->tbolt || instance->skinny) {
 952                         instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
 953                         instance->mr_tbolt_pd_list =
 954                             kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
 955                             sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
 956                         ASSERT(instance->mr_tbolt_pd_list);
 957                         for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
 958                                 instance->mr_tbolt_pd_list[i].lun_type =
 959                                     MRSAS_TBOLT_PD_LUN;
 960                                 instance->mr_tbolt_pd_list[i].dev_id =
 961                                     (uint8_t)i;
 962                         }
 963 
 964                         instance->unroll.pdlist_buff = 1;
 965                 }
 966                 break;
 967         case DDI_PM_RESUME:
 968                 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
 969                 break;
 970         case DDI_RESUME:
 971                 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
 972                 break;
 973         default:
 974                 con_log(CL_ANN,
 975                     (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
 976                 return (DDI_FAILURE);
 977         }
 978 
 979 
 980         con_log(CL_DLEVEL1,
            (CE_NOTE, "mrsas_attach() returns SUCCESS, instance_num %d",
 982             instance_no));
 983         return (DDI_SUCCESS);
 984 
 985 fail_attach:
 986 
 987         mrsas_undo_resources(dip, instance);
 988 
 989         mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
 990         ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
 991 
 992         mrsas_fm_fini(instance);
 993 
 994         pci_config_teardown(&instance->pci_handle);
 995         ddi_soft_state_free(mrsas_state, instance_no);
 996 
 997         return (DDI_FAILURE);
 998 }
 999 
1000 /*
1001  * getinfo - gets device information
1002  * @dip:
1003  * @cmd:
1004  * @arg:
1005  * @resultp:
1006  *
1007  * The system calls getinfo() to obtain configuration information that only
 * the driver knows. The mapping of minor numbers to device instances is
1009  * entirely under the control of the driver. The system sometimes needs to ask
1010  * the driver which device a particular dev_t represents.
 * Given the device number, return the devinfo pointer from the scsi_device
1012  * structure.
1013  */
1014 /*ARGSUSED*/
1015 static int
1016 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,  void *arg, void **resultp)
1017 {
1018         int     rval;
1019         int     mrsas_minor = getminor((dev_t)arg);
1020 
1021         struct mrsas_instance   *instance;
1022 
1023         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1024 
1025         switch (cmd) {
1026                 case DDI_INFO_DEVT2DEVINFO:
1027                         instance = (struct mrsas_instance *)
1028                             ddi_get_soft_state(mrsas_state,
1029                             MINOR2INST(mrsas_minor));
1030 
1031                         if (instance == NULL) {
1032                                 *resultp = NULL;
1033                                 rval = DDI_FAILURE;
1034                         } else {
1035                                 *resultp = instance->dip;
1036                                 rval = DDI_SUCCESS;
1037                         }
1038                         break;
1039                 case DDI_INFO_DEVT2INSTANCE:
1040                         *resultp = (void *)(intptr_t)
1041                             (MINOR2INST(getminor((dev_t)arg)));
1042                         rval = DDI_SUCCESS;
1043                         break;
1044                 default:
1045                         *resultp = NULL;
1046                         rval = DDI_FAILURE;
1047         }
1048 
1049         return (rval);
1050 }
1051 
1052 /*
1053  * detach - detaches a device from the system
1054  * @dip: pointer to the device's dev_info structure
1055  * @cmd: type of detach
1056  *
1057  * A driver's detach() entry point is called to detach an instance of a device
1058  * that is bound to the driver. The entry point is called with the instance of
1059  * the device node to be detached and with DDI_DETACH, which is specified as
1060  * the cmd argument to the entry point.
1061  * This routine is called during driver unload. We free all the allocated
1062  * resources and call the corresponding LLD so that it can also release all
1063  * its resources.
1064  */
1065 static int
1066 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1067 {
1068         int     instance_no;
1069 
1070         struct mrsas_instance   *instance;
1071 
1072         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1073 
1074 
1075         /* CONSTCOND */
1076         ASSERT(NO_COMPETING_THREADS);
1077 
1078         instance_no = ddi_get_instance(dip);
1079 
1080         instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
1081             instance_no);
1082 
1083         if (!instance) {
1084                 dev_err(dip, CE_WARN, "could not get instance in detach");
1085 
1086                 return (DDI_FAILURE);
1087         }
1088 
1089         switch (cmd) {
1090                 case DDI_DETACH:
1091                         con_log(CL_ANN, (CE_NOTE,
1092                             "mrsas_detach: DDI_DETACH"));
1093 
1094                         mutex_enter(&instance->config_dev_mtx);
1095                         if (instance->timeout_id != (timeout_id_t)-1) {
1096                                 mutex_exit(&instance->config_dev_mtx);
1097                                 (void) untimeout(instance->timeout_id);
1098                                 instance->timeout_id = (timeout_id_t)-1;
1099                                 mutex_enter(&instance->config_dev_mtx);
1100                                 instance->unroll.timer = 0;
1101                         }
1102                         mutex_exit(&instance->config_dev_mtx);
1103 
1104                         if (instance->unroll.tranSetup == 1) {
1105                                 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1106                                         dev_err(dip, CE_WARN,
1107                                             "failed to detach");
1108                                         return (DDI_FAILURE);
1109                                 }
1110                                 instance->unroll.tranSetup = 0;
1111                                 con_log(CL_ANN1,
                                    (CE_CONT, "scsi_hba_detach()  done."));
1113                         }
1114 
1115                         flush_cache(instance);
1116 
1117                         mrsas_undo_resources(dip, instance);
1118 
1119                         mrsas_fm_fini(instance);
1120 
1121                         pci_config_teardown(&instance->pci_handle);
1122                         ddi_soft_state_free(mrsas_state, instance_no);
1123                         break;
1124 
1125                 case DDI_PM_SUSPEND:
1126                         con_log(CL_ANN, (CE_NOTE,
1127                             "mrsas_detach: DDI_PM_SUSPEND"));
1128 
1129                         break;
1130                 case DDI_SUSPEND:
1131                         con_log(CL_ANN, (CE_NOTE,
1132                             "mrsas_detach: DDI_SUSPEND"));
1133 
1134                         break;
1135                 default:
1136                         con_log(CL_ANN, (CE_WARN,
1137                             "invalid detach command:0x%x", cmd));
1138                         return (DDI_FAILURE);
1139         }
1140 
1141         return (DDI_SUCCESS);
1142 }
1143 
1144 
1145 static void
1146 mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
1147 {
1148         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1149 
1150         if (instance->unroll.ioctl == 1) {
1151                 ddi_remove_minor_node(dip, instance->iocnode);
1152                 instance->unroll.ioctl = 0;
1153         }
1154 
1155         if (instance->unroll.scsictl == 1) {
1156                 ddi_remove_minor_node(dip, "scsi");
1157                 instance->unroll.scsictl = 0;
1158         }
1159 
1160         if (instance->unroll.devctl == 1) {
1161                 ddi_remove_minor_node(dip, "devctl");
1162                 instance->unroll.devctl = 0;
1163         }
1164 
1165         if (instance->unroll.tranSetup == 1) {
1166                 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1167                         dev_err(dip, CE_WARN, "failed to detach");
1168                         return;  /* DDI_FAILURE */
1169                 }
1170                 instance->unroll.tranSetup = 0;
                con_log(CL_ANN1, (CE_CONT, "scsi_hba_detach()  done."));
1172         }
1173 
1174         if (instance->unroll.tran == 1)       {
1175                 scsi_hba_tran_free(instance->tran);
1176                 instance->unroll.tran = 0;
1177                 con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free()  done."));
1178         }
1179 
1180         if (instance->unroll.syncCmd == 1) {
1181                 if (instance->tbolt) {
1182                         if (abort_syncmap_cmd(instance,
1183                             instance->map_update_cmd)) {
1184                                 dev_err(dip, CE_WARN, "mrsas_detach: "
1185                                     "failed to abort previous syncmap command");
1186                         }
1187 
1188                         instance->unroll.syncCmd = 0;
1189                         con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
1190                 }
1191         }
1192 
1193         if (instance->unroll.aenPend == 1) {
1194                 if (abort_aen_cmd(instance, instance->aen_cmd))
1195                         dev_err(dip, CE_WARN, "mrsas_detach: "
                            "failed to abort previous AEN command");
1197 
1198                 instance->unroll.aenPend = 0;
1199                 con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
1200                 /* This means the controller is fully initialized and running */
                /* Shutdown should be the last command to the controller. */
1202                 /* shutdown_controller(); */
1203         }
1204 
1205 
1206         if (instance->unroll.timer == 1)      {
1207                 if (instance->timeout_id != (timeout_id_t)-1) {
1208                         (void) untimeout(instance->timeout_id);
1209                         instance->timeout_id = (timeout_id_t)-1;
1210 
1211                         instance->unroll.timer = 0;
1212                 }
1213         }
1214 
1215         instance->func_ptr->disable_intr(instance);
1216 
1217 
1218         if (instance->unroll.mutexs == 1) {
1219                 mutex_destroy(&instance->cmd_pool_mtx);
1220                 mutex_destroy(&instance->app_cmd_pool_mtx);
1221                 mutex_destroy(&instance->cmd_pend_mtx);
1222                 mutex_destroy(&instance->completed_pool_mtx);
1223                 mutex_destroy(&instance->sync_map_mtx);
1224                 mutex_destroy(&instance->int_cmd_mtx);
1225                 cv_destroy(&instance->int_cmd_cv);
1226                 mutex_destroy(&instance->config_dev_mtx);
1227                 mutex_destroy(&instance->ocr_flags_mtx);
1228                 mutex_destroy(&instance->reg_write_mtx);
1229 
1230                 if (instance->tbolt) {
1231                         mutex_destroy(&instance->cmd_app_pool_mtx);
1232                         mutex_destroy(&instance->chip_mtx);
1233                 }
1234 
1235                 instance->unroll.mutexs = 0;
1236                 con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv,      done."));
1237         }
1238 
1239 
1240         if (instance->unroll.soft_isr == 1) {
1241                 ddi_remove_softintr(instance->soft_intr_id);
1242                 instance->unroll.soft_isr = 0;
1243         }
1244 
1245         if (instance->unroll.intr == 1) {
1246                 mrsas_rem_intrs(instance);
1247                 instance->unroll.intr = 0;
1248         }
1249 
1250 
1251         if (instance->unroll.taskq == 1)      {
1252                 if (instance->taskq) {
1253                         ddi_taskq_destroy(instance->taskq);
1254                         instance->unroll.taskq = 0;
1255                 }
1256 
1257         }
1258 
1259         /*
1260          * free dma memory allocated for
1261          * cmds/frames/queues/driver version etc
1262          */
1263         if (instance->unroll.verBuff == 1) {
1264                 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1265                 instance->unroll.verBuff = 0;
1266         }
1267 
1268         if (instance->unroll.pdlist_buff == 1)       {
1269                 if (instance->mr_tbolt_pd_list != NULL) {
1270                         kmem_free(instance->mr_tbolt_pd_list,
1271                             MRSAS_TBOLT_GET_PD_MAX(instance) *
1272                             sizeof (struct mrsas_tbolt_pd));
1273                 }
1274 
1275                 instance->mr_tbolt_pd_list = NULL;
1276                 instance->unroll.pdlist_buff = 0;
1277         }
1278 
1279         if (instance->unroll.ldlist_buff == 1)       {
1280                 if (instance->mr_ld_list != NULL) {
1281                         kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
1282                             * sizeof (struct mrsas_ld));
1283                 }
1284 
1285                 instance->mr_ld_list = NULL;
1286                 instance->unroll.ldlist_buff = 0;
1287         }
1288 
1289         if (instance->tbolt) {
1290                 if (instance->unroll.alloc_space_mpi2 == 1) {
1291                         free_space_for_mpi2(instance);
1292                         instance->unroll.alloc_space_mpi2 = 0;
1293                 }
1294         } else {
1295                 if (instance->unroll.alloc_space_mfi == 1) {
1296                         free_space_for_mfi(instance);
1297                         instance->unroll.alloc_space_mfi = 0;
1298                 }
1299         }
1300 
1301         if (instance->unroll.regs == 1)       {
1302                 ddi_regs_map_free(&instance->regmap_handle);
1303                 instance->unroll.regs = 0;
1304                 con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free()  done."));
1305         }
1306 }
1307 
1308 
1309 
1310 /*
1311  * ************************************************************************** *
1312  *                                                                            *
1313  *             common entry points - for character driver types               *
1314  *                                                                            *
1315  * ************************************************************************** *
1316  */
1317 /*
1318  * open - gets access to a device
1319  * @dev:        pointer to the device number being opened
1320  * @openflags:  open flags (FREAD, FWRITE, ...)
1321  * @otyp:       type of open; must be OTYP_CHR for this node
1322  * @credp:      credentials of the calling process
1323  *
1324  * Access to a device by one or more application programs is controlled
1325  * through the open() and close() entry points. The primary function of
1326  * open() is to verify that the open request is allowed.
1327  */
1328 static  int
1329 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1330 {
1331         int     rval = 0;
1332 
1333         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1334 
1335         /* Check root permissions */
1336         if (drv_priv(credp) != 0) {
1337                 con_log(CL_ANN, (CE_WARN,
1338                     "mr_sas: Non-root ioctl access denied!"));
1339                 return (EPERM);
1340         }
1341 
1342         /* Verify we are being opened as a character device */
1343         if (otyp != OTYP_CHR) {
1344                 con_log(CL_ANN, (CE_WARN,
1345                     "mr_sas: ioctl node must be a char node"));
1346                 return (EINVAL);
1347         }
1348 
1349         if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1350             == NULL) {
1351                 return (ENXIO);
1352         }
1353 
1354         if (scsi_hba_open) {
1355                 rval = scsi_hba_open(dev, openflags, otyp, credp);
1356         }
1357 
1358         return (rval);
1359 }
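
/*
 * Hypothetical userland sketch (illustration only; the device path is an
 * assumption, not taken from this driver): management utilities reach
 * mrsas_open() by opening the driver's character minor node.  The open is
 * refused with EPERM for unprivileged callers and with EINVAL if the node
 * is not opened as a character device.
 *
 *	int fd = open("/devices/...:mr_sas", O_RDWR);	// path assumed
 *	if (fd < 0)
 *		perror("mr_sas control node");
 */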
1360 
1361 /*
1362  * close - gives up access to a device
1363  * @dev:        device number being closed
1364  * @openflags:  open flags supplied at open time
1365  * @otyp:       type of open (OTYP_CHR)
1366  * @credp:      credentials of the calling process
1367  *
1368  * close() should perform any cleanup necessary to finish using the minor
1369  * device, and prepare the device (and driver) to be opened again.
1370  */
1371 static  int
1372 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1373 {
1374         int     rval = 0;
1375 
1376         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1377 
1378         /* no need for locks! */
1379 
1380         if (scsi_hba_close) {
1381                 rval = scsi_hba_close(dev, openflags, otyp, credp);
1382         }
1383 
1384         return (rval);
1385 }
1386 
1387 /*
1388  * ioctl - performs a range of I/O commands for character drivers
1389  * @dev:        device number
1390  * @cmd:        ioctl command code
1391  * @arg:        user-supplied argument (pointer to the ioctl payload)
1392  * @mode:       data model and transfer flags for ddi_copyin()/ddi_copyout()
1393  * @credp:      credentials of the calling process
1394  * @rvalp:      return value passed back to the calling process
1395  *
1396  * ioctl() routine must make sure that user data is copied into or out of the
1397  * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1398  * and ddi_copyout(), as appropriate.
1399  * This is a wrapper routine to serialize access to the actual ioctl routine.
1400  * ioctl() should return 0 on success, or the appropriate error number. The
1401  * driver may also set the value returned to the calling process through rvalp.
1402  */
1403 
1404 static int
1405 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1406     int *rvalp)
1407 {
1408         int     rval = 0;
1409 
1410         struct mrsas_instance   *instance;
1411         struct mrsas_ioctl      *ioctl;
1412         struct mrsas_aen        aen;
1413         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1414 
1415         instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1416 
1417         if (instance == NULL) {
1418                 /* invalid minor number */
1419                 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1420                 return (ENXIO);
1421         }
1422 
1423         ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1424             KM_SLEEP);
1425         ASSERT(ioctl);
1426 
1427         switch ((uint_t)cmd) {
1428                 case MRSAS_IOCTL_FIRMWARE:
1429                         if (ddi_copyin((void *)arg, ioctl,
1430                             sizeof (struct mrsas_ioctl), mode)) {
1431                                 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1432                                     "ERROR IOCTL copyin"));
1433                                 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1434                                 return (EFAULT);
1435                         }
1436 
1437                         if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1438                                 rval = handle_drv_ioctl(instance, ioctl, mode);
1439                         } else {
1440                                 rval = handle_mfi_ioctl(instance, ioctl, mode);
1441                         }
1442 
1443                         if (ddi_copyout((void *)ioctl, (void *)arg,
1444                             (sizeof (struct mrsas_ioctl) - 1), mode)) {
1445                                 con_log(CL_ANN, (CE_WARN,
1446                                     "mrsas_ioctl: copy_to_user failed"));
1447                                 rval = 1;
1448                         }
1449 
1450                         break;
1451                 case MRSAS_IOCTL_AEN:
1452                         if (ddi_copyin((void *) arg, &aen,
1453                             sizeof (struct mrsas_aen), mode)) {
1454                                 con_log(CL_ANN, (CE_WARN,
1455                                     "mrsas_ioctl: ERROR AEN copyin"));
1456                                 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1457                                 return (EFAULT);
1458                         }
1459 
1460                         rval = handle_mfi_aen(instance, &aen);
1461 
1462                         if (ddi_copyout((void *) &aen, (void *)arg,
1463                             sizeof (struct mrsas_aen), mode)) {
1464                                 con_log(CL_ANN, (CE_WARN,
1465                                     "mrsas_ioctl: copy_to_user failed"));
1466                                 rval = 1;
1467                         }
1468 
1469                         break;
1470                 default:
1471                         rval = scsi_hba_ioctl(dev, cmd, arg,
1472                             mode, credp, rvalp);
1473 
1474                         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1475                             "scsi_hba_ioctl called, ret = %x.", rval));
1476         }
1477 
1478         kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1479         return (rval);
1480 }
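
/*
 * Hypothetical userland sketch (illustration only; fd is the descriptor from
 * the open() sketch above): the wrapper copies a struct mrsas_ioctl in,
 * dispatches on ioctl->control_code (driver-handled versus firmware
 * pass-through), and copies the updated structure back out.  Payload fields
 * other than control_code are omitted here because their use is command
 * specific.
 *
 *	struct mrsas_ioctl ioc;
 *
 *	(void) memset(&ioc, 0, sizeof (ioc));
 *	ioc.control_code = MRSAS_DRIVER_IOCTL_COMMON;	// handled in-driver
 *	if (ioctl(fd, MRSAS_IOCTL_FIRMWARE, &ioc) != 0)
 *		perror("MRSAS_IOCTL_FIRMWARE");
 */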
1481 
1482 /*
1483  * ************************************************************************** *
1484  *                                                                            *
1485  *               common entry points - for block driver types                 *
1486  *                                                                            *
1487  * ************************************************************************** *
1488  */
1489 #ifdef __sparc
1490 /*
1491  * reset - flush the controller at system reset time (SPARC only)
1492  * @dip:        devinfo pointer for this instance
1493  * @cmd:        reset operation being performed (unused)
1494  *
1495  * Disables controller interrupts and flushes the controller cache.
1496  */
1497 /*ARGSUSED*/
1498 static int
1499 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1500 {
1501         int     instance_no;
1502 
1503         struct mrsas_instance   *instance;
1504 
1505         instance_no = ddi_get_instance(dip);
1506         instance = (struct mrsas_instance *)ddi_get_soft_state
1507             (mrsas_state, instance_no);
1508 
1509         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1510 
1511         if (!instance) {
1512                 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1513                     "in reset", instance_no));
1514                 return (DDI_FAILURE);
1515         }
1516 
1517         instance->func_ptr->disable_intr(instance);
1518 
1519         con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1520             instance_no));
1521 
1522         flush_cache(instance);
1523 
1524         return (DDI_SUCCESS);
1525 }
1526 #else /* __sparc */
1527 /*ARGSUSED*/
1528 static int
1529 mrsas_quiesce(dev_info_t *dip)
1530 {
1531         int     instance_no;
1532 
1533         struct mrsas_instance   *instance;
1534 
1535         instance_no = ddi_get_instance(dip);
1536         instance = (struct mrsas_instance *)ddi_get_soft_state
1537             (mrsas_state, instance_no);
1538 
1539         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1540 
1541         if (!instance) {
1542                 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1543                     "in quiesce", instance_no));
1544                 return (DDI_FAILURE);
1545         }
1546         if (instance->deadadapter || instance->adapterresetinprogress) {
1547                 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1548                     "healthy state", instance_no));
1549                 return (DDI_FAILURE);
1550         }
1551 
1552         if (abort_aen_cmd(instance, instance->aen_cmd)) {
1553                 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1554                     "failed to abort previous AEN command QUIESCE"));
1555         }
1556 
1557         if (instance->tbolt) {
1558                 if (abort_syncmap_cmd(instance,
1559                     instance->map_update_cmd)) {
1560                         dev_err(dip, CE_WARN,
1561                             "mrsas_detach: failed to abort "
1562                             "previous syncmap command");
1563                         return (DDI_FAILURE);
1564                 }
1565         }
1566 
1567         instance->func_ptr->disable_intr(instance);
1568 
1569         con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1570             instance_no));
1571 
1572         flush_cache(instance);
1573 
1574         if (wait_for_outstanding(instance)) {
1575                 con_log(CL_ANN1,
1576                     (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1577                 return (DDI_FAILURE);
1578         }
1579         return (DDI_SUCCESS);
1580 }
1581 #endif  /* __sparc */
1582 
1583 /*
1584  * ************************************************************************** *
1585  *                                                                            *
1586  *                          entry points (SCSI HBA)                           *
1587  *                                                                            *
1588  * ************************************************************************** *
1589  */
1590 /*
1591  * tran_tgt_init - initialize a target device instance
1592  * @hba_dip:
1593  * @tgt_dip:
1594  * @tran:
1595  * @sd:
1596  *
1597  * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1598  * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1599  * the device's address as valid and supportable for that particular HBA.
1600  * By returning DDI_FAILURE, the instance of the target driver for that device
1601  * is not probed or attached.
1602  */
1603 /*ARGSUSED*/
1604 static int
1605 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1606     scsi_hba_tran_t *tran, struct scsi_device *sd)
1607 {
1608         struct mrsas_instance *instance;
1609         uint16_t tgt = sd->sd_address.a_target;
1610         uint8_t lun = sd->sd_address.a_lun;
1611         dev_info_t *child = NULL;
1612 
1613         con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1614             tgt, lun));
1615 
1616         instance = ADDR2MR(&sd->sd_address);
1617 
1618         if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1619                 /*
1620                  * If no persistent node exists, we don't allow .conf node
1621                  * to be created.
1622                  */
1623                 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1624                         con_log(CL_DLEVEL2,
1625                             (CE_NOTE, "mrsas_tgt_init find child ="
1626                             " %p t = %d l = %d", (void *)child, tgt, lun));
1627                         if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1628                             DDI_SUCCESS)
1629                                 /* Create this .conf node */
1630                                 return (DDI_SUCCESS);
1631                 }
1632                 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1633                     "DDI_FAILURE t = %d l = %d", tgt, lun));
1634                 return (DDI_FAILURE);
1635 
1636         }
1637 
1638         con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1639             (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1640 
1641         if (tgt < MRDRV_MAX_LD && lun == 0) {
1642                 if (instance->mr_ld_list[tgt].dip == NULL &&
1643                     strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1644                         mutex_enter(&instance->config_dev_mtx);
1645                         instance->mr_ld_list[tgt].dip = tgt_dip;
1646                         instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1647                         instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1648                         mutex_exit(&instance->config_dev_mtx);
1649                 }
1650         } else if (instance->tbolt || instance->skinny) {
1651                 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1652                         mutex_enter(&instance->config_dev_mtx);
1653                         instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1654                         instance->mr_tbolt_pd_list[tgt].flag =
1655                             MRDRV_TGT_VALID;
1656                         mutex_exit(&instance->config_dev_mtx);
1657                         con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1658                             "t%xl%x", tgt, lun));
1659                 }
1660         }
1661 
1662         return (DDI_SUCCESS);
1663 }
1664 
1665 /*ARGSUSED*/
1666 static void
1667 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1668     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1669 {
1670         struct mrsas_instance *instance;
1671         int tgt = sd->sd_address.a_target;
1672         int lun = sd->sd_address.a_lun;
1673 
1674         instance = ADDR2MR(&sd->sd_address);
1675 
1676         con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1677 
1678         if (tgt < MRDRV_MAX_LD && lun == 0) {
1679                 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1680                         mutex_enter(&instance->config_dev_mtx);
1681                         instance->mr_ld_list[tgt].dip = NULL;
1682                         mutex_exit(&instance->config_dev_mtx);
1683                 }
1684         } else if (instance->tbolt || instance->skinny) {
1685                 mutex_enter(&instance->config_dev_mtx);
1686                 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1687                 mutex_exit(&instance->config_dev_mtx);
1688                 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL "
1689                     "for tgt:%x", tgt));
1690         }
1691 }
1692 
1693 dev_info_t *
1694 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1695 {
1696         dev_info_t *child = NULL;
1697         char addr[SCSI_MAXNAMELEN];
1698         char tmp[MAXNAMELEN];
1699 
1700         (void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
1701         for (child = ddi_get_child(instance->dip); child;
1702             child = ddi_get_next_sibling(child)) {
1703 
1704                 if (ndi_dev_is_persistent_node(child) == 0) {
1705                         continue;
1706                 }
1707 
1708                 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1709                     DDI_SUCCESS) {
1710                         continue;
1711                 }
1712 
1713                 if (strcmp(addr, tmp) == 0) {
1714                         break;
1715                 }
1716         }
1717         con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1718             (void *)child));
1719         return (child);
1720 }
1721 
1722 /*
1723  * mrsas_name_node - build the unit-address string for a child node
1724  * @dip:        child devinfo node to name
1725  * @name:       buffer that receives the "<target>,<lun>" unit address
1726  * @len:        size of the name buffer
1727  */
1728 static int
1729 mrsas_name_node(dev_info_t *dip, char *name, int len)
1730 {
1731         int tgt, lun;
1732 
1733         tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1734             DDI_PROP_DONTPASS, "target", -1);
1735         con_log(CL_DLEVEL2, (CE_NOTE,
1736             "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1737         if (tgt == -1) {
1738                 return (DDI_FAILURE);
1739         }
1740         lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1741             "lun", -1);
1742         con_log(CL_DLEVEL2,
1743             (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1744         if (lun == -1) {
1745                 return (DDI_FAILURE);
1746         }
1747         (void) snprintf(name, len, "%x,%x", tgt, lun);
1748         return (DDI_SUCCESS);
1749 }
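
/*
 * Example: a child node carrying the properties target=2 and lun=0 is named
 * "2,0", while target=16 (0x10) and lun=0 yields "10,0", because both values
 * are formatted in hexadecimal.  mrsas_find_child() builds its search key
 * with the same "%x,%x" encoding, so the two routines always agree.
 */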
1750 
1751 /*
1752  * tran_init_pkt - allocate & initialize a scsi_pkt structure
1753  * @ap:         target address
1754  * @pkt:        existing packet to reuse, or NULL to allocate a new one
1755  * @bp:         data buffer; non-NULL requests DMA resource allocation
1756  * @cmdlen:     CDB length
1757  * @statuslen:  SCSI status/ARQ area length
1758  * @tgtlen:     target-driver private data length
1759  * @flags:      PKT_* flags from the target driver
1760  * @callback:   resource-wait callback (SLEEP_FUNC or NULL_FUNC)
1761  *
1762  * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1763  * structure and DMA resources for a target driver request. The
1764  * tran_init_pkt() entry point is called when the target driver calls the
1765  * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1766  * is a request to perform one or more of three possible services:
1767  *  - allocation and initialization of a scsi_pkt structure
1768  *  - allocation of DMA resources for data transfer
1769  *  - reallocation of DMA resources for the next portion of the data transfer
1770  */
1771 static struct scsi_pkt *
1772 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1773     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1774     int flags, int (*callback)(), caddr_t arg)
1775 {
1776         struct scsa_cmd *acmd;
1777         struct mrsas_instance   *instance;
1778         struct scsi_pkt *new_pkt;
1779 
1780         con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1781 
1782         instance = ADDR2MR(ap);
1783 
1784         /* step #1 : pkt allocation */
1785         if (pkt == NULL) {
1786                 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1787                     tgtlen, sizeof (struct scsa_cmd), callback, arg);
1788                 if (pkt == NULL) {
1789                         return (NULL);
1790                 }
1791 
1792                 acmd = PKT2CMD(pkt);
1793 
1794                 /*
1795                  * Initialize the new pkt - we redundantly initialize
1796                  * all the fields for illustrative purposes.
1797                  */
1798                 acmd->cmd_pkt                = pkt;
1799                 acmd->cmd_flags              = 0;
1800                 acmd->cmd_scblen     = statuslen;
1801                 acmd->cmd_cdblen     = cmdlen;
1802                 acmd->cmd_dmahandle  = NULL;
1803                 acmd->cmd_ncookies   = 0;
1804                 acmd->cmd_cookie     = 0;
1805                 acmd->cmd_cookiecnt  = 0;
1806                 acmd->cmd_nwin               = 0;
1807 
1808                 pkt->pkt_address     = *ap;
1809                 pkt->pkt_comp                = (void (*)())NULL;
1810                 pkt->pkt_flags               = 0;
1811                 pkt->pkt_time                = 0;
1812                 pkt->pkt_resid               = 0;
1813                 pkt->pkt_state               = 0;
1814                 pkt->pkt_statistics  = 0;
1815                 pkt->pkt_reason              = 0;
1816                 new_pkt                 = pkt;
1817         } else {
1818                 acmd = PKT2CMD(pkt);
1819                 new_pkt = NULL;
1820         }
1821 
1822         /* step #2 : dma allocation/move */
1823         if (bp && bp->b_bcount != 0) {
1824                 if (acmd->cmd_dmahandle == NULL) {
1825                         if (mrsas_dma_alloc(instance, pkt, bp, flags,
1826                             callback) == DDI_FAILURE) {
1827                                 if (new_pkt) {
1828                                         scsi_hba_pkt_free(ap, new_pkt);
1829                                 }
1830                                 return ((struct scsi_pkt *)NULL);
1831                         }
1832                 } else {
1833                         if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1834                                 return ((struct scsi_pkt *)NULL);
1835                         }
1836                 }
1837         }
1838 
1839         return (pkt);
1840 }
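
/*
 * Sketch of the target-driver side (illustration only; devp and bp are
 * assumed to be the target driver's scsi_device and data buf).  A call such
 * as the one below arrives here as tran_init_pkt() and, because bp is
 * non-NULL, requests both packet allocation and DMA resource allocation in
 * a single step; see scsi_init_pkt(9F).
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
 */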
1841 
1842 /*
1843  * tran_start - transport a SCSI command to the addressed target
1844  * @ap:
1845  * @pkt:
1846  *
1847  * The tran_start() entry point for a SCSI HBA driver is called to transport a
1848  * SCSI command to the addressed target. The SCSI command is described
1849  * entirely within the scsi_pkt structure, which the target driver allocated
1850  * through the HBA driver's tran_init_pkt() entry point. If the command
1851  * involves a data transfer, DMA resources must also have been allocated for
1852  * the scsi_pkt structure.
1853  *
1854  * Return Values :
1855  *      TRAN_BUSY - request queue is full, no more free scbs
1856  *      TRAN_ACCEPT - pkt has been submitted to the instance
1857  */
1858 static int
1859 mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1860 {
1861         uchar_t         cmd_done = 0;
1862 
1863         struct mrsas_instance   *instance = ADDR2MR(ap);
1864         struct mrsas_cmd        *cmd;
1865 
1866         con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1867         if (instance->deadadapter == 1) {
1868                 con_log(CL_ANN1, (CE_WARN,
1869                     "mrsas_tran_start: return TRAN_FATAL_ERROR "
1870                     "for IO, as the HBA doesn't take any more IOs"));
1871                 if (pkt) {
1872                         pkt->pkt_reason              = CMD_DEV_GONE;
1873                         pkt->pkt_statistics  = STAT_DISCON;
1874                 }
1875                 return (TRAN_FATAL_ERROR);
1876         }
1877 
1878         if (instance->adapterresetinprogress) {
1879                 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: reset in "
1880                     "progress, returning TRAN_BUSY\n"));
1881                 return (TRAN_BUSY);
1882         }
1883 
1884         con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
1885             __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));
1886 
1887         pkt->pkt_reason      = CMD_CMPLT;
1888         *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1889 
1890         cmd = build_cmd(instance, ap, pkt, &cmd_done);
1891 
1892         /*
1893          * Check whether the command was already completed inside build_cmd().
1894          * In that case cmd_done is set, no command structure is returned, and
1895          * the appropriate reason is already recorded in pkt_reason.
1896          */
1897         if (cmd_done) {
1898                 pkt->pkt_reason = CMD_CMPLT;
1899                 pkt->pkt_scbp[0] = STATUS_GOOD;
1900                 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1901                     | STATE_SENT_CMD;
1902                 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1903                         (*pkt->pkt_comp)(pkt);
1904                 }
1905 
1906                 return (TRAN_ACCEPT);
1907         }
1908 
1909         if (cmd == NULL) {
1910                 return (TRAN_BUSY);
1911         }
1912 
1913         if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1914                 if (instance->fw_outstanding > instance->max_fw_cmds) {
1915                         con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
1916                         DTRACE_PROBE2(start_tran_err,
1917                             uint16_t, instance->fw_outstanding,
1918                             uint16_t, instance->max_fw_cmds);
1919                         mrsas_return_mfi_pkt(instance, cmd);
1920                         return (TRAN_BUSY);
1921                 }
1922 
1923                 /* Synchronize the Cmd frame for the controller */
1924                 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1925                     DDI_DMA_SYNC_FORDEV);
1926                 con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x "
1927                     "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
1928                 instance->func_ptr->issue_cmd(cmd, instance);
1929 
1930         } else {
1931                 struct mrsas_header *hdr = &cmd->frame->hdr;
1932 
1933                 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1934 
1935                 pkt->pkt_reason              = CMD_CMPLT;
1936                 pkt->pkt_statistics  = 0;
1937                 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1938 
1939                 switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
1940                     &hdr->cmd_status)) {
1941                 case MFI_STAT_OK:
1942                         pkt->pkt_scbp[0] = STATUS_GOOD;
1943                         break;
1944 
1945                 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1946                         con_log(CL_ANN, (CE_CONT,
1947                             "mrsas_tran_start: scsi done with error"));
1948                         pkt->pkt_reason      = CMD_CMPLT;
1949                         pkt->pkt_statistics = 0;
1950 
1951                         ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1952                         break;
1953 
1954                 case MFI_STAT_DEVICE_NOT_FOUND:
1955                         con_log(CL_ANN, (CE_CONT,
1956                             "mrsas_tran_start: device not found error"));
1957                         pkt->pkt_reason              = CMD_DEV_GONE;
1958                         pkt->pkt_statistics  = STAT_DISCON;
1959                         break;
1960 
1961                 default:
1962                         ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1963                 }
1964 
1965                 (void) mrsas_common_check(instance, cmd);
1966                 DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
1967                     uint8_t, hdr->cmd_status);
1968                 mrsas_return_mfi_pkt(instance, cmd);
1969 
1970                 if (pkt->pkt_comp) {
1971                         (*pkt->pkt_comp)(pkt);
1972                 }
1973 
1974         }
1975 
1976         return (TRAN_ACCEPT);
1977 }
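
/*
 * Sketch of the target-driver side (illustration only; the callback and
 * requeue routine names are assumptions): TRAN_BUSY from this routine means
 * the HBA is temporarily out of resources (adapter reset in progress or the
 * firmware queue is full), and the caller is expected to retry the packet
 * later; see scsi_transport(9F).
 *
 *	pkt->pkt_comp = xx_intr;		// target driver's callback
 *	if (scsi_transport(pkt) == TRAN_BUSY)
 *		xx_requeue(pkt);		// retry after a delay
 */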
1978 
1979 /*
1980  * tran_abort - Abort any commands that are currently in transport
1981  * @ap:
1982  * @pkt:
1983  *
1984  * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1985  * commands that are currently in transport for a particular target. This entry
1986  * point is called when a target driver calls scsi_abort(). The tran_abort()
1987  * entry point should attempt to abort the command denoted by the pkt
1988  * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1989  * abort all outstanding commands in the transport layer for the particular
1990  * target or logical unit.
1991  */
1992 /*ARGSUSED*/
1993 static int
1994 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1995 {
1996         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1997 
1998         /* abort command not supported by H/W */
1999 
2000         return (DDI_FAILURE);
2001 }
2002 
2003 /*
2004  * tran_reset - reset either the SCSI bus or target
2005  * @ap:
2006  * @level:
2007  *
2008  * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2009  * the SCSI bus or a particular SCSI target device. This entry point is called
2010  * when a target driver calls scsi_reset(). The tran_reset() entry point must
2011  * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2012  * particular target or logical unit must be reset.
2013  */
2014 /*ARGSUSED*/
2015 static int
2016 mrsas_tran_reset(struct scsi_address *ap, int level)
2017 {
2018         struct mrsas_instance *instance = ADDR2MR(ap);
2019 
2020         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2021 
2022         if (wait_for_outstanding(instance)) {
2023                 con_log(CL_ANN1,
2024                     (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2025                 return (DDI_FAILURE);
2026         } else {
2027                 return (DDI_SUCCESS);
2028         }
2029 }
2030 
2031 /*
2032  * tran_getcap - get one of a set of SCSA-defined capabilities
2033  * @ap:
2034  * @cap:
2035  * @whom:
2036  *
2037  * The target driver can request the current setting of the capability for a
2038  * particular target by setting the whom parameter to nonzero. A whom value of
2039  * zero indicates a request for the current setting of the general capability
2040  * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2041  * for undefined capabilities or the current value of the requested capability.
2042  */
2043 /*ARGSUSED*/
2044 static int
2045 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2046 {
2047         int     rval = 0;
2048 
2049         struct mrsas_instance   *instance = ADDR2MR(ap);
2050 
2051         con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2052 
2053         /* we do allow inquiring about capabilities for other targets */
2054         if (cap == NULL) {
2055                 return (-1);
2056         }
2057 
2058         switch (scsi_hba_lookup_capstr(cap)) {
2059         case SCSI_CAP_DMA_MAX:
2060                 if (instance->tbolt) {
2061                         /* Limit to 256k max transfer */
2062                         rval = mrsas_tbolt_max_cap_maxxfer;
2063                 } else {
2064                         /* Limit to 16MB max transfer */
2065                         rval = mrsas_max_cap_maxxfer;
2066                 }
2067                 break;
2068         case SCSI_CAP_MSG_OUT:
2069                 rval = 1;
2070                 break;
2071         case SCSI_CAP_DISCONNECT:
2072                 rval = 0;
2073                 break;
2074         case SCSI_CAP_SYNCHRONOUS:
2075                 rval = 0;
2076                 break;
2077         case SCSI_CAP_WIDE_XFER:
2078                 rval = 1;
2079                 break;
2080         case SCSI_CAP_TAGGED_QING:
2081                 rval = 1;
2082                 break;
2083         case SCSI_CAP_UNTAGGED_QING:
2084                 rval = 1;
2085                 break;
2086         case SCSI_CAP_PARITY:
2087                 rval = 1;
2088                 break;
2089         case SCSI_CAP_INITIATOR_ID:
2090                 rval = instance->init_id;
2091                 break;
2092         case SCSI_CAP_ARQ:
2093                 rval = 1;
2094                 break;
2095         case SCSI_CAP_LINKED_CMDS:
2096                 rval = 0;
2097                 break;
2098         case SCSI_CAP_RESET_NOTIFICATION:
2099                 rval = 1;
2100                 break;
2101         case SCSI_CAP_GEOMETRY:
2102                 rval = -1;
2103 
2104                 break;
2105         default:
2106                 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2107                     scsi_hba_lookup_capstr(cap)));
2108                 rval = -1;
2109                 break;
2110         }
2111 
2112         return (rval);
2113 }
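
/*
 * Example (target-driver side, see scsi_ifgetcap(9F)): asking for the
 * per-target maximum DMA transfer size lands in the SCSI_CAP_DMA_MAX case
 * above, which reports the 256k limit on Thunderbolt-class controllers and
 * the 16MB limit otherwise.
 *
 *	int max_xfer = scsi_ifgetcap(&devp->sd_address, "dma-max", 1);
 */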
2114 
2115 /*
2116  * tran_setcap - set one of a set of SCSA-defined capabilities
2117  * @ap:
2118  * @cap:
2119  * @value:
2120  * @whom:
2121  *
2122  * The target driver might request that the new value be set for a particular
2123  * target by setting the whom parameter to nonzero. A whom value of zero
2124  * means that request is to set the new value for the SCSI bus or for adapter
2125  * hardware in general.
2126  * The tran_setcap() should return the following values as appropriate:
2127  * - -1 for undefined capabilities
2128  * - 0 if the HBA driver cannot set the capability to the requested value
2129  * - 1 if the HBA driver is able to set the capability to the requested value
2130  */
2131 /*ARGSUSED*/
2132 static int
2133 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2134 {
2135         int             rval = 1;
2136 
2137         con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2138 
2139         /* We don't allow setting bus-wide capabilities (whom == 0) */
2140         if (cap == NULL || whom == 0) {
2141                 return (-1);
2142         }
2143 
2144         switch (scsi_hba_lookup_capstr(cap)) {
2145                 case SCSI_CAP_DMA_MAX:
2146                 case SCSI_CAP_MSG_OUT:
2147                 case SCSI_CAP_PARITY:
2148                 case SCSI_CAP_LINKED_CMDS:
2149                 case SCSI_CAP_RESET_NOTIFICATION:
2150                 case SCSI_CAP_DISCONNECT:
2151                 case SCSI_CAP_SYNCHRONOUS:
2152                 case SCSI_CAP_UNTAGGED_QING:
2153                 case SCSI_CAP_WIDE_XFER:
2154                 case SCSI_CAP_INITIATOR_ID:
2155                 case SCSI_CAP_ARQ:
2156                         /*
2157                          * None of these are settable via
2158                          * the capability interface.
2159                          */
2160                         break;
2161                 case SCSI_CAP_TAGGED_QING:
2162                         rval = 1;
2163                         break;
2164                 case SCSI_CAP_SECTOR_SIZE:
2165                         rval = 1;
2166                         break;
2167 
2168                 case SCSI_CAP_TOTAL_SECTORS:
2169                         rval = 1;
2170                         break;
2171                 default:
2172                         rval = -1;
2173                         break;
2174         }
2175 
2176         return (rval);
2177 }
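
/*
 * Example (target-driver side, see scsi_ifsetcap(9F)): a per-target request
 * to enable tagged queuing is accepted (returns 1) by the
 * SCSI_CAP_TAGGED_QING case above, while a bus-wide request (whom == 0) is
 * rejected with -1 before the switch is reached.
 *
 *	(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 1, 1);
 */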
2178 
2179 /*
2180  * tran_destroy_pkt - deallocate scsi_pkt structure
2181  * @ap:
2182  * @pkt:
2183  *
2184  * The tran_destroy_pkt() entry point is the HBA driver function that
2185  * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2186  * called when the target driver calls scsi_destroy_pkt(). The
2187  * tran_destroy_pkt() entry point must free any DMA resources that have been
2188  * allocated for the packet. An implicit DMA synchronization occurs if the
2189  * DMA resources are freed and any cached data remains after the completion
2190  * of the transfer.
2191  */
2192 static void
2193 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2194 {
2195         struct scsa_cmd *acmd = PKT2CMD(pkt);
2196 
2197         con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2198 
2199         if (acmd->cmd_flags & CFLAG_DMAVALID) {
2200                 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2201 
2202                 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2203 
2204                 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2205 
2206                 acmd->cmd_dmahandle = NULL;
2207         }
2208 
2209         /* free the pkt */
2210         scsi_hba_pkt_free(ap, pkt);
2211 }
2212 
2213 /*
2214  * tran_dmafree - deallocates DMA resources
2215  * @ap:
2216  * @pkt:
2217  *
2218  * The tran_dmafree() entry point deallocates DMA resources that have been
2219  * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2220  * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2221  * free only DMA resources allocated for a scsi_pkt structure, not the
2222  * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2223  * implicitly performed.
2224  */
2225 /*ARGSUSED*/
2226 static void
2227 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2228 {
2229         register struct scsa_cmd *acmd = PKT2CMD(pkt);
2230 
2231         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2232 
2233         if (acmd->cmd_flags & CFLAG_DMAVALID) {
2234                 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2235 
2236                 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2237 
2238                 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2239 
2240                 acmd->cmd_dmahandle = NULL;
2241         }
2242 }
2243 
2244 /*
2245  * tran_sync_pkt - synchronize the DMA object allocated
2246  * @ap:
2247  * @pkt:
2248  *
2249  * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2250  * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2251  * entry point is called when the target driver calls scsi_sync_pkt(). If the
2252  * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2253  * must synchronize the CPU's view of the data. If the data transfer direction
2254  * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2255  * device's view of the data.
2256  */
2257 /*ARGSUSED*/
2258 static void
2259 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2260 {
2261         register struct scsa_cmd        *acmd = PKT2CMD(pkt);
2262 
2263         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2264 
2265         if (acmd->cmd_flags & CFLAG_DMAVALID) {
2266                 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2267                     acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2268                     DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2269         }
2270 }
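
/*
 * Example (target-driver side, see scsi_sync_pkt(9F)): after a DMA read
 * completes, the target driver synchronizes the CPU's view of the buffer
 * before examining the data; that call is what lands here.
 *
 *	scsi_sync_pkt(pkt);
 *	// buffer contents may now be read safely
 */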
2271 
2272 /*ARGSUSED*/
2273 static int
2274 mrsas_tran_quiesce(dev_info_t *dip)
2275 {
2276         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2277 
2278         return (1);
2279 }
2280 
2281 /*ARGSUSED*/
2282 static int
2283 mrsas_tran_unquiesce(dev_info_t *dip)
2284 {
2285         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2286 
2287         return (1);
2288 }
2289 
2290 
2291 /*
2292  * mrsas_isr(struct mrsas_instance *)
2293  *
2294  * The Interrupt Service Routine
2295  *
2296  * Collect status for all completed commands and do callback
2297  *
2298  */
2299 static uint_t
2300 mrsas_isr(struct mrsas_instance *instance)
2301 {
2302         int             need_softintr;
2303         uint32_t        producer;
2304         uint32_t        consumer;
2305         uint32_t        context;
2306         int             retval;
2307 
2308         struct mrsas_cmd        *cmd;
2309         struct mrsas_header     *hdr;
2310         struct scsi_pkt         *pkt;
2311 
2312         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2313         ASSERT(instance);
2314         if (instance->tbolt) {
2315                 mutex_enter(&instance->chip_mtx);
2316                 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2317                     !(instance->func_ptr->intr_ack(instance))) {
2318                         mutex_exit(&instance->chip_mtx);
2319                         return (DDI_INTR_UNCLAIMED);
2320                 }
2321                 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2322                 mutex_exit(&instance->chip_mtx);
2323                 return (retval);
2324         } else {
2325                 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2326                     !instance->func_ptr->intr_ack(instance)) {
2327                         return (DDI_INTR_UNCLAIMED);
2328                 }
2329         }
2330 
2331         (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2332             0, 0, DDI_DMA_SYNC_FORCPU);
2333 
2334         if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2335             != DDI_SUCCESS) {
2336                 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2337                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2338                 con_log(CL_ANN1, (CE_WARN,
2339                     "mr_sas_isr(): FMA check, returning DDI_INTR_CLAIMED"));
2340                 return (DDI_INTR_CLAIMED);
2341         }
2342         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2343 
2344 #ifdef OCRDEBUG
2345         if (debug_consecutive_timeout_after_ocr_g == 1) {
2346                 con_log(CL_ANN1, (CE_NOTE,
2347                     "simulating consecutive timeout after ocr"));
2348                 return (DDI_INTR_CLAIMED);
2349         }
2350 #endif
2351 
2352         mutex_enter(&instance->completed_pool_mtx);
2353         mutex_enter(&instance->cmd_pend_mtx);
2354 
2355         producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2356             instance->producer);
2357         consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2358             instance->consumer);
2359 
2360         con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2361             producer, consumer));
2362         if (producer == consumer) {
2363                 con_log(CL_ANN, (CE_WARN, "producer ==  consumer case"));
2364                 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2365                     uint32_t, consumer);
2366                 mutex_exit(&instance->cmd_pend_mtx);
2367                 mutex_exit(&instance->completed_pool_mtx);
2368                 return (DDI_INTR_CLAIMED);
2369         }
2370 
2371         while (consumer != producer) {
2372                 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2373                     &instance->reply_queue[consumer]);
2374                 cmd = instance->cmd_list[context];
2375 
2376                 if (cmd->sync_cmd == MRSAS_TRUE) {
2377                         hdr = (struct mrsas_header *)&cmd->frame->hdr;
2378                         if (hdr) {
2379                                 mlist_del_init(&cmd->list);
2380                         }
2381                 } else {
2382                         pkt = cmd->pkt;
2383                         if (pkt) {
2384                                 mlist_del_init(&cmd->list);
2385                         }
2386                 }
2387 
2388                 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2389 
2390                 consumer++;
2391                 if (consumer == (instance->max_fw_cmds + 1)) {
2392                         consumer = 0;
2393                 }
2394         }
2395         ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2396             instance->consumer, consumer);
2397         mutex_exit(&instance->cmd_pend_mtx);
2398         mutex_exit(&instance->completed_pool_mtx);
2399 
2400         (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2401             0, 0, DDI_DMA_SYNC_FORDEV);
2402 
2403         if (instance->softint_running) {
2404                 need_softintr = 0;
2405         } else {
2406                 need_softintr = 1;
2407         }
2408 
2409         if (instance->isr_level == HIGH_LEVEL_INTR) {
2410                 if (need_softintr) {
2411                         ddi_trigger_softintr(instance->soft_intr_id);
2412                 }
2413         } else {
2414                 /*
2415                  * Not a high-level interrupt, therefore call the soft level
2416                  * interrupt explicitly
2417                  */
2418                 (void) mrsas_softintr(instance);
2419         }
2420 
2421         return (DDI_INTR_CLAIMED);
2422 }
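
/*
 * Reply-ring arithmetic used in the loop above, written as a single
 * equivalent expression: the ring holds max_fw_cmds + 1 entries, so the
 * consumer index advances as
 *
 *	consumer = (consumer + 1) % (instance->max_fw_cmds + 1);
 *
 * which is exactly what the increment plus the explicit wrap-around test
 * implement.
 */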
2423 
2424 
2425 /*
2426  * ************************************************************************** *
2427  *                                                                            *
2428  *                                  libraries                                 *
2429  *                                                                            *
2430  * ************************************************************************** *
2431  */
2432 /*
2433  * get_mfi_pkt : Get a command from the free pool
2434  * After successful allocation, the caller of this routine
2435  * must clear the frame buffer (memset to zero) before
2436  * using the packet further.
2437  *
2438  * ***** Note *****
2439  * After clearing the frame buffer, the context id of the
2440  * frame buffer SHOULD be restored.
2441  */
2442 struct mrsas_cmd *
2443 mrsas_get_mfi_pkt(struct mrsas_instance *instance)
2444 {
2445         mlist_t                 *head = &instance->cmd_pool_list;
2446         struct mrsas_cmd        *cmd = NULL;
2447 
2448         mutex_enter(&instance->cmd_pool_mtx);
2449 
2450         if (!mlist_empty(head)) {
2451                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2452                 mlist_del_init(head->next);
2453         }
2454         if (cmd != NULL) {
2455                 cmd->pkt = NULL;
2456                 cmd->retry_count_for_ocr = 0;
2457                 cmd->drv_pkt_time = 0;
2458 
2459         }
2460         mutex_exit(&instance->cmd_pool_mtx);
2461 
2462         return (cmd);
2463 }
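
/*
 * Caller sketch (illustration only; the frame union and context field names
 * are assumptions based on how cmd->frame is used elsewhere in this file):
 * per the note above, the caller clears the frame after allocation and then
 * restores the context id, since the context is how a reply is mapped back
 * to cmd_list[] in the ISR.
 *
 *	cmd = mrsas_get_mfi_pkt(instance);
 *	if (cmd != NULL) {
 *		bzero(cmd->frame, sizeof (union mrsas_frame));	// type assumed
 *		ddi_put32(cmd->frame_dma_obj.acc_handle,
 *		    &cmd->frame->hdr.context, cmd->index);	// restore context
 *	}
 */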
2464 
2465 static struct mrsas_cmd *
2466 get_mfi_app_pkt(struct mrsas_instance *instance)
2467 {
2468         mlist_t                         *head = &instance->app_cmd_pool_list;
2469         struct mrsas_cmd        *cmd = NULL;
2470 
2471         mutex_enter(&instance->app_cmd_pool_mtx);
2472 
2473         if (!mlist_empty(head)) {
2474                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2475                 mlist_del_init(head->next);
2476         }
2477         if (cmd != NULL) {
2478                 cmd->pkt = NULL;
2479                 cmd->retry_count_for_ocr = 0;
2480                 cmd->drv_pkt_time = 0;
2481         }
2482 
2483         mutex_exit(&instance->app_cmd_pool_mtx);
2484 
2485         return (cmd);
2486 }
2487 /*
2488  * return_mfi_pkt : Return a cmd to free command pool
2489  */
2490 void
2491 mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2492 {
2493         mutex_enter(&instance->cmd_pool_mtx);
2494         /* use mlist_add_tail for debug assistance */
2495         mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2496 
2497         mutex_exit(&instance->cmd_pool_mtx);
2498 }
2499 
2500 static void
2501 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2502 {
2503         mutex_enter(&instance->app_cmd_pool_mtx);
2504 
2505         mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2506 
2507         mutex_exit(&instance->app_cmd_pool_mtx);
2508 }
2509 void
2510 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2511 {
2512         struct scsi_pkt *pkt;
2513         struct mrsas_header     *hdr;
2514         con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
2515         mutex_enter(&instance->cmd_pend_mtx);
2516         mlist_del_init(&cmd->list);
2517         mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
2518         if (cmd->sync_cmd == MRSAS_TRUE) {
2519                 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2520                 if (hdr) {
2521                         con_log(CL_ANN1, (CE_CONT,
2522                             "push_pending_mfi_pkt: "
2523                             "cmd %p index %x "
2524                             "time %llx",
2525                             (void *)cmd, cmd->index,
2526                             gethrtime()));
2527                         /* Wait for specified interval  */
2528                         cmd->drv_pkt_time = ddi_get16(
2529                             cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2530                         if (cmd->drv_pkt_time < debug_timeout_g)
2531                                 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2532                         con_log(CL_ANN1, (CE_CONT,
2533                             "push_pending_pkt(): "
2534                             "Called IO Timeout Value %x\n",
2535                             cmd->drv_pkt_time));
2536                 }
2537                 if (hdr && instance->timeout_id == (timeout_id_t)-1) {
2538                         instance->timeout_id = timeout(io_timeout_checker,
2539                             (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2540                 }
2541         } else {
2542                 pkt = cmd->pkt;
2543                 if (pkt) {
2544                         con_log(CL_ANN1, (CE_CONT,
2545                             "push_pending_mfi_pkt: "
2546                             "cmd %p index %x pkt %p, "
2547                             "time %llx",
2548                             (void *)cmd, cmd->index, (void *)pkt,
2549                             gethrtime()));
2550                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2551                 }
2552                 if (pkt && instance->timeout_id == (timeout_id_t)-1) {
2553                         instance->timeout_id = timeout(io_timeout_checker,
2554                             (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2555                 }
2556         }
2557 
2558         mutex_exit(&instance->cmd_pend_mtx);
2559 
2560 }
2561 
2562 int
2563 mrsas_print_pending_cmds(struct mrsas_instance *instance)
2564 {
2565         mlist_t *head = &instance->cmd_pend_list;
2566         mlist_t *tmp = head;
2567         struct mrsas_cmd *cmd = NULL;
2568         struct mrsas_header     *hdr;
2569         unsigned int            flag = 1;
2570         struct scsi_pkt *pkt;
2571         int saved_level;
2572         int cmd_count = 0;
2573 
2574         saved_level = debug_level_g;
2575         debug_level_g = CL_ANN1;
2576 
2577         dev_err(instance->dip, CE_NOTE,
2578             "mrsas_print_pending_cmds(): Called");
2579 
2580         while (flag) {
2581                 mutex_enter(&instance->cmd_pend_mtx);
2582                 tmp     =       tmp->next;
2583                 if (tmp == head) {
2584                         mutex_exit(&instance->cmd_pend_mtx);
2585                         flag = 0;
2586                         con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
2587                             " NO MORE CMDS PENDING....\n"));
2588                         break;
2589                 } else {
2590                         cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2591                         mutex_exit(&instance->cmd_pend_mtx);
2592                         if (cmd) {
2593                                 if (cmd->sync_cmd == MRSAS_TRUE) {
2594                                         hdr = (struct mrsas_header *)
2595                                             &cmd->frame->hdr;
2596                                         if (hdr) {
2597                                                 con_log(CL_ANN1, (CE_CONT,
2598                                                     "print: cmd %p index 0x%x "
2599                                                     "drv_pkt_time 0x%x (NO-PKT)"
2600                                                     " hdr %p\n", (void *)cmd,
2601                                                     cmd->index,
2602                                                     cmd->drv_pkt_time,
2603                                                     (void *)hdr));
2604                                         }
2605                                 } else {
2606                                         pkt = cmd->pkt;
2607                                         if (pkt) {
2608                                         con_log(CL_ANN1, (CE_CONT,
2609                                             "print: cmd %p index 0x%x "
2610                                             "drv_pkt_time 0x%x pkt %p \n",
2611                                             (void *)cmd, cmd->index,
2612                                             cmd->drv_pkt_time, (void *)pkt));
2613                                         }
2614                                 }
2615 
2616                                 if (++cmd_count == 1) {
2617                                         mrsas_print_cmd_details(instance, cmd,
2618                                             0xDD);
2619                                 } else {
2620                                         mrsas_print_cmd_details(instance, cmd,
2621                                             1);
2622                                 }
2623 
2624                         }
2625                 }
2626         }
2627         con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));
2628 
2629 
2630         debug_level_g = saved_level;
2631 
2632         return (DDI_SUCCESS);
2633 }
2634 
2635 
2636 int
2637 mrsas_complete_pending_cmds(struct mrsas_instance *instance)
2638 {
2639 
2640         struct mrsas_cmd *cmd = NULL;
2641         struct scsi_pkt *pkt;
2642         struct mrsas_header *hdr;
2643 
2644         struct mlist_head               *pos, *next;
2645 
2646         con_log(CL_ANN1, (CE_NOTE,
2647             "mrsas_complete_pending_cmds(): Called"));
2648 
2649         mutex_enter(&instance->cmd_pend_mtx);
2650         mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
2651                 cmd = mlist_entry(pos, struct mrsas_cmd, list);
2652                 if (cmd) {
2653                         pkt = cmd->pkt;
2654                         if (pkt) { /* for IO */
2655                                 if (((pkt->pkt_flags & FLAG_NOINTR)
2656                                     == 0) && pkt->pkt_comp) {
2657                                         pkt->pkt_reason
2658                                             = CMD_DEV_GONE;
2659                                         pkt->pkt_statistics
2660                                             = STAT_DISCON;
2661                                         con_log(CL_ANN1, (CE_CONT,
2662                                             "fail and posting to scsa "
2663                                             "cmd %p index %x"
2664                                             " pkt %p "
2665                                             "time : %llx",
2666                                             (void *)cmd, cmd->index,
2667                                             (void *)pkt, gethrtime()));
2668                                         (*pkt->pkt_comp)(pkt);
2669                                 }
2670                         } else { /* for DCMDS */
2671                                 if (cmd->sync_cmd == MRSAS_TRUE) {
2672                                 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2673                                 con_log(CL_ANN1, (CE_CONT,
2674                                     "posting invalid status to application "
2675                                     "cmd %p index %x"
2676                                     " hdr %p "
2677                                     "time : %llx",
2678                                     (void *)cmd, cmd->index,
2679                                     (void *)hdr, gethrtime()));
2680                                 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2681                                 complete_cmd_in_sync_mode(instance, cmd);
2682                                 }
2683                         }
2684                         mlist_del_init(&cmd->list);
2685                 } else {
2686                         con_log(CL_ANN1, (CE_CONT,
2687                             "mrsas_complete_pending_cmds:"
2688                             "NULL command\n"));
2689                 }
2690                 con_log(CL_ANN1, (CE_CONT,
                    "mrsas_complete_pending_cmds: "
                    "looping for more commands\n"));
2693         }
2694         mutex_exit(&instance->cmd_pend_mtx);
2695 
2696         con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
2697         return (DDI_SUCCESS);
2698 }
2699 
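/*
 * mrsas_print_cmd_details - Dump the interesting fields of a command for
 * debugging.  A detail value of 0xDD temporarily raises the global debug
 * level so that the output is emitted even when debugging is otherwise off.
 */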
2700 void
2701 mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
2702     int detail)
2703 {
2704         struct scsi_pkt *pkt = cmd->pkt;
2705         Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
2706         int i;
2707         int saved_level;
2708         ddi_acc_handle_t acc_handle =
2709             instance->mpi2_frame_pool_dma_obj.acc_handle;
2710 
2711         if (detail == 0xDD) {
2712                 saved_level = debug_level_g;
2713                 debug_level_g = CL_ANN1;
2714         }
2715 
2716 
2717         if (instance->tbolt) {
2718                 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
2719                     "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
2720                     (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
2721         } else {
2722                 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
2723                     "cmd->index 0x%x timer 0x%x sec\n",
2724                     (void *)cmd, cmd->index, cmd->drv_pkt_time));
2725         }
2726 
2727         if (pkt) {
2728                 con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
2729                     pkt->pkt_cdbp[0]));
2730         } else {
2731                 con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
2732         }
2733 
2734         if ((detail == 0xDD) && instance->tbolt) {
2735                 con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
2736                 con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
2737                     "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
2738                     ddi_get16(acc_handle, &scsi_io->DevHandle),
2739                     ddi_get8(acc_handle, &scsi_io->Function),
2740                     ddi_get16(acc_handle, &scsi_io->IoFlags),
2741                     ddi_get16(acc_handle, &scsi_io->SGLFlags),
2742                     ddi_get32(acc_handle, &scsi_io->DataLength)));
2743 
2744                 for (i = 0; i < 32; i++) {
2745                         con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
2746                             ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
2747                 }
2748 
2749                 con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
2750                 con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
2751                     "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
2752                     "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
2753                     " regLockLength=0x%X spanArm=0x%X\n",
2754                     ddi_get8(acc_handle, &scsi_io->RaidContext.status),
2755                     ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
2756                     ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
2757                     ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
2758                     ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
2759                     ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
2760                     ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
2761                     ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
2762                     ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
2763         }
2764 
2765         if (detail == 0xDD) {
2766                 debug_level_g = saved_level;
2767         }
2768 }
2769 
2770 
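/*
 * mrsas_issue_pending_cmds - Re-issue every command on the pending list,
 * refreshing its timeout first.  If a command has already been retried more
 * than IO_RETRY_COUNT times, the adapter is assumed to be beyond recovery
 * and is killed.
 */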
2771 int
2772 mrsas_issue_pending_cmds(struct mrsas_instance *instance)
2773 {
2774         mlist_t *head   =       &instance->cmd_pend_list;
2775         mlist_t *tmp    =       head->next;
2776         struct mrsas_cmd *cmd = NULL;
2777         struct scsi_pkt *pkt;
2778 
2779         con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
2780         while (tmp != head) {
2781                 mutex_enter(&instance->cmd_pend_mtx);
2782                 cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2783                 tmp = tmp->next;
2784                 mutex_exit(&instance->cmd_pend_mtx);
2785                 if (cmd) {
2786                         con_log(CL_ANN1, (CE_CONT,
2787                             "mrsas_issue_pending_cmds(): "
2788                             "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
2789                             (void *)cmd, cmd->index, cmd->drv_pkt_time));
2790 
2791                         /* Reset command timeout value */
2792                         if (cmd->drv_pkt_time < debug_timeout_g)
2793                                 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2794 
2795                         cmd->retry_count_for_ocr++;
2796 
2797                         dev_err(instance->dip, CE_CONT,
2798                             "cmd retry count = %d\n",
2799                             cmd->retry_count_for_ocr);
2800 
2801                         if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
2802                                 dev_err(instance->dip,
2803                                     CE_WARN, "mrsas_issue_pending_cmds(): "
2804                                     "cmd->retry_count exceeded limit >%d\n",
2805                                     IO_RETRY_COUNT);
2806                                 mrsas_print_cmd_details(instance, cmd, 0xDD);
2807 
2808                                 dev_err(instance->dip, CE_WARN,
2809                                     "mrsas_issue_pending_cmds():"
2810                                     "Calling KILL Adapter");
2811                                 if (instance->tbolt)
2812                                         mrsas_tbolt_kill_adapter(instance);
2813                                 else
2814                                         (void) mrsas_kill_adapter(instance);
2815                                 return (DDI_FAILURE);
2816                         }
2817 
2818                         pkt = cmd->pkt;
2819                         if (pkt) {
2820                                 con_log(CL_ANN1, (CE_CONT,
2821                                     "PENDING PKT-CMD ISSUE: cmd %p index %x "
2822                                     "pkt %p time %llx",
2823                                     (void *)cmd, cmd->index,
2824                                     (void *)pkt,
2825                                     gethrtime()));
2826 
2827                         } else {
2828                                 dev_err(instance->dip, CE_CONT,
2829                                     "mrsas_issue_pending_cmds(): NO-PKT, "
2830                                     "cmd %p index 0x%x drv_pkt_time 0x%x",
2831                                     (void *)cmd, cmd->index, cmd->drv_pkt_time);
2832                         }
2833 
2834 
2835                         if (cmd->sync_cmd == MRSAS_TRUE) {
2836                                 dev_err(instance->dip, CE_CONT,
2837                                     "mrsas_issue_pending_cmds(): "
                                    "SYNC_CMD == TRUE\n");
2839                                 instance->func_ptr->issue_cmd_in_sync_mode(
2840                                     instance, cmd);
2841                         } else {
2842                                 instance->func_ptr->issue_cmd(cmd, instance);
2843                         }
2844                 } else {
2845                         con_log(CL_ANN1, (CE_CONT,
2846                             "mrsas_issue_pending_cmds: NULL command\n"));
2847                 }
2848                 con_log(CL_ANN1, (CE_CONT,
                    "mrsas_issue_pending_cmds: "
2850                     "looping for more commands"));
2851         }
2852         con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
2853         return (DDI_SUCCESS);
2854 }
2855 
2856 
2857 
2858 /*
2859  * destroy_mfi_frame_pool
2860  */
2861 void
2862 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2863 {
2864         int             i;
2865         uint32_t        max_cmd = instance->max_fw_cmds;
2866 
2867         struct mrsas_cmd        *cmd;
2868 
        /* free the frame DMA buffer attached to each command */
2870 
2871         for (i = 0; i < max_cmd; i++) {
2872 
2873                 cmd = instance->cmd_list[i];
2874 
2875                 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2876                         (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2877 
2878                 cmd->frame_dma_obj_status  = DMA_OBJ_FREED;
2879         }
2880 
2881 }
2882 
2883 /*
2884  * create_mfi_frame_pool
2885  */
2886 int
2887 create_mfi_frame_pool(struct mrsas_instance *instance)
2888 {
2889         int             i = 0;
2890         int             cookie_cnt;
2891         uint16_t        max_cmd;
2892         uint16_t        sge_sz;
2893         uint32_t        sgl_sz;
2894         uint32_t        tot_frame_size;
2895         struct mrsas_cmd        *cmd;
2896         int                     retval = DDI_SUCCESS;
2897 
2898         max_cmd = instance->max_fw_cmds;
2899         sge_sz  = sizeof (struct mrsas_sge_ieee);
        /* calculate the SGL size and the total frame size */
2901         sgl_sz          = sge_sz * instance->max_num_sge;
2902         tot_frame_size  = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2903 
2904         con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2905             "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
2906 
2907         while (i < max_cmd) {
2908                 cmd = instance->cmd_list[i];
2909 
2910                 cmd->frame_dma_obj.size      = tot_frame_size;
2911                 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2912                 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2913                 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2914                 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2915                 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2916 
2917                 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2918                     (uchar_t)DDI_STRUCTURE_LE_ACC);
2919 
2920                 if (cookie_cnt == -1 || cookie_cnt > 1) {
2921                         dev_err(instance->dip, CE_WARN,
2922                             "create_mfi_frame_pool: could not alloc.");
2923                         retval = DDI_FAILURE;
2924                         goto mrsas_undo_frame_pool;
2925                 }
2926 
2927                 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
2928 
2929                 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
2930                 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
2931                 cmd->frame_phys_addr =
2932                     cmd->frame_dma_obj.dma_cookie[0].dmac_address;
2933 
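                /*
                 * The sense buffer is carved out of the tail of each frame
                 * allocation, i.e. the last SENSE_LENGTH bytes.
                 */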
2934                 cmd->sense = (uint8_t *)(((unsigned long)
2935                     cmd->frame_dma_obj.buffer) +
2936                     tot_frame_size - SENSE_LENGTH);
2937                 cmd->sense_phys_addr =
2938                     cmd->frame_dma_obj.dma_cookie[0].dmac_address +
2939                     tot_frame_size - SENSE_LENGTH;
2940 
2941                 if (!cmd->frame || !cmd->sense) {
2942                         dev_err(instance->dip, CE_WARN,
                            "create_mfi_frame_pool: frame setup failed");
2944                         retval = ENOMEM;
2945                         goto mrsas_undo_frame_pool;
2946                 }
2947 
2948                 ddi_put32(cmd->frame_dma_obj.acc_handle,
2949                     &cmd->frame->io.context, cmd->index);
2950                 i++;
2951 
2952                 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
2953                     cmd->index, cmd->frame_phys_addr));
2954         }
2955 
2956         return (DDI_SUCCESS);
2957 
2958 mrsas_undo_frame_pool:
2959         if (i > 0)
2960                 destroy_mfi_frame_pool(instance);
2961 
2962         return (retval);
2963 }
2964 
2965 /*
2966  * free_additional_dma_buffer
2967  */
2968 static void
2969 free_additional_dma_buffer(struct mrsas_instance *instance)
2970 {
2971         if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2972                 (void) mrsas_free_dma_obj(instance,
2973                     instance->mfi_internal_dma_obj);
2974                 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2975         }
2976 
2977         if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
2978                 (void) mrsas_free_dma_obj(instance,
2979                     instance->mfi_evt_detail_obj);
2980                 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
2981         }
2982 }
2983 
2984 /*
2985  * alloc_additional_dma_buffer
2986  */
2987 static int
2988 alloc_additional_dma_buffer(struct mrsas_instance *instance)
2989 {
2990         uint32_t        reply_q_sz;
2991         uint32_t        internal_buf_size = PAGESIZE*2;
2992 
        /* (max cmds + 1) reply queue entries, plus producer and consumer */
2994         reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
2995 
2996         instance->mfi_internal_dma_obj.size = internal_buf_size;
2997         instance->mfi_internal_dma_obj.dma_attr      = mrsas_generic_dma_attr;
2998         instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2999         instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
3000             0xFFFFFFFFU;
3001         instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen      = 1;
3002 
3003         if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
3004             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3005                 dev_err(instance->dip, CE_WARN,
3006                     "could not alloc reply queue");
3007                 return (DDI_FAILURE);
3008         }
3009 
3010         bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
3011 
3012         instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
3013 
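        /*
         * Carve up the internal buffer: the producer index lives at offset
         * 0, the consumer index at offset 4 and the reply queue starts at
         * offset 8; whatever remains past reply_q_sz + 8 is used as a
         * general purpose internal data buffer.
         */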
3014         instance->producer = (uint32_t *)((unsigned long)
3015             instance->mfi_internal_dma_obj.buffer);
3016         instance->consumer = (uint32_t *)((unsigned long)
3017             instance->mfi_internal_dma_obj.buffer + 4);
3018         instance->reply_queue = (uint32_t *)((unsigned long)
3019             instance->mfi_internal_dma_obj.buffer + 8);
3020         instance->internal_buf = (caddr_t)(((unsigned long)
3021             instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
3022         instance->internal_buf_dmac_add =
3023             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
3024             (reply_q_sz + 8);
3025         instance->internal_buf_size = internal_buf_size -
3026             (reply_q_sz + 8);
3027 
3028         /* allocate evt_detail */
3029         instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
3030         instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
3031         instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3032         instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3033         instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
3034         instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
3035 
3036         if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
3037             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3038                 dev_err(instance->dip, CE_WARN, "alloc_additional_dma_buffer: "
3039                     "could not allocate data transfer buffer.");
3040                 goto mrsas_undo_internal_buff;
3041         }
3042 
3043         bzero(instance->mfi_evt_detail_obj.buffer,
3044             sizeof (struct mrsas_evt_detail));
3045 
3046         instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
3047 
3048         return (DDI_SUCCESS);
3049 
3050 mrsas_undo_internal_buff:
3051         if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
3052                 (void) mrsas_free_dma_obj(instance,
3053                     instance->mfi_internal_dma_obj);
3054                 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
3055         }
3056 
3057         return (DDI_FAILURE);
3058 }
3059 
3060 
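/*
 * mrsas_free_cmd_pool - Free every command structure and the cmd_list array
 * itself, then re-initialize the command list heads.
 */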
3061 void
3062 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3063 {
3064         int             i;
3065         uint32_t        max_cmd;
3066         size_t          sz;
3067 
3068         /* already freed */
3069         if (instance->cmd_list == NULL) {
3070                 return;
3071         }
3072 
3073         max_cmd = instance->max_fw_cmds;
3074 
3075         /* size of cmd_list array */
3076         sz = sizeof (struct mrsas_cmd *) * max_cmd;
3077 
3078         /* First free each cmd */
3079         for (i = 0; i < max_cmd; i++) {
3080                 if (instance->cmd_list[i] != NULL) {
3081                         kmem_free(instance->cmd_list[i],
3082                             sizeof (struct mrsas_cmd));
3083                 }
3084 
3085                 instance->cmd_list[i] = NULL;
3086         }
3087 
3088         /* Now, free cmd_list array */
3089         if (instance->cmd_list != NULL)
3090                 kmem_free(instance->cmd_list, sz);
3091 
3092         instance->cmd_list = NULL;
3093 
3094         INIT_LIST_HEAD(&instance->cmd_pool_list);
3095         INIT_LIST_HEAD(&instance->cmd_pend_list);
3096         if (instance->tbolt) {
3097                 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3098         } else {
3099                 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3100         }
3101 
3102 }
3103 
3104 
3105 /*
3106  * mrsas_alloc_cmd_pool
3107  */
3108 int
3109 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3110 {
3111         int             i;
3112         int             count;
3113         uint32_t        max_cmd;
3114         uint32_t        reserve_cmd;
3115         size_t          sz;
3116 
3117         struct mrsas_cmd        *cmd;
3118 
3119         max_cmd = instance->max_fw_cmds;
3120         con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3121             "max_cmd %x", max_cmd));
3122 
3123 
3124         sz = sizeof (struct mrsas_cmd *) * max_cmd;
3125 
3126         /*
3127          * instance->cmd_list is an array of struct mrsas_cmd pointers.
3128          * Allocate the dynamic array first and then allocate individual
3129          * commands.
3130          */
3131         instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3132         ASSERT(instance->cmd_list);
3133 
3134         /* create a frame pool and assign one frame to each cmd */
3135         for (count = 0; count < max_cmd; count++) {
3136                 instance->cmd_list[count] =
3137                     kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3138                 ASSERT(instance->cmd_list[count]);
3139         }
3140 
3141         /* add all the commands to command pool */
3142 
3143         INIT_LIST_HEAD(&instance->cmd_pool_list);
3144         INIT_LIST_HEAD(&instance->cmd_pend_list);
3145         INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3146 
        /*
         * Reserve commands for application (ioctl) use: one eighth of
         * max_cmd, bounded below by MRSAS_APP_MIN_RESERVED_CMDS and above
         * by MRSAS_APP_RESERVED_CMDS.
         */
3152         reserve_cmd = min(MRSAS_APP_RESERVED_CMDS,
3153             max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS));
3154 
3155         for (i = 0; i < reserve_cmd; i++) {
3156                 cmd = instance->cmd_list[i];
3157                 cmd->index = i;
3158                 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3159         }
3160 
3161 
3162         for (i = reserve_cmd; i < max_cmd; i++) {
3163                 cmd = instance->cmd_list[i];
3164                 cmd->index = i;
3165                 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3166         }
3167 
3168         return (DDI_SUCCESS);
3169 
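        /*
         * Unwind path.  The labels below are currently unreachable because
         * the KM_SLEEP allocations above cannot fail, but the cleanup is
         * kept so the error handling stays complete.
         */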
3170 mrsas_undo_cmds:
3171         if (count > 0) {
3172                 /* free each cmd */
3173                 for (i = 0; i < count; i++) {
3174                         if (instance->cmd_list[i] != NULL) {
3175                                 kmem_free(instance->cmd_list[i],
3176                                     sizeof (struct mrsas_cmd));
3177                         }
3178                         instance->cmd_list[i] = NULL;
3179                 }
3180         }
3181 
3182 mrsas_undo_cmd_list:
3183         if (instance->cmd_list != NULL)
3184                 kmem_free(instance->cmd_list, sz);
3185         instance->cmd_list = NULL;
3186 
3187         return (DDI_FAILURE);
3188 }
3189 
3190 
3191 /*
3192  * free_space_for_mfi
3193  */
3194 static void
3195 free_space_for_mfi(struct mrsas_instance *instance)
3196 {
3197 
3198         /* already freed */
3199         if (instance->cmd_list == NULL) {
3200                 return;
3201         }
3202 
3203         /* Free additional dma buffer */
3204         free_additional_dma_buffer(instance);
3205 
3206         /* Free the MFI frame pool */
3207         destroy_mfi_frame_pool(instance);
3208 
3209         /* Free all the commands in the cmd_list */
3210         /* Free the cmd_list buffer itself */
3211         mrsas_free_cmd_pool(instance);
3212 }
3213 
3214 /*
3215  * alloc_space_for_mfi
3216  */
3217 static int
3218 alloc_space_for_mfi(struct mrsas_instance *instance)
3219 {
3220         /* Allocate command pool (memory for cmd_list & individual commands) */
3221         if (mrsas_alloc_cmd_pool(instance)) {
3222                 dev_err(instance->dip, CE_WARN, "error creating cmd pool");
3223                 return (DDI_FAILURE);
3224         }
3225 
3226         /* Allocate MFI Frame pool */
3227         if (create_mfi_frame_pool(instance)) {
3228                 dev_err(instance->dip, CE_WARN,
3229                     "error creating frame DMA pool");
3230                 goto mfi_undo_cmd_pool;
3231         }
3232 
3233         /* Allocate additional DMA buffer */
3234         if (alloc_additional_dma_buffer(instance)) {
3235                 dev_err(instance->dip, CE_WARN,
                    "error allocating additional DMA buffer");
3237                 goto mfi_undo_frame_pool;
3238         }
3239 
3240         return (DDI_SUCCESS);
3241 
3242 mfi_undo_frame_pool:
3243         destroy_mfi_frame_pool(instance);
3244 
3245 mfi_undo_cmd_pool:
3246         mrsas_free_cmd_pool(instance);
3247 
3248         return (DDI_FAILURE);
3249 }
3250 
3251 
3252 
3253 /*
3254  * get_ctrl_info
3255  */
3256 static int
3257 get_ctrl_info(struct mrsas_instance *instance,
3258     struct mrsas_ctrl_info *ctrl_info)
3259 {
3260         int     ret = 0;
3261 
3262         struct mrsas_cmd                *cmd;
3263         struct mrsas_dcmd_frame *dcmd;
3264         struct mrsas_ctrl_info  *ci;
3265 
3266         if (instance->tbolt) {
3267                 cmd = get_raid_msg_mfi_pkt(instance);
3268         } else {
3269                 cmd = mrsas_get_mfi_pkt(instance);
3270         }
3271 
3272         if (!cmd) {
3273                 con_log(CL_ANN, (CE_WARN,
3274                     "Failed to get a cmd for ctrl info"));
3275                 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3276                     uint16_t, instance->max_fw_cmds);
3277                 return (DDI_FAILURE);
3278         }
3279 
3280         /* Clear the frame buffer and assign back the context id */
3281         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3282         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3283             cmd->index);
3284 
3285         dcmd = &cmd->frame->dcmd;
3286 
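        /*
         * The controller info is DMAed into the instance's pre-allocated
         * internal buffer rather than into a per-command allocation.
         */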
3287         ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3288 
3289         if (!ci) {
3290                 dev_err(instance->dip, CE_WARN,
3291                     "Failed to alloc mem for ctrl info");
3292                 mrsas_return_mfi_pkt(instance, cmd);
3293                 return (DDI_FAILURE);
3294         }
3295 
3296         (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3297 
3299         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3300 
3301         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3302         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3303             MFI_CMD_STATUS_POLL_MODE);
3304         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3305         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3306             MFI_FRAME_DIR_READ);
3307         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3308         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3309             sizeof (struct mrsas_ctrl_info));
3310         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3311             MR_DCMD_CTRL_GET_INFO);
3312         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3313             instance->internal_buf_dmac_add);
3314         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3315             sizeof (struct mrsas_ctrl_info));
3316 
3317         cmd->frame_count = 1;
3318 
3319         if (instance->tbolt) {
3320                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3321         }
3322 
3323         if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3324                 ret = 0;
3325 
3326                 ctrl_info->max_request_size = ddi_get32(
3327                     cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3328 
3329                 ctrl_info->ld_present_count = ddi_get16(
3330                     cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3331 
3332                 ctrl_info->properties.on_off_properties = ddi_get32(
3333                     cmd->frame_dma_obj.acc_handle,
3334                     &ci->properties.on_off_properties);
3335                 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3336                     (uint8_t *)(ctrl_info->product_name),
3337                     (uint8_t *)(ci->product_name), 80 * sizeof (char),
3338                     DDI_DEV_AUTOINCR);
3339                 /* should get more members of ci with ddi_get when needed */
3340         } else {
3341                 dev_err(instance->dip, CE_WARN,
3342                     "get_ctrl_info: Ctrl info failed");
3343                 ret = -1;
3344         }
3345 
3346         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3347                 ret = -1;
3348         }
3349         if (instance->tbolt) {
3350                 return_raid_msg_mfi_pkt(instance, cmd);
3351         } else {
3352                 mrsas_return_mfi_pkt(instance, cmd);
3353         }
3354 
3355         return (ret);
3356 }
3357 
3358 /*
3359  * abort_aen_cmd
3360  */
3361 static int
3362 abort_aen_cmd(struct mrsas_instance *instance,
3363     struct mrsas_cmd *cmd_to_abort)
3364 {
3365         int     ret = 0;
3366 
3367         struct mrsas_cmd                *cmd;
3368         struct mrsas_abort_frame        *abort_fr;
3369 
3370         con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3371 
3372         if (instance->tbolt) {
3373                 cmd = get_raid_msg_mfi_pkt(instance);
3374         } else {
3375                 cmd = mrsas_get_mfi_pkt(instance);
3376         }
3377 
3378         if (!cmd) {
3379                 con_log(CL_ANN1, (CE_WARN,
3380                     "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3381                 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3382                     uint16_t, instance->max_fw_cmds);
3383                 return (DDI_FAILURE);
3384         }
3385 
3386         /* Clear the frame buffer and assign back the context id */
3387         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3388         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3389             cmd->index);
3390 
3391         abort_fr = &cmd->frame->abort;
3392 
3393         /* prepare and issue the abort frame */
3394         ddi_put8(cmd->frame_dma_obj.acc_handle,
3395             &abort_fr->cmd, MFI_CMD_OP_ABORT);
3396         ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3397             MFI_CMD_STATUS_SYNC_MODE);
3398         ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3399         ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3400             cmd_to_abort->index);
3401         ddi_put32(cmd->frame_dma_obj.acc_handle,
3402             &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3403         ddi_put32(cmd->frame_dma_obj.acc_handle,
3404             &abort_fr->abort_mfi_phys_addr_hi, 0);
3405 
3406         instance->aen_cmd->abort_aen = 1;
3407 
3408         cmd->frame_count = 1;
3409 
3410         if (instance->tbolt) {
3411                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3412         }
3413 
3414         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3415                 con_log(CL_ANN1, (CE_WARN,
3416                     "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3417                 ret = -1;
3418         } else {
3419                 ret = 0;
3420         }
3421 
        instance->aen_cmd->abort_aen = 1;
        instance->aen_cmd = NULL;
3424 
3425         if (instance->tbolt) {
3426                 return_raid_msg_mfi_pkt(instance, cmd);
3427         } else {
3428                 mrsas_return_mfi_pkt(instance, cmd);
3429         }
3430 
3431         atomic_add_16(&instance->fw_outstanding, (-1));
3432 
3433         return (ret);
3434 }
3435 
3436 
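/*
 * mrsas_build_init_cmd - Fill in an MFI INIT frame for the supplied command,
 * pointing it at the reply queue laid out in the internal DMA buffer, and
 * allocate a small DMA buffer holding the driver version string that the
 * frame references.
 */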
3437 static int
3438 mrsas_build_init_cmd(struct mrsas_instance *instance,
3439     struct mrsas_cmd **cmd_ptr)
3440 {
3441         struct mrsas_cmd                *cmd;
3442         struct mrsas_init_frame         *init_frame;
3443         struct mrsas_init_queue_info    *initq_info;
3444         struct mrsas_drv_ver            drv_ver_info;
3445 
3446 
        /*
         * Prepare an init frame.  Note that the init frame points to the
         * queue info structure.  Each frame has SGL space allocated after
         * the first 64 bytes; since this frame needs no SGL, that space is
         * used for the queue info structure instead.
         */
3453         cmd = *cmd_ptr;
3454 
3455 
3456         /* Clear the frame buffer and assign back the context id */
3457         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3458         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3459             cmd->index);
3460 
3461         init_frame = (struct mrsas_init_frame *)cmd->frame;
3462         initq_info = (struct mrsas_init_queue_info *)
3463             ((unsigned long)init_frame + 64);
3464 
3465         (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3466         (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3467 
3468         ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3469 
3470         ddi_put32(cmd->frame_dma_obj.acc_handle,
3471             &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3472 
3473         ddi_put32(cmd->frame_dma_obj.acc_handle,
3474             &initq_info->producer_index_phys_addr_hi, 0);
3475         ddi_put32(cmd->frame_dma_obj.acc_handle,
3476             &initq_info->producer_index_phys_addr_lo,
3477             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3478 
3479         ddi_put32(cmd->frame_dma_obj.acc_handle,
3480             &initq_info->consumer_index_phys_addr_hi, 0);
3481         ddi_put32(cmd->frame_dma_obj.acc_handle,
3482             &initq_info->consumer_index_phys_addr_lo,
3483             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3484 
3485         ddi_put32(cmd->frame_dma_obj.acc_handle,
3486             &initq_info->reply_queue_start_phys_addr_hi, 0);
3487         ddi_put32(cmd->frame_dma_obj.acc_handle,
3488             &initq_info->reply_queue_start_phys_addr_lo,
3489             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3490 
3491         ddi_put8(cmd->frame_dma_obj.acc_handle,
3492             &init_frame->cmd, MFI_CMD_OP_INIT);
3493         ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3494             MFI_CMD_STATUS_POLL_MODE);
3495         ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3496         ddi_put32(cmd->frame_dma_obj.acc_handle,
3497             &init_frame->queue_info_new_phys_addr_lo,
3498             cmd->frame_phys_addr + 64);
3499         ddi_put32(cmd->frame_dma_obj.acc_handle,
3500             &init_frame->queue_info_new_phys_addr_hi, 0);
3501 
3502 
3503         /* fill driver version information */
3504         fill_up_drv_ver(&drv_ver_info);
3505 
3506         /* allocate the driver version data transfer buffer */
3507         instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
3508         instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
3509         instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3510         instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3511         instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
3512         instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
3513 
3514         if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
3515             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3516                 con_log(CL_ANN, (CE_WARN,
3517                     "init_mfi : Could not allocate driver version buffer."));
3518                 return (DDI_FAILURE);
3519         }
3520         /* copy driver version to dma buffer */
3521         (void) memset(instance->drv_ver_dma_obj.buffer, 0,
3522             sizeof (drv_ver_info.drv_ver));
3523         ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
3524             (uint8_t *)drv_ver_info.drv_ver,
3525             (uint8_t *)instance->drv_ver_dma_obj.buffer,
3526             sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
3527 
3528 
3529         /* copy driver version physical address to init frame */
3530         ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
3531             instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
3532 
3533         ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3534             sizeof (struct mrsas_init_queue_info));
3535 
3536         cmd->frame_count = 1;
3537 
3538         *cmd_ptr = cmd;
3539 
3540         return (DDI_SUCCESS);
3541 }
3542 
3543 
3544 /*
3545  * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3546  */
3547 int
3548 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3549 {
3550         struct mrsas_cmd                *cmd;
3551 
        /*
         * Allocate memory for the MFI adapter (command pool, individual
         * commands, MFI frames, etc.).
         */
3556         if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3557                 con_log(CL_ANN, (CE_NOTE,
3558                     "Error, failed to allocate memory for MFI adapter"));
3559                 return (DDI_FAILURE);
3560         }
3561 
3562         /* Build INIT command */
3563         cmd = mrsas_get_mfi_pkt(instance);
3564         if (cmd == NULL) {
3565                 DTRACE_PROBE2(init_adapter_mfi_err, uint16_t,
3566                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3567                 return (DDI_FAILURE);
3568         }
3569 
3570         if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3571                 con_log(CL_ANN,
3572                     (CE_NOTE, "Error, failed to build INIT command"));
3573 
3574                 goto fail_undo_alloc_mfi_space;
3575         }
3576 
        /*
         * Disable interrupts before sending the init frame (see the Linux
         * driver code); send the INIT MFI frame in polled mode.
         */
3581         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3582                 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3583                 goto fail_fw_init;
3584         }
3585 
3586         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3587                 goto fail_fw_init;
3588         mrsas_return_mfi_pkt(instance, cmd);
3589 
3590         if (ctio_enable &&
3591             (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3592                 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3593                 instance->flag_ieee = 1;
3594         } else {
3595                 instance->flag_ieee = 0;
3596         }
3597 
3598         ASSERT(!instance->skinny || instance->flag_ieee);
3599 
3600         instance->unroll.alloc_space_mfi = 1;
3601         instance->unroll.verBuff = 1;
3602 
3603         return (DDI_SUCCESS);
3604 
3605 
3606 fail_fw_init:
3607         (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3608 
3609 fail_undo_alloc_mfi_space:
3610         mrsas_return_mfi_pkt(instance, cmd);
3611         free_space_for_mfi(instance);
3612 
3613         return (DDI_FAILURE);
3614 
3615 }
3616 
3617 /*
3618  * mrsas_init_adapter - Initialize adapter.
3619  */
3620 int
3621 mrsas_init_adapter(struct mrsas_instance *instance)
3622 {
3623         struct mrsas_ctrl_info          ctrl_info;
3624 
3625 
3626         /* we expect the FW state to be READY */
3627         if (mfi_state_transition_to_ready(instance)) {
3628                 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3629                 return (DDI_FAILURE);
3630         }
3631 
3632         /* get various operational parameters from status register */
3633         instance->max_num_sge =
3634             (instance->func_ptr->read_fw_status_reg(instance) &
3635             0xFF0000) >> 0x10;
3636         instance->max_num_sge =
3637             (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3638             MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3639 
3640         /*
3641          * Reduce the max supported cmds by 1. This is to ensure that the
3642          * reply_q_sz (1 more than the max cmd that driver may send)
3643          * does not exceed max cmds that the FW can support
3644          */
3645         instance->max_fw_cmds =
3646             instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3647         instance->max_fw_cmds = instance->max_fw_cmds - 1;
3648 
3649 
3650 
3651         /* Initialize adapter */
3652         if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3653                 con_log(CL_ANN,
3654                     (CE_WARN, "mr_sas: could not initialize adapter"));
3655                 return (DDI_FAILURE);
3656         }
3657 
3658         /* gather misc FW related information */
3659         instance->disable_online_ctrl_reset = 0;
3660 
        if (!get_ctrl_info(instance, &ctrl_info)) {
                instance->max_sectors_per_req = ctrl_info.max_request_size;
                con_log(CL_ANN1, (CE_NOTE,
                    "product name %s ld present %d",
                    ctrl_info.product_name, ctrl_info.ld_present_count));

                /*
                 * Only trust the controller properties when the DCMD
                 * actually succeeded; ctrl_info is not initialized
                 * otherwise.
                 */
                if (ctrl_info.properties.on_off_properties &
                    DISABLE_OCR_PROP_FLAG)
                        instance->disable_online_ctrl_reset = 1;
        } else {
                instance->max_sectors_per_req = instance->max_num_sge *
                    PAGESIZE / 512;
        }
3673 
3674         return (DDI_SUCCESS);
3675 
3676 }
3677 
3678 
3679 
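/*
 * mrsas_issue_init_mfi - Build an MFI INIT frame using a command taken from
 * the application command pool and issue it to the firmware in polled mode.
 */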
3680 static int
3681 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3682 {
3683         struct mrsas_cmd                *cmd;
3684         struct mrsas_init_frame         *init_frame;
3685         struct mrsas_init_queue_info    *initq_info;
3686 
        /*
         * Prepare an init frame.  Note that the init frame points to the
         * queue info structure.  Each frame has SGL space allocated after
         * the first 64 bytes; since this frame needs no SGL, that space is
         * used for the queue info structure instead.
         */
3693         con_log(CL_ANN1, (CE_NOTE,
3694             "mrsas_issue_init_mfi: entry\n"));
3695         cmd = get_mfi_app_pkt(instance);
3696 
3697         if (!cmd) {
3698                 con_log(CL_ANN1, (CE_WARN,
3699                     "mrsas_issue_init_mfi: get_pkt failed\n"));
3700                 return (DDI_FAILURE);
3701         }
3702 
3703         /* Clear the frame buffer and assign back the context id */
3704         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3705         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3706             cmd->index);
3707 
3708         init_frame = (struct mrsas_init_frame *)cmd->frame;
3709         initq_info = (struct mrsas_init_queue_info *)
3710             ((unsigned long)init_frame + 64);
3711 
3712         (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3713         (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3714 
3715         ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3716 
3717         ddi_put32(cmd->frame_dma_obj.acc_handle,
3718             &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3719         ddi_put32(cmd->frame_dma_obj.acc_handle,
3720             &initq_info->producer_index_phys_addr_hi, 0);
3721         ddi_put32(cmd->frame_dma_obj.acc_handle,
3722             &initq_info->producer_index_phys_addr_lo,
3723             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3724         ddi_put32(cmd->frame_dma_obj.acc_handle,
3725             &initq_info->consumer_index_phys_addr_hi, 0);
3726         ddi_put32(cmd->frame_dma_obj.acc_handle,
3727             &initq_info->consumer_index_phys_addr_lo,
3728             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3729 
3730         ddi_put32(cmd->frame_dma_obj.acc_handle,
3731             &initq_info->reply_queue_start_phys_addr_hi, 0);
3732         ddi_put32(cmd->frame_dma_obj.acc_handle,
3733             &initq_info->reply_queue_start_phys_addr_lo,
3734             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3735 
3736         ddi_put8(cmd->frame_dma_obj.acc_handle,
3737             &init_frame->cmd, MFI_CMD_OP_INIT);
3738         ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3739             MFI_CMD_STATUS_POLL_MODE);
3740         ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3741         ddi_put32(cmd->frame_dma_obj.acc_handle,
3742             &init_frame->queue_info_new_phys_addr_lo,
3743             cmd->frame_phys_addr + 64);
3744         ddi_put32(cmd->frame_dma_obj.acc_handle,
3745             &init_frame->queue_info_new_phys_addr_hi, 0);
3746 
3747         ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3748             sizeof (struct mrsas_init_queue_info));
3749 
3750         cmd->frame_count = 1;
3751 
3752         /* issue the init frame in polled mode */
3753         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3754                 con_log(CL_ANN1, (CE_WARN,
3755                     "mrsas_issue_init_mfi():failed to "
3756                     "init firmware"));
3757                 return_mfi_app_pkt(instance, cmd);
3758                 return (DDI_FAILURE);
3759         }
3760 
3761         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3762                 return_mfi_app_pkt(instance, cmd);
3763                 return (DDI_FAILURE);
3764         }
3765 
3766         return_mfi_app_pkt(instance, cmd);
3767         con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3768 
3769         return (DDI_SUCCESS);
3770 }

/*
 * mfi_state_transition_to_ready : Move the FW to the READY state
 *
 * @instance                     : adapter soft state
 */
3776 int
3777 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3778 {
3779         int             i;
3780         uint8_t         max_wait;
3781         uint32_t        fw_ctrl = 0;
3782         uint32_t        fw_state;
3783         uint32_t        cur_state;
3784         uint32_t        cur_abs_reg_val;
3785         uint32_t        prev_abs_reg_val;
3786         uint32_t        status;
3787 
3788         cur_abs_reg_val =
3789             instance->func_ptr->read_fw_status_reg(instance);
        fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3792         con_log(CL_ANN1, (CE_CONT,
3793             "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3794 
3795         while (fw_state != MFI_STATE_READY) {
3796                 con_log(CL_ANN, (CE_CONT,
                    "mfi_state_transition_to_ready: FW state %x", fw_state));
3798 
3799                 switch (fw_state) {
3800                 case MFI_STATE_FAULT:
3801                         con_log(CL_ANN, (CE_NOTE,
3802                             "mr_sas: FW in FAULT state!!"));
3803 
3804                         return (ENODEV);
3805                 case MFI_STATE_WAIT_HANDSHAKE:
3806                         /* set the CLR bit in IMR0 */
3807                         con_log(CL_ANN1, (CE_NOTE,
3808                             "mr_sas: FW waiting for HANDSHAKE"));
3809                         /*
3810                          * PCI_Hot Plug: MFI F/W requires
3811                          * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3812                          * to be set
3813                          */
3814                         /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3815                         if (!instance->tbolt && !instance->skinny) {
3816                                 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3817                                     MFI_INIT_HOTPLUG, instance);
3818                         } else {
3819                                 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3820                                     MFI_INIT_HOTPLUG, instance);
3821                         }
3822                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3823                         cur_state       = MFI_STATE_WAIT_HANDSHAKE;
3824                         break;
3825                 case MFI_STATE_BOOT_MESSAGE_PENDING:
3826                         /* set the CLR bit in IMR0 */
3827                         con_log(CL_ANN1, (CE_NOTE,
3828                             "mr_sas: FW state boot message pending"));
3829                         /*
3830                          * PCI_Hot Plug: MFI F/W requires
3831                          * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3832                          * to be set
3833                          */
3834                         if (!instance->tbolt && !instance->skinny) {
3835                                 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3836                         } else {
3837                                 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3838                                     instance);
3839                         }
3840                         max_wait        = (instance->tbolt == 1) ? 180 : 10;
3841                         cur_state       = MFI_STATE_BOOT_MESSAGE_PENDING;
3842                         break;
3843                 case MFI_STATE_OPERATIONAL:
                        /* bring it to the READY state */
3845                         instance->func_ptr->disable_intr(instance);
3846                         con_log(CL_ANN1, (CE_NOTE,
3847                             "mr_sas: FW in OPERATIONAL state"));
3848                         /*
3849                          * PCI_Hot Plug: MFI F/W requires
3850                          * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3851                          * to be set
3852                          */
3853                         /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3854                         if (!instance->tbolt && !instance->skinny) {
3855                                 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3856                         } else {
3857                                 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3858                                     instance);
3859 
3860                                 for (i = 0; i < (10 * 1000); i++) {
3861                                         status =
3862                                             RD_RESERVED0_REGISTER(instance);
3863                                         if (status & 1) {
3864                                                 delay(1 *
3865                                                     drv_usectohz(MILLISEC));
3866                                         } else {
3867                                                 break;
3868                                         }
3869                                 }
3870 
3871                         }
3872                         max_wait        = (instance->tbolt == 1) ? 180 : 10;
3873                         cur_state       = MFI_STATE_OPERATIONAL;
3874                         break;
3875                 case MFI_STATE_UNDEFINED:
3876                         /* this state should not last for more than 2 seconds */
3877                         con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3878 
3879                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3880                         cur_state       = MFI_STATE_UNDEFINED;
3881                         break;
3882                 case MFI_STATE_BB_INIT:
3883                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3884                         cur_state       = MFI_STATE_BB_INIT;
3885                         break;
3886                 case MFI_STATE_FW_INIT:
3887                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3888                         cur_state       = MFI_STATE_FW_INIT;
3889                         break;
3890                 case MFI_STATE_FW_INIT_2:
3891                         max_wait        = 180;
3892                         cur_state       = MFI_STATE_FW_INIT_2;
3893                         break;
3894                 case MFI_STATE_DEVICE_SCAN:
3895                         max_wait        = 180;
3896                         cur_state       = MFI_STATE_DEVICE_SCAN;
3897                         prev_abs_reg_val = cur_abs_reg_val;
3898                         con_log(CL_NONE, (CE_NOTE,
3899                             "Device scan in progress ...\n"));
3900                         break;
3901                 case MFI_STATE_FLUSH_CACHE:
3902                         max_wait        = 180;
3903                         cur_state       = MFI_STATE_FLUSH_CACHE;
3904                         break;
3905                 default:
3906                         con_log(CL_ANN1, (CE_NOTE,
3907                             "mr_sas: Unknown state 0x%x", fw_state));
3908                         return (ENODEV);
3909                 }
3910 
3911                 /* the cur_state should not last for more than max_wait secs */
3912                 for (i = 0; i < (max_wait * MILLISEC); i++) {
3913                         /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3914                         cur_abs_reg_val =
3915                             instance->func_ptr->read_fw_status_reg(instance);
3916                         fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3917 
3918                         if (fw_state == cur_state) {
3919                                 delay(1 * drv_usectohz(MILLISEC));
3920                         } else {
3921                                 break;
3922                         }
3923                 }
3924                 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3925                         if (prev_abs_reg_val != cur_abs_reg_val) {
3926                                 continue;
3927                         }
3928                 }
3929 
3930                 /* return error if fw_state hasn't changed after max_wait */
3931                 if (fw_state == cur_state) {
3932                         con_log(CL_ANN1, (CE_WARN,
3933                             "FW state hasn't changed in %d secs", max_wait));
3934                         return (ENODEV);
3935                 }
        }
3937 
3938         /* This may also need to apply to Skinny, but for now, don't worry. */
3939         if (!instance->tbolt && !instance->skinny) {
3940                 fw_ctrl = RD_IB_DOORBELL(instance);
3941                 con_log(CL_ANN1, (CE_CONT,
3942                     "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3943 
3944                 /*
3945                  * Write 0xF to the doorbell register to do the following.
3946                  * - Abort all outstanding commands (bit 0).
3947                  * - Transition from OPERATIONAL to READY state (bit 1).
3948                  * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3949                  * - Set to release FW to continue running (i.e. BIOS handshake
3950                  *   (bit 3).
3951                  */
3952                 WR_IB_DOORBELL(0xF, instance);
3953         }
3954 
3955         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3956                 return (EIO);
3957         }
3958 
3959         return (DDI_SUCCESS);
3960 }
3961 
3962 /*
3963  * get_seq_num
3964  */
3965 static int
3966 get_seq_num(struct mrsas_instance *instance,
3967     struct mrsas_evt_log_info *eli)
3968 {
3969         int     ret = DDI_SUCCESS;
3970 
3971         dma_obj_t                       dcmd_dma_obj;
3972         struct mrsas_cmd                *cmd;
3973         struct mrsas_dcmd_frame         *dcmd;
        struct mrsas_evt_log_info       *eli_tmp;

3975         if (instance->tbolt) {
3976                 cmd = get_raid_msg_mfi_pkt(instance);
3977         } else {
3978                 cmd = mrsas_get_mfi_pkt(instance);
3979         }
3980 
3981         if (!cmd) {
3982                 dev_err(instance->dip, CE_WARN, "failed to get a cmd");
3983                 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
3984                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3985                 return (ENOMEM);
3986         }
3987 
3988         /* Clear the frame buffer and assign back the context id */
3989         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3990         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3991             cmd->index);
3992 
3993         dcmd = &cmd->frame->dcmd;
3994 
3995         /* allocate the data transfer buffer */
3996         dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3997         dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3998         dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3999         dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4000         dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4001         dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4002 
        if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
            (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
                dev_err(instance->dip, CE_WARN,
                    "get_seq_num: could not allocate data transfer buffer.");
                /* return the command before bailing out */
                if (instance->tbolt) {
                        return_raid_msg_mfi_pkt(instance, cmd);
                } else {
                        mrsas_return_mfi_pkt(instance, cmd);
                }
                return (DDI_FAILURE);
        }
4009 
4010         (void) memset(dcmd_dma_obj.buffer, 0,
4011             sizeof (struct mrsas_evt_log_info));
4012 
4013         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4014 
4015         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4016         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4017         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4018         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4019             MFI_FRAME_DIR_READ);
4020         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4021         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4022             sizeof (struct mrsas_evt_log_info));
4023         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4024             MR_DCMD_CTRL_EVENT_GET_INFO);
4025         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4026             sizeof (struct mrsas_evt_log_info));
4027         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4028             dcmd_dma_obj.dma_cookie[0].dmac_address);
4029 
4030         cmd->sync_cmd = MRSAS_TRUE;
4031         cmd->frame_count = 1;
4032 
4033         if (instance->tbolt) {
4034                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4035         }
4036 
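             /*
              * Issue the MR_DCMD_CTRL_EVENT_GET_INFO DCMD synchronously; on
              * success the FW has filled the event log info buffer and we
              * read back the newest event sequence number for the caller.
              */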
4037         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4038                 dev_err(instance->dip, CE_WARN, "get_seq_num: "
4039                     "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4040                 ret = DDI_FAILURE;
4041         } else {
4042                 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4043                 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4044                     &eli_tmp->newest_seq_num);
4045                 ret = DDI_SUCCESS;
4046         }
4047 
4048         if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4049                 ret = DDI_FAILURE;
4050 
4051         if (instance->tbolt) {
4052                 return_raid_msg_mfi_pkt(instance, cmd);
4053         } else {
4054                 mrsas_return_mfi_pkt(instance, cmd);
4055         }
4056 
4057         return (ret);
4058 }
4059 
4060 /*
4061  * start_mfi_aen
4062  */
4063 static int
4064 start_mfi_aen(struct mrsas_instance *instance)
4065 {
4066         int     ret = 0;
4067 
4068         struct mrsas_evt_log_info       eli;
4069         union mrsas_evt_class_locale    class_locale;
4070 
4071         /* get the latest sequence number from FW */
4072         (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4073 
4074         if (get_seq_num(instance, &eli)) {
4075                 dev_err(instance->dip, CE_WARN,
4076                     "start_mfi_aen: failed to get seq num");
4077                 return (-1);
4078         }
4079 
4080         /* register AEN with FW for latest sequence number plus 1 */
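             /*
              * The class/locale word packs the event locale mask together
              * with the lowest event class (severity) we want reported; the
              * FW expects the packed word in little-endian byte order.
              */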
4081         class_locale.members.reserved   = 0;
4082         class_locale.members.locale     = LE_16(MR_EVT_LOCALE_ALL);
4083         class_locale.members.class      = MR_EVT_CLASS_INFO;
4084         class_locale.word       = LE_32(class_locale.word);
4085         ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4086             class_locale.word);
4087 
4088         if (ret) {
4089                 dev_err(instance->dip, CE_WARN,
4090                     "start_mfi_aen: aen registration failed");
4091                 return (-1);
4092         }
4093 
4094 
4095         return (ret);
4096 }
4097 
4098 /*
4099  * flush_cache
4100  */
4101 static void
4102 flush_cache(struct mrsas_instance *instance)
4103 {
4104         struct mrsas_cmd                *cmd = NULL;
4105         struct mrsas_dcmd_frame         *dcmd;
4106         if (instance->tbolt) {
4107                 cmd = get_raid_msg_mfi_pkt(instance);
4108         } else {
4109                 cmd = mrsas_get_mfi_pkt(instance);
4110         }
4111 
4112         if (!cmd) {
4113                 con_log(CL_ANN1, (CE_WARN,
4114                     "flush_cache(): Failed to get a cmd for flush_cache"));
4115                 DTRACE_PROBE2(flush_cache_err, uint16_t,
4116                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4117                 return;
4118         }
4119 
4120         /* Clear the frame buffer and assign back the context id */
4121         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4122         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4123             cmd->index);
4124 
4125         dcmd = &cmd->frame->dcmd;
4126 
4127         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4128 
4129         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4130         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4131         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4132         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4133             MFI_FRAME_DIR_NONE);
4134         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4135         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4136         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4137             MR_DCMD_CTRL_CACHE_FLUSH);
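             /*
              * mbox byte 0 selects what to flush: both the controller cache
              * and the on-disk caches.
              */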
4138         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4139             MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4140 
4141         cmd->frame_count = 1;
4142 
4143         if (instance->tbolt) {
4144                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4145         }
4146 
4147         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4148                 con_log(CL_ANN1, (CE_WARN,
4149             "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4150         }
4151         con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4152         if (instance->tbolt) {
4153                 return_raid_msg_mfi_pkt(instance, cmd);
4154         } else {
4155                 mrsas_return_mfi_pkt(instance, cmd);
4156         }
4157 
4158 }
4159 
4160 /*
4161  * service_mfi_aen -    Completes an AEN command
4162  * @instance:                   Adapter soft state
4163  * @cmd:                        Command to be completed
4164  *
4165  */
4166 void
4167 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
4168 {
4169         uint32_t        seq_num;
4170         struct mrsas_evt_detail *evt_detail =
4171             (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
4172         int             rval = 0;
4173         int             tgt = 0;
4174         uint8_t         dtype;
4175         mrsas_pd_address_t      *pd_addr;
4176         ddi_acc_handle_t                acc_handle;
4177 
4178         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
4179 
4180         acc_handle = cmd->frame_dma_obj.acc_handle;
4181         cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
4182         if (cmd->cmd_status == ENODATA) {
4183                 cmd->cmd_status = 0;
4184         }
4185 
4186         /*
4187          * log the MFI AEN event to the sysevent queue so that
4188          * interested applications get notified
4189          */
4190         if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
4191             NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
4192                 int     instance_no = ddi_get_instance(instance->dip);
4193                 con_log(CL_ANN, (CE_WARN,
4194                     "mr_sas%d: Failed to log AEN event", instance_no));
4195         }
4196         /*
4197          * Check for any LD devices that have changed state, i.e. gone
4198          * online or offline.
4199          */
4200         con_log(CL_ANN1, (CE_CONT,
4201             "AEN: code = %x class = %x locale = %x args = %x",
4202             ddi_get32(acc_handle, &evt_detail->code),
4203             evt_detail->cl.members.class,
4204             ddi_get16(acc_handle, &evt_detail->cl.members.locale),
4205             ddi_get8(acc_handle, &evt_detail->arg_type)));
4206 
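             /*
              * Dispatch on the event code: LD create/delete and PD insert,
              * remove or state-change events schedule a config task through
              * mrsas_service_evt() to attach or detach the affected target.
              */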
4207         switch (ddi_get32(acc_handle, &evt_detail->code)) {
4208         case MR_EVT_CFG_CLEARED: {
4209                 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
4210                         if (instance->mr_ld_list[tgt].dip != NULL) {
4211                                 mutex_enter(&instance->config_dev_mtx);
4212                                 instance->mr_ld_list[tgt].flag =
4213                                     (uint8_t)~MRDRV_TGT_VALID;
4214                                 mutex_exit(&instance->config_dev_mtx);
4215                                 rval = mrsas_service_evt(instance, tgt, 0,
4216                                     MRSAS_EVT_UNCONFIG_TGT, NULL);
4217                                 con_log(CL_ANN1, (CE_WARN,
4218                                     "mr_sas: CFG CLEARED AEN rval = %d "
4219                                     "tgt id = %d", rval, tgt));
4220                         }
4221                 }
4222                 break;
4223         }
4224 
4225         case MR_EVT_LD_DELETED: {
4226                 tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
4227                 mutex_enter(&instance->config_dev_mtx);
4228                 instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
4229                 mutex_exit(&instance->config_dev_mtx);
4230                 rval = mrsas_service_evt(instance,
4231                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4232                     MRSAS_EVT_UNCONFIG_TGT, NULL);
4233                 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
4234                     "tgt id = %d index = %d", rval,
4235                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4236                     ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4237                 break;
4238         } /* End of MR_EVT_LD_DELETED */
4239 
4240         case MR_EVT_LD_CREATED: {
4241                 rval = mrsas_service_evt(instance,
4242                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4243                     MRSAS_EVT_CONFIG_TGT, NULL);
4244                 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
4245                     "tgt id = %d index = %d", rval,
4246                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4247                     ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4248                 break;
4249         } /* End of MR_EVT_LD_CREATED */
4250 
4251         case MR_EVT_PD_REMOVED_EXT: {
4252                 if (instance->tbolt || instance->skinny) {
4253                         pd_addr = &evt_detail->args.pd_addr;
4254                         dtype = pd_addr->scsi_dev_type;
4255                         con_log(CL_DLEVEL1, (CE_NOTE,
4256                             " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4257                             " arg_type = %d ", dtype, evt_detail->arg_type));
4258                         tgt = ddi_get16(acc_handle,
4259                             &evt_detail->args.pd.device_id);
4260                         mutex_enter(&instance->config_dev_mtx);
4261                         instance->mr_tbolt_pd_list[tgt].flag =
4262                             (uint8_t)~MRDRV_TGT_VALID;
4263                         mutex_exit(&instance->config_dev_mtx);
4264                         rval = mrsas_service_evt(instance, ddi_get16(
4265                             acc_handle, &evt_detail->args.pd.device_id),
4266                             1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4267                         con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED: "
4268                             "rval = %d tgt id = %d ", rval,
4269                             ddi_get16(acc_handle,
4270                             &evt_detail->args.pd.device_id)));
4271                 }
4272                 break;
4273         } /* End of MR_EVT_PD_REMOVED_EXT */
4274 
4275         case MR_EVT_PD_INSERTED_EXT: {
4276                 if (instance->tbolt || instance->skinny) {
4277                         rval = mrsas_service_evt(instance,
4278                             ddi_get16(acc_handle,
4279                             &evt_detail->args.pd.device_id),
4280                             1, MRSAS_EVT_CONFIG_TGT, NULL);
4281                             con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTED_EXT: "
4282                             "rval = %d tgt id = %d ", rval,
4283                             ddi_get16(acc_handle,
4284                             &evt_detail->args.pd.device_id)));
4285                 }
4286                 break;
4287         } /* End of MR_EVT_PD_INSERTED_EXT */
4288 
4289         case MR_EVT_PD_STATE_CHANGE: {
4290                 if (instance->tbolt || instance->skinny) {
4291                         tgt = ddi_get16(acc_handle,
4292                             &evt_detail->args.pd.device_id);
4293                         if ((evt_detail->args.pd_state.prevState ==
4294                             PD_SYSTEM) &&
4295                             (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
4296                                 mutex_enter(&instance->config_dev_mtx);
4297                                 instance->mr_tbolt_pd_list[tgt].flag =
4298                                     (uint8_t)~MRDRV_TGT_VALID;
4299                                 mutex_exit(&instance->config_dev_mtx);
4300                                 rval = mrsas_service_evt(instance,
4301                                     ddi_get16(acc_handle,
4302                                     &evt_detail->args.pd.device_id),
4303                                     1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4304                                 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED: "
4305                                     "rval = %d tgt id = %d ", rval,
4306                                     ddi_get16(acc_handle,
4307                                     &evt_detail->args.pd.device_id)));
4308                                 break;
4309                         }
4310                         if ((evt_detail->args.pd_state.prevState
4311                             == UNCONFIGURED_GOOD) &&
4312                             (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
4313                                 rval = mrsas_service_evt(instance,
4314                                     ddi_get16(acc_handle,
4315                                     &evt_detail->args.pd.device_id),
4316                                     1, MRSAS_EVT_CONFIG_TGT, NULL);
4317                                 con_log(CL_ANN1, (CE_WARN,
4318                                     "mr_sas: PD_INSERTED: rval = %d "
4319                                     " tgt id = %d ", rval,
4320                                     ddi_get16(acc_handle,
4321                                     &evt_detail->args.pd.device_id)));
4322                                 break;
4323                         }
4324                 }
4325                 break;
4326         }
4327 
4328         } /* End of Main Switch */
4329 
4330         /* get the event's seq_num and bump it for AEN re-registration */
4331         seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
4332         seq_num++;
4333         (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4334             sizeof (struct mrsas_evt_detail));
4335 
4336         ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
4337         ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);
4338 
4339         instance->aen_seq_num = seq_num;
4340 
4341         cmd->frame_count = 1;
4342 
4343         cmd->retry_count_for_ocr = 0;
4344         cmd->drv_pkt_time = 0;
4345 
4346         /* Issue the aen registration frame */
4347         instance->func_ptr->issue_cmd(cmd, instance);
4348 }
4349 
4350 /*
4351  * complete_cmd_in_sync_mode -  Completes an internal command
4352  * @instance:                   Adapter soft state
4353  * @cmd:                        Command to be completed
4354  *
4355  * The issue_cmd_in_sync_mode() function waits for a command to complete
4356  * after it issues a command. This function wakes up that waiting routine
4357  * by broadcasting on the internal command condition variable.
4358  */
4359 static void
4360 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4361     struct mrsas_cmd *cmd)
4362 {
4363         cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4364             &cmd->frame->io.cmd_status);
4365 
4366         cmd->sync_cmd = MRSAS_FALSE;
4367 
4368         con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4369             (void *)cmd));
4370 
4371         mutex_enter(&instance->int_cmd_mtx);
4372         if (cmd->cmd_status == ENODATA) {
4373                 cmd->cmd_status = 0;
4374         }
4375         cv_broadcast(&instance->int_cmd_cv);
4376         mutex_exit(&instance->int_cmd_mtx);
4377 
4378 }
4379 
4380 /*
4381  * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4382  * @instance:                   Adapter soft state
4383  * Called from mrsas_softintr(); returns whether an adapter reset is needed.
4384  */
4385 
4386 static uint32_t
4387 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4388 {
4389         uint32_t        cur_abs_reg_val;
4390         uint32_t        fw_state;
4391 
4392         cur_abs_reg_val =  instance->func_ptr->read_fw_status_reg(instance);
4393         fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4394         if (fw_state == MFI_STATE_FAULT) {
4395                 if (instance->disable_online_ctrl_reset == 1) {
4396                         dev_err(instance->dip, CE_WARN,
4397                             "mrsas_initiate_ocr_if_fw_is_faulty: "
4398                             "FW in Fault state, detected in ISR: "
4399                             "FW doesn't support ocr ");
4400 
4401                         return (ADAPTER_RESET_NOT_REQUIRED);
4402                 } else {
4403                         con_log(CL_ANN, (CE_NOTE,
4404                             "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4405                             "state, detected in ISR: FW supports ocr "));
4406 
4407                         return (ADAPTER_RESET_REQUIRED);
4408                 }
4409         }
4410 
4411         return (ADAPTER_RESET_NOT_REQUIRED);
4412 }
4413 
4414 /*
4415  * mrsas_softintr - The Software ISR
4416  * @param arg   : HBA soft state
4417  *
4418  * called directly from the hardware ISR when high-level interrupts are not
4419  * in use; otherwise triggered as a soft interrupt
4420  */
4421 static uint_t
4422 mrsas_softintr(struct mrsas_instance *instance)
4423 {
4424         struct scsi_pkt         *pkt;
4425         struct scsa_cmd         *acmd;
4426         struct mrsas_cmd        *cmd;
4427         struct mlist_head       *pos, *next;
4428         mlist_t                 process_list;
4429         struct mrsas_header     *hdr;
4430         struct scsi_arq_status  *arqstat;
4431 
4432         con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));
4433 
4434         ASSERT(instance);
4435 
4436         mutex_enter(&instance->completed_pool_mtx);
4437 
4438         if (mlist_empty(&instance->completed_pool_list)) {
4439                 mutex_exit(&instance->completed_pool_mtx);
4440                 return (DDI_INTR_CLAIMED);
4441         }
4442 
4443         instance->softint_running = 1;
4444 
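             /*
              * Splice the completed commands onto a private list under the
              * lock, then drop it so the interrupt path that feeds
              * completed_pool_list is not blocked while we process them.
              */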
4445         INIT_LIST_HEAD(&process_list);
4446         mlist_splice(&instance->completed_pool_list, &process_list);
4447         INIT_LIST_HEAD(&instance->completed_pool_list);
4448 
4449         mutex_exit(&instance->completed_pool_mtx);
4450 
4451         /* perform all callbacks first, before releasing the SCBs */
4452         mlist_for_each_safe(pos, next, &process_list) {
4453                 cmd = mlist_entry(pos, struct mrsas_cmd, list);
4454 
4455                 /* synchronize the cmd frame so the CPU sees FW updates */
4456                 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
4457                     0, 0, DDI_DMA_SYNC_FORCPU);
4458 
4459                 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4460                     DDI_SUCCESS) {
4461                         mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4462                         ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4463                         con_log(CL_ANN1, (CE_WARN,
4464                             "mrsas_softintr: "
4465                             "FMA check reports DMA handle failure"));
4466                         return (DDI_INTR_CLAIMED);
4467                 }
4468 
4469                 hdr = &cmd->frame->hdr;
4470 
4471                 /* remove the internal command from the process list */
4472                 mlist_del_init(&cmd->list);
4473 
4474                 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
4475                 case MFI_CMD_OP_PD_SCSI:
4476                 case MFI_CMD_OP_LD_SCSI:
4477                 case MFI_CMD_OP_LD_READ:
4478                 case MFI_CMD_OP_LD_WRITE:
4479                         /*
4480                          * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4481                          * could have been issued either through an
4482                          * IO path or an IOCTL path. If it was via IOCTL,
4483                          * we will send it to internal completion.
4484                          */
4485                         if (cmd->sync_cmd == MRSAS_TRUE) {
4486                                 complete_cmd_in_sync_mode(instance, cmd);
4487                                 break;
4488                         }
4489 
4490                         /* regular commands */
4491                         acmd =  cmd->cmd;
4492                         pkt =   CMD2PKT(acmd);
4493 
4494                         if (acmd->cmd_flags & CFLAG_DMAVALID) {
4495                                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4496                                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
4497                                             acmd->cmd_dma_offset,
4498                                             acmd->cmd_dma_len,
4499                                             DDI_DMA_SYNC_FORCPU);
4500                                 }
4501                         }
4502 
4503                         pkt->pkt_reason              = CMD_CMPLT;
4504                         pkt->pkt_statistics  = 0;
4505                         pkt->pkt_state = STATE_GOT_BUS
4506                             | STATE_GOT_TARGET | STATE_SENT_CMD
4507                             | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4508 
4509                         con_log(CL_ANN, (CE_CONT,
4510                             "CDB[0] = %x completed for %s: size %lx context %x",
4511                             pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4512                             acmd->cmd_dmacount, hdr->context));
4513                         DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4514                             uint_t, acmd->cmd_cdblen, ulong_t,
4515                             acmd->cmd_dmacount);
4516 
4517                         if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4518                                 struct scsi_inquiry     *inq;
4519 
4520                                 if (acmd->cmd_dmacount != 0) {
4521                                         bp_mapin(acmd->cmd_buf);
4522                                         inq = (struct scsi_inquiry *)
4523                                             acmd->cmd_buf->b_un.b_addr;
4524 
4525                                         if (hdr->cmd_status == MFI_STAT_OK) {
4526                                                 display_scsi_inquiry(
4527                                                     (caddr_t)inq);
4528                                         }
4529                                 }
4530                         }
4531 
4532                         DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4533                             uint8_t, hdr->cmd_status);
4534 
4535                         switch (hdr->cmd_status) {
4536                         case MFI_STAT_OK:
4537                                 pkt->pkt_scbp[0] = STATUS_GOOD;
4538                                 break;
4539                         case MFI_STAT_LD_CC_IN_PROGRESS:
4540                         case MFI_STAT_LD_RECON_IN_PROGRESS:
4541                                 pkt->pkt_scbp[0] = STATUS_GOOD;
4542                                 break;
4543                         case MFI_STAT_LD_INIT_IN_PROGRESS:
4544                                 con_log(CL_ANN,
4545                                     (CE_WARN, "Initialization in Progress"));
4546                                 pkt->pkt_reason      = CMD_TRAN_ERR;
4547 
4548                                 break;
4549                         case MFI_STAT_SCSI_DONE_WITH_ERROR:
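                                     /*
                                      * The device returned a SCSI check
                                      * condition.  Mark the command complete,
                                      * set the status byte and, except for
                                      * TEST UNIT READY, build an auto request
                                      * sense packet from the sense data the
                                      * FW returned with the frame.
                                      */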
4550                                 con_log(CL_ANN, (CE_CONT, "scsi_done error"));
4551 
4552                                 pkt->pkt_reason      = CMD_CMPLT;
4553                                 ((struct scsi_status *)
4554                                     pkt->pkt_scbp)->sts_chk = 1;
4555 
4556                                 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
4557                                         con_log(CL_ANN,
4558                                             (CE_WARN, "TEST_UNIT_READY fail"));
4559                                 } else {
4560                                         pkt->pkt_state |= STATE_ARQ_DONE;
4561                                         arqstat = (void *)(pkt->pkt_scbp);
4562                                         arqstat->sts_rqpkt_reason = CMD_CMPLT;
4563                                         arqstat->sts_rqpkt_resid = 0;
4564                                         arqstat->sts_rqpkt_state |=
4565                                             STATE_GOT_BUS | STATE_GOT_TARGET
4566                                             | STATE_SENT_CMD
4567                                             | STATE_XFERRED_DATA;
4568                                         *(uint8_t *)&arqstat->sts_rqpkt_status =
4569                                             STATUS_GOOD;
4570                                         ddi_rep_get8(
4571                                             cmd->frame_dma_obj.acc_handle,
4572                                             (uint8_t *)
4573                                             &(arqstat->sts_sensedata),
4574                                             cmd->sense,
4575                                             sizeof (struct scsi_extended_sense),
4576                                             DDI_DEV_AUTOINCR);
4577                                 }
4578                                 break;
4579                         case MFI_STAT_LD_OFFLINE:
4580                         case MFI_STAT_DEVICE_NOT_FOUND:
4581                                 con_log(CL_ANN, (CE_CONT,
4582                                 "mrsas_softintr: device not found error"));
4583                                 pkt->pkt_reason      = CMD_DEV_GONE;
4584                                 pkt->pkt_statistics  = STAT_DISCON;
4585                                 break;
4586                         case MFI_STAT_LD_LBA_OUT_OF_RANGE:
4587                                 pkt->pkt_state |= STATE_ARQ_DONE;
4588                                 pkt->pkt_reason      = CMD_CMPLT;
4589                                 ((struct scsi_status *)
4590                                     pkt->pkt_scbp)->sts_chk = 1;
4591 
4592                                 arqstat = (void *)(pkt->pkt_scbp);
4593                                 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4594                                 arqstat->sts_rqpkt_resid = 0;
4595                                 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
4596                                     | STATE_GOT_TARGET | STATE_SENT_CMD
4597                                     | STATE_XFERRED_DATA;
4598                                 *(uint8_t *)&arqstat->sts_rqpkt_status =
4599                                     STATUS_GOOD;
4600 
4601                                 arqstat->sts_sensedata.es_valid = 1;
4602                                 arqstat->sts_sensedata.es_key =
4603                                     KEY_ILLEGAL_REQUEST;
4604                                 arqstat->sts_sensedata.es_class =
4605                                     CLASS_EXTENDED_SENSE;
4606 
4607                                 /*
4608                                  * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4609                                  * ASC: 0x21; ASCQ: 0x00
4610                                  */
4611                                 arqstat->sts_sensedata.es_add_code = 0x21;
4612                                 arqstat->sts_sensedata.es_qual_code = 0x00;
4613 
4614                                 break;
4615 
4616                         default:
4617                                 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4618                                 pkt->pkt_reason      = CMD_TRAN_ERR;
4619 
4620                                 break;
4621                         }
4622 
4623                         atomic_add_16(&instance->fw_outstanding, (-1));
4624 
4625                         (void) mrsas_common_check(instance, cmd);
4626 
4627                         if (acmd->cmd_dmahandle) {
4628                                 if (mrsas_check_dma_handle(
4629                                     acmd->cmd_dmahandle) != DDI_SUCCESS) {
4630                                         ddi_fm_service_impact(instance->dip,
4631                                             DDI_SERVICE_UNAFFECTED);
4632                                         pkt->pkt_reason = CMD_TRAN_ERR;
4633                                         pkt->pkt_statistics = 0;
4634                                 }
4635                         }
4636 
4637                         mrsas_return_mfi_pkt(instance, cmd);
4638 
4639                         /* Call the callback routine */
4640                         if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4641                             pkt->pkt_comp) {
4642                                 (*pkt->pkt_comp)(pkt);
4643                         }
4644 
4645                         break;
4646 
4647                 case MFI_CMD_OP_SMP:
4648                 case MFI_CMD_OP_STP:
4649                         complete_cmd_in_sync_mode(instance, cmd);
4650                         break;
4651 
4652                 case MFI_CMD_OP_DCMD:
4653                         /* see if we got an event notification */
4654                         if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4655                             &cmd->frame->dcmd.opcode) ==
4656                             MR_DCMD_CTRL_EVENT_WAIT) {
4657                                 if ((instance->aen_cmd == cmd) &&
4658                                     (instance->aen_cmd->abort_aen)) {
4659                                         con_log(CL_ANN, (CE_WARN,
4660                                             "mrsas_softintr: "
4661                                             "aborted_aen returned"));
4662                                 } else {
4663                                         atomic_add_16(&instance->fw_outstanding,
4664                                             (-1));
4665                                         service_mfi_aen(instance, cmd);
4666                                 }
4667                         } else {
4668                                 complete_cmd_in_sync_mode(instance, cmd);
4669                         }
4670 
4671                         break;
4672 
4673                 case MFI_CMD_OP_ABORT:
4674                         con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
4675                         /*
4676                          * MFI_CMD_OP_ABORT successfully completed
4677                          * in the synchronous mode
4678                          */
4679                         complete_cmd_in_sync_mode(instance, cmd);
4680                         break;
4681 
4682                 default:
4683                         mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4684                         ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4685 
4686                         if (cmd->pkt != NULL) {
4687                                 pkt = cmd->pkt;
4688                                 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4689                                     pkt->pkt_comp) {
4690 
4691                                         con_log(CL_ANN1, (CE_CONT, "posting to "
4692                                             "scsa cmd %p index %x pkt %p"
4693                                             "time %llx, default ", (void *)cmd,
4694                                             cmd->index, (void *)pkt,
4695                                             gethrtime()));
4696 
4697                                         (*pkt->pkt_comp)(pkt);
4698 
4699                                 }
4700                         }
4701                         con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
4702                         break;
4703                 }
4704         }
4705 
4706         instance->softint_running = 0;
4707 
4708         return (DDI_INTR_CLAIMED);
4709 }
4710 
4711 /*
4712  * mrsas_alloc_dma_obj
4713  *
4714  * Allocate the memory and other resources for a DMA object.
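      *
      * Callers fill in obj->size and obj->dma_attr before calling; on
      * success the return value is the bound cookie count, and obj->buffer
      * plus obj->dma_cookie[0] describe the mapping.  A minimal caller
      * sketch (mirroring get_seq_num() above; the size is illustrative):
      *
      *         dma_obj_t obj;
      *
      *         obj.size = sizeof (struct mrsas_evt_log_info);
      *         obj.dma_attr = mrsas_generic_dma_attr;
      *         obj.dma_attr.dma_attr_sgllen = 1;
      *         obj.dma_attr.dma_attr_align = 1;
      *         if (mrsas_alloc_dma_obj(instance, &obj,
      *             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1)
      *                 return (DDI_FAILURE);
      *         ... use obj.buffer and obj.dma_cookie[0].dmac_address ...
      *         (void) mrsas_free_dma_obj(instance, obj);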
4715  */
4716 int
4717 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4718     uchar_t endian_flags)
4719 {
4720         int     i;
4721         size_t  alen = 0;
4722         uint_t  cookie_cnt;
4723         struct ddi_device_acc_attr tmp_endian_attr;
4724 
4725         tmp_endian_attr = endian_attr;
4726         tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4727         tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4728 
4729         i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4730             DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4731         if (i != DDI_SUCCESS) {
4732 
4733                 switch (i) {
4734                         case DDI_DMA_BADATTR :
4735                                 con_log(CL_ANN, (CE_WARN,
4736                                 "Failed ddi_dma_alloc_handle- Bad attribute"));
4737                                 break;
4738                         case DDI_DMA_NORESOURCES :
4739                                 con_log(CL_ANN, (CE_WARN,
4740                                 "Failed ddi_dma_alloc_handle- No Resources"));
4741                                 break;
4742                         default :
4743                                 con_log(CL_ANN, (CE_WARN,
4744                                 "Failed ddi_dma_alloc_handle: "
4745                                 "unknown status %d", i));
4746                                 break;
4747                 }
4748 
4749                 return (-1);
4750         }
4751 
4752         if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4753             DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4754             &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4755             alen < obj->size) {
4756 
4757                 ddi_dma_free_handle(&obj->dma_handle);
4758 
4759                 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4760 
4761                 return (-1);
4762         }
4763 
4764         if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4765             obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4766             NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4767 
4768                 ddi_dma_mem_free(&obj->acc_handle);
4769                 ddi_dma_free_handle(&obj->dma_handle);
4770 
4771                 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4772 
4773                 return (-1);
4774         }
4775 
4776         if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4777                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4778                 return (-1);
4779         }
4780 
4781         if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4782                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4783                 return (-1);
4784         }
4785 
4786         return (cookie_cnt);
4787 }
4788 
4789 /*
4790  * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4791  *
4792  * De-allocate the memory and other resources for a DMA object, which must
4793  * have been allocated by a previous call to mrsas_alloc_dma_obj().
4794  */
4795 int
4796 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4797 {
4798 
4799         if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4800                 return (DDI_SUCCESS);
4801         }
4802 
4803         /*
4804          * NOTE: These check-handle functions fail if *_handle == NULL, but
4805          * this function succeeds because of the previous check.
4806          */
4807         if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4808                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4809                 return (DDI_FAILURE);
4810         }
4811 
4812         if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4813                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4814                 return (DDI_FAILURE);
4815         }
4816 
4817         (void) ddi_dma_unbind_handle(obj.dma_handle);
4818         ddi_dma_mem_free(&obj.acc_handle);
4819         ddi_dma_free_handle(&obj.dma_handle);
4820         obj.acc_handle = NULL;
4821         return (DDI_SUCCESS);
4822 }
4823 
4824 /*
4825  * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4826  * int, int (*)())
4827  *
4828  * Allocate DMA resources for a new SCSI command
4829  */
4830 int
4831 mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4832     struct buf *bp, int flags, int (*callback)())
4833 {
4834         int     dma_flags;
4835         int     (*cb)(caddr_t);
4836         int     i;
4837 
4838         ddi_dma_attr_t  tmp_dma_attr = mrsas_generic_dma_attr;
4839         struct scsa_cmd *acmd = PKT2CMD(pkt);
4840 
4841         acmd->cmd_buf = bp;
4842 
4843         if (bp->b_flags & B_READ) {
4844                 acmd->cmd_flags &= ~CFLAG_DMASEND;
4845                 dma_flags = DDI_DMA_READ;
4846         } else {
4847                 acmd->cmd_flags |= CFLAG_DMASEND;
4848                 dma_flags = DDI_DMA_WRITE;
4849         }
4850 
4851         if (flags & PKT_CONSISTENT) {
4852                 acmd->cmd_flags |= CFLAG_CONSISTENT;
4853                 dma_flags |= DDI_DMA_CONSISTENT;
4854         }
4855 
4856         if (flags & PKT_DMA_PARTIAL) {
4857                 dma_flags |= DDI_DMA_PARTIAL;
4858         }
4859 
4860         dma_flags |= DDI_DMA_REDZONE;
4861 
4862         cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
4863 
4864         tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
4865         tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
4866         if (instance->tbolt) {
4867                 /* OCR-RESET FIX */
4868                 tmp_dma_attr.dma_attr_count_max =
4869                     (U64)mrsas_tbolt_max_cap_maxxfer;  /* limit to 256K */
4870                 tmp_dma_attr.dma_attr_maxxfer =
4871                     (U64)mrsas_tbolt_max_cap_maxxfer;  /* limit to 256K */
4872         }
4873 
4874         if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
4875             cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
4876                 switch (i) {
4877                 case DDI_DMA_BADATTR:
4878                         bioerror(bp, EFAULT);
4879                         return (DDI_FAILURE);
4880 
4881                 case DDI_DMA_NORESOURCES:
4882                         bioerror(bp, 0);
4883                         return (DDI_FAILURE);
4884 
4885                 default:
4886                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
4887                             "impossible result (0x%x)", i));
4888                         bioerror(bp, EFAULT);
4889                         return (DDI_FAILURE);
4890                 }
4891         }
4892 
4893         i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
4894             cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
4895 
4896         switch (i) {
4897         case DDI_DMA_PARTIAL_MAP:
4898                 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
4899                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
4900                             "DDI_DMA_PARTIAL_MAP impossible"));
4901                         goto no_dma_cookies;
4902                 }
4903 
4904                 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
4905                     DDI_FAILURE) {
4906                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
4907                         goto no_dma_cookies;
4908                 }
4909 
4910                 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
4911                     &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
4912                     &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
4913                     DDI_FAILURE) {
4914 
4915                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
4916                         goto no_dma_cookies;
4917                 }
4918 
4919                 goto get_dma_cookies;
4920         case DDI_DMA_MAPPED:
4921                 acmd->cmd_nwin = 1;
4922                 acmd->cmd_dma_len = 0;
4923                 acmd->cmd_dma_offset = 0;
4924 
4925 get_dma_cookies:
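                     /*
                      * Walk the cookies for this DMA window, accumulating
                      * the mapped byte count, but never take more cookies
                      * than the adapter can describe in one frame
                      * (max_num_sge).
                      */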
4926                 i = 0;
4927                 acmd->cmd_dmacount = 0;
4928                 for (;;) {
4929                         acmd->cmd_dmacount +=
4930                             acmd->cmd_dmacookies[i++].dmac_size;
4931 
4932                         if (i == instance->max_num_sge ||
4933                             i == acmd->cmd_ncookies)
4934                                 break;
4935 
4936                         ddi_dma_nextcookie(acmd->cmd_dmahandle,
4937                             &acmd->cmd_dmacookies[i]);
4938                 }
4939 
4940                 acmd->cmd_cookie = i;
4941                 acmd->cmd_cookiecnt = i;
4942 
4943                 acmd->cmd_flags |= CFLAG_DMAVALID;
4944 
4945                 if (bp->b_bcount >= acmd->cmd_dmacount) {
4946                         pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
4947                 } else {
4948                         pkt->pkt_resid = 0;
4949                 }
4950 
4951                 return (DDI_SUCCESS);
4952         case DDI_DMA_NORESOURCES:
4953                 bioerror(bp, 0);
4954                 break;
4955         case DDI_DMA_NOMAPPING:
4956                 bioerror(bp, EFAULT);
4957                 break;
4958         case DDI_DMA_TOOBIG:
4959                 bioerror(bp, EINVAL);
4960                 break;
4961         case DDI_DMA_INUSE:
4962                 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
4963                     " DDI_DMA_INUSE impossible"));
4964                 break;
4965         default:
4966                 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
4967                     "impossible result (0x%x)", i));
4968                 break;
4969         }
4970 
4971 no_dma_cookies:
4972         ddi_dma_free_handle(&acmd->cmd_dmahandle);
4973         acmd->cmd_dmahandle = NULL;
4974         acmd->cmd_flags &= ~CFLAG_DMAVALID;
4975         return (DDI_FAILURE);
4976 }
4977 
4978 /*
4979  * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
4980  *
4981  * move dma resources to next dma window
4982  *
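      * Typically called when a packet set up with PKT_DMA_PARTIAL is reused
      * for the next portion of a transfer; it refreshes the cookie array
      * and pkt_resid for the new window.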
4983  */
4984 int
4985 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4986     struct buf *bp)
4987 {
4988         int     i = 0;
4989 
4990         struct scsa_cmd *acmd = PKT2CMD(pkt);
4991 
4992         /*
4993          * If there are no more cookies remaining in this window,
4994          * must move to the next window first.
4995          */
4996         if (acmd->cmd_cookie == acmd->cmd_ncookies) {
4997                 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
4998                         return (DDI_SUCCESS);
4999                 }
5000 
5001                 /* at last window, cannot move */
5002                 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
5003                         return (DDI_FAILURE);
5004                 }
5005 
5006                 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
5007                     &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
5008                     &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
5009                     DDI_FAILURE) {
5010                         return (DDI_FAILURE);
5011                 }
5012 
5013                 acmd->cmd_cookie = 0;
5014         } else {
5015                 /* still more cookies in this window - get the next one */
5016                 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5017                     &acmd->cmd_dmacookies[0]);
5018         }
5019 
5020         /* get remaining cookies in this window, up to our maximum */
5021         for (;;) {
5022                 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
5023                 acmd->cmd_cookie++;
5024 
5025                 if (i == instance->max_num_sge ||
5026                     acmd->cmd_cookie == acmd->cmd_ncookies) {
5027                         break;
5028                 }
5029 
5030                 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5031                     &acmd->cmd_dmacookies[i]);
5032         }
5033 
5034         acmd->cmd_cookiecnt = i;
5035 
5036         if (bp->b_bcount >= acmd->cmd_dmacount) {
5037                 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
5038         } else {
5039                 pkt->pkt_resid = 0;
5040         }
5041 
5042         return (DDI_SUCCESS);
5043 }
5044 
5045 /*
5046  * build_cmd
5047  */
5048 static struct mrsas_cmd *
5049 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
5050     struct scsi_pkt *pkt, uchar_t *cmd_done)
5051 {
5052         uint16_t        flags = 0;
5053         uint32_t        i;
5054         uint32_t        sge_bytes;
5055         uint32_t        tmp_data_xfer_len;
5056         ddi_acc_handle_t acc_handle;
5057         struct mrsas_cmd                *cmd;
5058         struct mrsas_sge64              *mfi_sgl;
5059         struct mrsas_sge_ieee           *mfi_sgl_ieee;
5060         struct scsa_cmd                 *acmd = PKT2CMD(pkt);
5061         struct mrsas_pthru_frame        *pthru;
5062         struct mrsas_io_frame           *ldio;
5063 
5064         /* find out if this is a logical or a physical drive command */
5065         acmd->islogical = MRDRV_IS_LOGICAL(ap);
5066         acmd->device_id = MAP_DEVICE_ID(instance, ap);
5067         *cmd_done = 0;
5068 
5069         /* get the command packet */
5070         if (!(cmd = mrsas_get_mfi_pkt(instance))) {
5071                 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
5072                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
5073                 return (NULL);
5074         }
5075 
5076         acc_handle = cmd->frame_dma_obj.acc_handle;
5077 
5078         /* Clear the frame buffer and assign back the context id */
5079         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
5080         ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
5081 
5082         cmd->pkt = pkt;
5083         cmd->cmd = acmd;
5084         DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
5085             ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);
5086 
5087         /* determine the data transfer direction of the command */
5088         if (acmd->cmd_flags & CFLAG_DMASEND) {
5089                 flags = MFI_FRAME_DIR_WRITE;
5090 
5091                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5092                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
5093                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
5094                             DDI_DMA_SYNC_FORDEV);
5095                 }
5096         } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
5097                 flags = MFI_FRAME_DIR_READ;
5098 
5099                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5100                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
5101                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
5102                             DDI_DMA_SYNC_FORCPU);
5103                 }
5104         } else {
5105                 flags = MFI_FRAME_DIR_NONE;
5106         }
5107 
5108         if (instance->flag_ieee) {
5109                 flags |= MFI_FRAME_IEEE;
5110         }
5111         flags |= MFI_FRAME_SGL64;
5112 
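             /*
              * Reads and writes to a logical drive become MFI LD I/O frames
              * with the LBA and block count decoded from the CDB; everything
              * else, and all physical drive commands, is sent as a
              * pass-through (DCDB) frame in the default case below.
              */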
5113         switch (pkt->pkt_cdbp[0]) {
5114 
5115         /*
5116          * case SCMD_SYNCHRONIZE_CACHE:
5117          *      flush_cache(instance);
5118          *      mrsas_return_mfi_pkt(instance, cmd);
5119          *      *cmd_done = 1;
5120          *
5121          *      return (NULL);
5122          */
5123 
5124         case SCMD_READ:
5125         case SCMD_WRITE:
5126         case SCMD_READ_G1:
5127         case SCMD_WRITE_G1:
5128         case SCMD_READ_G4:
5129         case SCMD_WRITE_G4:
5130         case SCMD_READ_G5:
5131         case SCMD_WRITE_G5:
5132                 if (acmd->islogical) {
5133                         ldio = (struct mrsas_io_frame *)cmd->frame;
5134 
5135                         /*
5136                          * prepare the Logical IO frame:
5137                          * 2nd bit is zero for all read cmds
5138                          */
5139                         ddi_put8(acc_handle, &ldio->cmd,
5140                             (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
5141                             : MFI_CMD_OP_LD_READ);
5142                         ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
5143                         ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
5144                         ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
5145                         ddi_put16(acc_handle, &ldio->timeout, 0);
5146                         ddi_put8(acc_handle, &ldio->reserved_0, 0);
5147                         ddi_put16(acc_handle, &ldio->pad_0, 0);
5148                         ddi_put16(acc_handle, &ldio->flags, flags);
5149 
5150                         /* Initialize sense Information */
5151                         bzero(cmd->sense, SENSE_LENGTH);
5152                         ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
5153                         ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
5154                         ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
5155                             cmd->sense_phys_addr);
5156                         ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
5157                         ddi_put8(acc_handle, &ldio->access_byte,
5158                             (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
5159                         ddi_put8(acc_handle, &ldio->sge_count,
5160                             acmd->cmd_cookiecnt);
5161                         if (instance->flag_ieee) {
5162                                 mfi_sgl_ieee =
5163                                     (struct mrsas_sge_ieee *)&ldio->sgl;
5164                         } else {
5165                                 mfi_sgl = (struct mrsas_sge64   *)&ldio->sgl;
5166                         }
5167 
5168                         (void) ddi_get32(acc_handle, &ldio->context);
5169 
5170                         if (acmd->cmd_cdblen == CDB_GROUP0) {
5171                                 /* 6-byte cdb */
5172                                 ddi_put32(acc_handle, &ldio->lba_count, (
5173                                     (uint16_t)(pkt->pkt_cdbp[4])));
5174 
5175                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5176                                     ((uint32_t)(pkt->pkt_cdbp[3])) |
5177                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
5178                                     ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
5179                                     << 16)));
5180                         } else if (acmd->cmd_cdblen == CDB_GROUP1) {
5181                                 /* 10-byte cdb */
5182                                 ddi_put32(acc_handle, &ldio->lba_count, (
5183                                     ((uint16_t)(pkt->pkt_cdbp[8])) |
5184                                     ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));
5185 
5186                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5187                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
5188                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5189                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5190                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5191                         } else if (acmd->cmd_cdblen == CDB_GROUP5) {
5192                                 /* 12-byte cdb */
5193                                 ddi_put32(acc_handle, &ldio->lba_count, (
5194                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
5195                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5196                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5197                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5198 
5199                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5200                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
5201                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5202                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5203                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5204                         } else if (acmd->cmd_cdblen == CDB_GROUP4) {
5205                                 /* 16-byte cdb */
5206                                 ddi_put32(acc_handle, &ldio->lba_count, (
5207                                     ((uint32_t)(pkt->pkt_cdbp[13])) |
5208                                     ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5209                                     ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5210                                     ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5211 
5212                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5213                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
5214                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5215                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5216                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5217 
5218                                 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5219                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
5220                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5221                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5222                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5223                         }
5224 
5225                         break;
5226                 }
5227                 /* fall through for all non-R/W and physical disk cmds */
5228         default:
5229 
5230                 switch (pkt->pkt_cdbp[0]) {
5231                 case SCMD_MODE_SENSE:
5232                 case SCMD_MODE_SENSE_G1: {
5233                         union scsi_cdb  *cdbp;
5234                         uint16_t        page_code;
5235 
5236                         cdbp = (void *)pkt->pkt_cdbp;
5237                         page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
5238                         switch (page_code) {
5239                         case 0x3:
5240                         case 0x4:
5241                                 (void) mrsas_mode_sense_build(pkt);
5242                                 mrsas_return_mfi_pkt(instance, cmd);
5243                                 *cmd_done = 1;
5244                                 return (NULL);
5245                         }
5246                         break;
5247                 }
5248                 default:
5249                         break;
5250                 }
5251 
5252                 pthru   = (struct mrsas_pthru_frame *)cmd->frame;
5253 
5254                 /* prepare the DCDB frame */
5255                 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5256                     MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5257                 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5258                 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5259                 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5260                 ddi_put8(acc_handle, &pthru->lun, 0);
5261                 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5262                 ddi_put16(acc_handle, &pthru->timeout, 0);
5263                 ddi_put16(acc_handle, &pthru->flags, flags);
5264                 tmp_data_xfer_len = 0;
5265                 for (i = 0; i < acmd->cmd_cookiecnt; i++) {
5266                         tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
5267                 }
5268                 ddi_put32(acc_handle, &pthru->data_xfer_len,
5269                     tmp_data_xfer_len);
5270                 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
5271                 if (instance->flag_ieee) {
5272                         mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
5273                 } else {
5274                         mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
5275                 }
5276 
5277                 bzero(cmd->sense, SENSE_LENGTH);
5278                 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5279                 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5280                 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
5281                     cmd->sense_phys_addr);
5282 
5283                 (void) ddi_get32(acc_handle, &pthru->context);
5284                 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
5285                     (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
5286 
5287                 break;
5288         }
5289 
5290         /* prepare the scatter-gather list for the firmware */
5291         if (instance->flag_ieee) {
5292                 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
5293                         ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
5294                             acmd->cmd_dmacookies[i].dmac_laddress);
5295                         ddi_put32(acc_handle, &mfi_sgl_ieee->length,
5296                             acmd->cmd_dmacookies[i].dmac_size);
5297                 }
5298                 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
5299         } else {
5300                 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
5301                         ddi_put64(acc_handle, &mfi_sgl->phys_addr,
5302                             acmd->cmd_dmacookies[i].dmac_laddress);
5303                         ddi_put32(acc_handle, &mfi_sgl->length,
5304                             acmd->cmd_dmacookies[i].dmac_size);
5305                 }
5306                 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
5307         }
5308 
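             /*
              * One frame for the command itself plus however many extra MFI
              * frames the scatter-gather list spills into, capped at 8.
              */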
5309         cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
5310             ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;
5311 
5312         if (cmd->frame_count >= 8) {
5313                 cmd->frame_count = 8;
5314         }
5315 
5316         return (cmd);
5317 }
5318 
5319 /*
5320  * wait_for_outstanding -       Wait for all outstanding cmds
5321  * @instance:                           Adapter soft state
5322  *
5323  * This function polls the FW for up to wait_time iterations, pausing
5324  * 1000 usecs between polls, until all outstanding commands complete.
5325  * It returns an error if one or more IOs are still pending after that.
5326  */
5327 static int
5328 wait_for_outstanding(struct mrsas_instance *instance)
5329 {
5330         int             i;
5331         uint32_t        wait_time = 90;
5332 
5333         for (i = 0; i < wait_time; i++) {
5334                 if (!instance->fw_outstanding) {
5335                         break;
5336                 }
5337 
5338                 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
5339         }
5340 
5341         if (instance->fw_outstanding) {
5342                 return (1);
5343         }
5344 
5345         return (0);
5346 }
5347 
5348 /*
5349  * issue_mfi_pthru
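      *
      * Handle an MFI pass-through (LD/PD SCSI) ioctl frame: stage any user
      * data buffer in a kernel DMA buffer, build the hardware pass-through
      * frame, issue it in sync mode, then copy the data and sense
      * information back out to user space.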
5350  */
5351 static int
5352 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5353     struct mrsas_cmd *cmd, int mode)
5354 {
5355         void            *ubuf;
5356         uint32_t        kphys_addr = 0;
5357         uint32_t        xferlen = 0;
5358         uint32_t        new_xfer_length = 0;
5359         uint_t          model;
5360         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5361         dma_obj_t                       pthru_dma_obj;
5362         struct mrsas_pthru_frame        *kpthru;
5363         struct mrsas_pthru_frame        *pthru;
5364         int i;
5365         pthru = &cmd->frame->pthru;
5366         kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5367 
5368         if (instance->adapterresetinprogress) {
5369                 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5370                     "returning mfi_pkt and setting TRAN_BUSY\n"));
5371                 return (DDI_FAILURE);
5372         }
5373         model = ddi_model_convert_from(mode & FMODELS);
5374         if (model == DDI_MODEL_ILP32) {
5375                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5376 
5377                 xferlen = kpthru->sgl.sge32[0].length;
5378 
5379                 ubuf    = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5380         } else {
5381 #ifdef _ILP32
5382                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5383                 xferlen = kpthru->sgl.sge32[0].length;
5384                 ubuf    = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5385 #else
5386                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5387                 xferlen = kpthru->sgl.sge64[0].length;
5388                 ubuf    = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5389 #endif
5390         }
5391 
5392         if (xferlen) {
5393                 /* means IOCTL requires DMA */
5394                 /* allocate the data transfer buffer */
5395                 /* pthru_dma_obj.size = xferlen; */
5396                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5397                     PAGESIZE);
5398                 pthru_dma_obj.size = new_xfer_length;
5399                 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5400                 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5401                 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5402                 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5403                 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5404 
5405                 /* allocate kernel buffer for DMA */
5406                 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5407                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5408                         con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5409                             "could not allocate data transfer buffer."));
5410                         return (DDI_FAILURE);
5411                 }
5412                 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5413 
5414                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5415                 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5416                         for (i = 0; i < xferlen; i++) {
5417                                 if (ddi_copyin((uint8_t *)ubuf+i,
5418                                     (uint8_t *)pthru_dma_obj.buffer+i,
5419                                     1, mode)) {
5420                                         con_log(CL_ANN, (CE_WARN,
5421                                             "issue_mfi_pthru : "
5422                                             "copy from user space failed"));
5423                                         return (DDI_FAILURE);
5424                                 }
5425                         }
5426                 }
5427 
5428                 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5429         }
5430 
5431         ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5432         ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5433         ddi_put8(acc_handle, &pthru->cmd_status, 0);
5434         ddi_put8(acc_handle, &pthru->scsi_status, 0);
5435         ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5436         ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5437         ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5438         ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5439         ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5440         ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5441 
5442         ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5443         pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5444         /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5445 
5446         ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5447             pthru->cdb_len, DDI_DEV_AUTOINCR);
5448 
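             /*
              * The frame built here always uses 32-bit SGEs, so strip any
              * SGL64 flag the caller may have set before copying the flags.
              */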
5449         ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5450         ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5451         ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5452 
5453         cmd->sync_cmd = MRSAS_TRUE;
5454         cmd->frame_count = 1;
5455 
5456         if (instance->tbolt) {
5457                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5458         }
5459 
5460         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5461                 con_log(CL_ANN, (CE_WARN,
5462                     "issue_mfi_pthru: fw_ioctl failed"));
5463         } else {
5464                 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
5465                         for (i = 0; i < xferlen; i++) {
5466                                 if (ddi_copyout(
5467                                     (uint8_t *)pthru_dma_obj.buffer+i,
5468                                     (uint8_t *)ubuf+i, 1, mode)) {
5469                                         con_log(CL_ANN, (CE_WARN,
5470                                             "issue_mfi_pthru : "
5471                                             "copy to user space failed"));
5472                                         return (DDI_FAILURE);
5473                                 }
5474                         }
5475                 }
5476         }
5477 
5478         kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5479         kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5480 
5481         con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5482             "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5483         DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5484             kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5485 
5486         if (kpthru->sense_len) {
5487                 uint_t sense_len = SENSE_LENGTH;
5488                 void *sense_ubuf =
5489                     (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5490                 if (kpthru->sense_len <= SENSE_LENGTH) {
5491                         sense_len = kpthru->sense_len;
5492                 }
5493 
5494                 for (i = 0; i < sense_len; i++) {
5495                         if (ddi_copyout(
5496                             (uint8_t *)cmd->sense+i,
5497                             (uint8_t *)sense_ubuf+i, 1, mode)) {
5498                                 con_log(CL_ANN, (CE_WARN,
5499                                     "issue_mfi_pthru : "
5500                                     "copy to user space failed"));
5501                         }
5502                         con_log(CL_DLEVEL1, (CE_WARN,
5503                             "Copying Sense info sense_buff[%d] = 0x%X",
5504                             i, *((uint8_t *)cmd->sense + i)));
5505                 }
5506         }
5507         (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5508             DDI_DMA_SYNC_FORDEV);
5509 
5510         if (xferlen) {
5511                 /* free kernel buffer */
5512                 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5513                         return (DDI_FAILURE);
5514         }
5515 
5516         return (DDI_SUCCESS);
5517 }
5518 
5519 /*
5520  * issue_mfi_dcmd
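      *
      * Handle an MFI DCMD ioctl frame: stage any user data buffer in a
      * kernel DMA buffer, mirror the caller's DCMD into the command frame,
      * issue it in sync mode and copy DIR_READ data and the completion
      * status back to user space.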
5521  */
5522 static int
5523 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5524     struct mrsas_cmd *cmd, int mode)
5525 {
5526         void            *ubuf;
5527         uint32_t        kphys_addr = 0;
5528         uint32_t        xferlen = 0;
5529         uint32_t        new_xfer_length = 0;
5530         uint32_t        model;
5531         dma_obj_t       dcmd_dma_obj;
5532         struct mrsas_dcmd_frame *kdcmd;
5533         struct mrsas_dcmd_frame *dcmd;
5534         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5535         int i;
5536         dcmd = &cmd->frame->dcmd;
5537         kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5538 
5539         if (instance->adapterresetinprogress) {
5540                 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
5541                     "returning mfi_pkt and setting TRAN_BUSY"));
5542                 return (DDI_FAILURE);
5543         }
5544         model = ddi_model_convert_from(mode & FMODELS);
5545         if (model == DDI_MODEL_ILP32) {
5546                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5547 
5548                 xferlen = kdcmd->sgl.sge32[0].length;
5549 
5550                 ubuf    = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5551         } else {
5552 #ifdef _ILP32
5553                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5554                 xferlen = kdcmd->sgl.sge32[0].length;
5555                 ubuf    = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5556 #else
5557                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5558                 xferlen = kdcmd->sgl.sge64[0].length;
5559                 ubuf    = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5560 #endif
5561         }
5562         if (xferlen) {
5563                 /* means IOCTL requires DMA */
5564                 /* allocate the data transfer buffer */
5565                 /* dcmd_dma_obj.size = xferlen; */
5566                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5567                     PAGESIZE);
5568                 dcmd_dma_obj.size = new_xfer_length;
5569                 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5570                 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5571                 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5572                 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5573                 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5574 
5575                 /* allocate kernel buffer for DMA */
5576                 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5577                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5578                         con_log(CL_ANN,
5579                             (CE_WARN, "issue_mfi_dcmd: could not "
5580                             "allocate data transfer buffer."));
5581                         return (DDI_FAILURE);
5582                 }
5583                 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5584 
5585                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5586                 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5587                         for (i = 0; i < xferlen; i++) {
5588                                 if (ddi_copyin((uint8_t *)ubuf + i,
5589                                     (uint8_t *)dcmd_dma_obj.buffer + i,
5590                                     1, mode)) {
5591                                         con_log(CL_ANN, (CE_WARN,
5592                                             "issue_mfi_dcmd : "
5593                                             "copy from user space failed"));
5594                                         return (DDI_FAILURE);
5595                                 }
5596                         }
5597                 }
5598 
5599                 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5600         }
5601 
5602         ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5603         ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5604         ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5605         ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5606         ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5607         ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5608 
5609         ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5610             (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5611 
5612         ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5613         ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5614         ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5615 
5616         cmd->sync_cmd = MRSAS_TRUE;
5617         cmd->frame_count = 1;
5618 
5619         if (instance->tbolt) {
5620                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5621         }
5622 
5623         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5624                 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5625         } else {
5626                 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5627                         for (i = 0; i < xferlen; i++) {
5628                                 if (ddi_copyout(
5629                                     (uint8_t *)dcmd_dma_obj.buffer + i,
5630                                     (uint8_t *)ubuf + i,
5631                                     1, mode)) {
5632                                         con_log(CL_ANN, (CE_WARN,
5633                                             "issue_mfi_dcmd : "
5634                                             "copy to user space failed"));
5635                                         return (DDI_FAILURE);
5636                                 }
5637                         }
5638                 }
5639         }
5640 
5641         kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5642         con_log(CL_ANN,
5643             (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5644         DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5645             kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5646 
5647         if (xferlen) {
5648                 /* free kernel buffer */
5649                 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5650                         return (DDI_FAILURE);
5651         }
5652 
5653         return (DDI_SUCCESS);
5654 }
5655 
5656 /*
5657  * issue_mfi_smp
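      *
      * Handle an MFI SMP ioctl frame. The caller supplies two buffers in
      * the SGL: SGE 0 is the SMP response buffer and SGE 1 the SMP request
      * buffer. Both are staged in kernel DMA buffers, the frame is issued
      * in sync mode and both buffers are copied back to user space.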
5658  */
5659 static int
5660 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5661     struct mrsas_cmd *cmd, int mode)
5662 {
5663         void            *request_ubuf;
5664         void            *response_ubuf;
5665         uint32_t        request_xferlen = 0;
5666         uint32_t        response_xferlen = 0;
5667         uint32_t        new_xfer_length1 = 0;
5668         uint32_t        new_xfer_length2 = 0;
5669         uint_t          model;
5670         dma_obj_t                       request_dma_obj;
5671         dma_obj_t                       response_dma_obj;
5672         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5673         struct mrsas_smp_frame          *ksmp;
5674         struct mrsas_smp_frame          *smp;
5675         struct mrsas_sge32              *sge32;
5676 #ifndef _ILP32
5677         struct mrsas_sge64              *sge64;
5678 #endif
5679         int i;
5680         uint64_t                        tmp_sas_addr;
5681 
5682         smp = &cmd->frame->smp;
5683         ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5684 
5685         if (instance->adapterresetinprogress) {
5686                 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5687                     "returning mfi_pkt and setting TRAN_BUSY\n"));
5688                 return (DDI_FAILURE);
5689         }
5690         model = ddi_model_convert_from(mode & FMODELS);
5691         if (model == DDI_MODEL_ILP32) {
5692                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5693 
5694                 sge32                   = &ksmp->sgl[0].sge32[0];
5695                 response_xferlen        = sge32[0].length;
5696                 request_xferlen         = sge32[1].length;
5697                 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5698                     "response_xferlen = %x, request_xferlen = %x",
5699                     response_xferlen, request_xferlen));
5700 
5701                 response_ubuf   = (void *)(ulong_t)sge32[0].phys_addr;
5702                 request_ubuf    = (void *)(ulong_t)sge32[1].phys_addr;
5703                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5704                     "response_ubuf = %p, request_ubuf = %p",
5705                     response_ubuf, request_ubuf));
5706         } else {
5707 #ifdef _ILP32
5708                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5709 
5710                 sge32                   = &ksmp->sgl[0].sge32[0];
5711                 response_xferlen        = sge32[0].length;
5712                 request_xferlen         = sge32[1].length;
5713                 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5714                     "response_xferlen = %x, request_xferlen = %x",
5715                     response_xferlen, request_xferlen));
5716 
5717                 response_ubuf   = (void *)(ulong_t)sge32[0].phys_addr;
5718                 request_ubuf    = (void *)(ulong_t)sge32[1].phys_addr;
5719                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5720                     "response_ubuf = %p, request_ubuf = %p",
5721                     response_ubuf, request_ubuf));
5722 #else
5723                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5724 
5725                 sge64                   = &ksmp->sgl[0].sge64[0];
5726                 response_xferlen        = sge64[0].length;
5727                 request_xferlen         = sge64[1].length;
5728 
5729                 response_ubuf   = (void *)(ulong_t)sge64[0].phys_addr;
5730                 request_ubuf    = (void *)(ulong_t)sge64[1].phys_addr;
5731 #endif
5732         }
5733         if (request_xferlen) {
5734                 /* means IOCTL requires DMA */
5735                 /* allocate the data transfer buffer */
5736                 /* request_dma_obj.size = request_xferlen; */
5737                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5738                     new_xfer_length1, PAGESIZE);
5739                 request_dma_obj.size = new_xfer_length1;
5740                 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5741                 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5742                 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5743                 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5744                 request_dma_obj.dma_attr.dma_attr_align = 1;
5745 
5746                 /* allocate kernel buffer for DMA */
5747                 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5748                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5749                         con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5750                             "could not allocate data transfer buffer."));
5751                         return (DDI_FAILURE);
5752                 }
5753                 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5754 
5755                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5756                 for (i = 0; i < request_xferlen; i++) {
5757                         if (ddi_copyin((uint8_t *)request_ubuf + i,
5758                             (uint8_t *)request_dma_obj.buffer + i,
5759                             1, mode)) {
5760                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5761                                     "copy from user space failed"));
5762                                 return (DDI_FAILURE);
5763                         }
5764                 }
5765         }
5766 
5767         if (response_xferlen) {
5768                 /* means IOCTL requires DMA */
5769                 /* allocate the data transfer buffer */
5770                 /* response_dma_obj.size = response_xferlen; */
5771                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5772                     new_xfer_length2, PAGESIZE);
5773                 response_dma_obj.size = new_xfer_length2;
5774                 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5775                 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5776                 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5777                 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5778                 response_dma_obj.dma_attr.dma_attr_align = 1;
5779 
5780                 /* allocate kernel buffer for DMA */
5781                 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5782                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5783                         con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5784                             "could not allocate data transfer buffer."));
5785                         return (DDI_FAILURE);
5786                 }
5787                 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5788 
5789                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5790                 for (i = 0; i < response_xferlen; i++) {
5791                         if (ddi_copyin((uint8_t *)response_ubuf + i,
5792                             (uint8_t *)response_dma_obj.buffer + i,
5793                             1, mode)) {
5794                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5795                                     "copy from user space failed"));
5796                                 return (DDI_FAILURE);
5797                         }
5798                 }
5799         }
5800 
5801         ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5802         ddi_put8(acc_handle, &smp->cmd_status, 0);
5803         ddi_put8(acc_handle, &smp->connection_status, 0);
5804         ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5805         /* smp->context              = ksmp->context; */
5806         ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5807         ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5808 
5809         bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5810             sizeof (uint64_t));
5811         ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5812 
5813         ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5814 
5815         model = ddi_model_convert_from(mode & FMODELS);
5816         if (model == DDI_MODEL_ILP32) {
5817                 con_log(CL_ANN1, (CE_CONT,
5818                     "issue_mfi_smp: DDI_MODEL_ILP32"));
5819 
5820                 sge32 = &smp->sgl[0].sge32[0];
5821                 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5822                 ddi_put32(acc_handle, &sge32[0].phys_addr,
5823                     response_dma_obj.dma_cookie[0].dmac_address);
5824                 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5825                 ddi_put32(acc_handle, &sge32[1].phys_addr,
5826                     request_dma_obj.dma_cookie[0].dmac_address);
5827         } else {
5828 #ifdef _ILP32
5829                 con_log(CL_ANN1, (CE_CONT,
5830                     "issue_mfi_smp: DDI_MODEL_ILP32"));
5831                 sge32 = &smp->sgl[0].sge32[0];
5832                 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5833                 ddi_put32(acc_handle, &sge32[0].phys_addr,
5834                     response_dma_obj.dma_cookie[0].dmac_address);
5835                 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5836                 ddi_put32(acc_handle, &sge32[1].phys_addr,
5837                     request_dma_obj.dma_cookie[0].dmac_address);
5838 #else
5839                 con_log(CL_ANN1, (CE_CONT,
5840                     "issue_mfi_smp: DDI_MODEL_LP64"));
5841                 sge64 = &smp->sgl[0].sge64[0];
5842                 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5843                 ddi_put64(acc_handle, &sge64[0].phys_addr,
5844                     response_dma_obj.dma_cookie[0].dmac_address);
5845                 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5846                 ddi_put64(acc_handle, &sge64[1].phys_addr,
5847                     request_dma_obj.dma_cookie[0].dmac_address);
5848 #endif
5849         }
5850         con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5851             "smp->response_xferlen = %d, smp->request_xferlen = %d "
5852             "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5853             ddi_get32(acc_handle, &sge32[1].length),
5854             ddi_get32(acc_handle, &smp->data_xfer_len)));
5855 
5856         cmd->sync_cmd = MRSAS_TRUE;
5857         cmd->frame_count = 1;
5858 
5859         if (instance->tbolt) {
5860                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5861         }
5862 
5863         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5864                 con_log(CL_ANN, (CE_WARN,
5865                     "issue_mfi_smp: fw_ioctl failed"));
5866         } else {
5867                 con_log(CL_ANN1, (CE_CONT,
5868                     "issue_mfi_smp: copy to user space"));
5869 
5870                 if (request_xferlen) {
5871                         for (i = 0; i < request_xferlen; i++) {
5872                                 if (ddi_copyout(
5873                                     (uint8_t *)request_dma_obj.buffer +
5874                                     i, (uint8_t *)request_ubuf + i,
5875                                     1, mode)) {
5876                                         con_log(CL_ANN, (CE_WARN,
5877                                             "issue_mfi_smp : copy to user space"
5878                                             " failed"));
5879                                         return (DDI_FAILURE);
5880                                 }
5881                         }
5882                 }
5883 
5884                 if (response_xferlen) {
5885                         for (i = 0; i < response_xferlen; i++) {
5886                                 if (ddi_copyout(
5887                                     (uint8_t *)response_dma_obj.buffer
5888                                     + i, (uint8_t *)response_ubuf
5889                                     + i, 1, mode)) {
5890                                         con_log(CL_ANN, (CE_WARN,
5891                                             "issue_mfi_smp : copy to "
5892                                             "user space failed"));
5893                                         return (DDI_FAILURE);
5894                                 }
5895                         }
5896                 }
5897         }
5898 
5899         ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5900         con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5901             ksmp->cmd_status));
5902         DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
5903 
5904         if (request_xferlen) {
5905                 /* free kernel buffer */
5906                 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5907                     DDI_SUCCESS)
5908                         return (DDI_FAILURE);
5909         }
5910 
5911         if (response_xferlen) {
5912                 /* free kernel buffer */
5913                 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5914                     DDI_SUCCESS)
5915                         return (DDI_FAILURE);
5916         }
5917 
5918         return (DDI_SUCCESS);
5919 }
5920 
5921 /*
5922  * issue_mfi_stp
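      *
      * Handle an MFI STP (SATA pass-through) ioctl frame. SGE 0 carries
      * the FIS buffer and SGE 1 the data buffer; both are staged in kernel
      * DMA buffers, the frame is issued in sync mode and the results are
      * copied back to user space.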
5923  */
5924 static int
5925 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5926     struct mrsas_cmd *cmd, int mode)
5927 {
5928         void            *fis_ubuf;
5929         void            *data_ubuf;
5930         uint32_t        fis_xferlen = 0;
5931         uint32_t        new_xfer_length1 = 0;
5932         uint32_t        new_xfer_length2 = 0;
5933         uint32_t        data_xferlen = 0;
5934         uint_t          model;
5935         dma_obj_t       fis_dma_obj;
5936         dma_obj_t       data_dma_obj;
5937         struct mrsas_stp_frame  *kstp;
5938         struct mrsas_stp_frame  *stp;
5939         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5940         int i;
5941 
5942         stp = &cmd->frame->stp;
5943         kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5944 
5945         if (instance->adapterresetinprogress) {
5946                 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5947                     "returning mfi_pkt and setting TRAN_BUSY\n"));
5948                 return (DDI_FAILURE);
5949         }
5950         model = ddi_model_convert_from(mode & FMODELS);
5951         if (model == DDI_MODEL_ILP32) {
5952                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5953 
5954                 fis_xferlen     = kstp->sgl.sge32[0].length;
5955                 data_xferlen    = kstp->sgl.sge32[1].length;
5956 
5957                 fis_ubuf        = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5958                 data_ubuf       = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5959         } else {
5960 #ifdef _ILP32
5961                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5962 
5963                 fis_xferlen     = kstp->sgl.sge32[0].length;
5964                 data_xferlen    = kstp->sgl.sge32[1].length;
5965 
5966                 fis_ubuf        = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5967                 data_ubuf       = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5968 #else
5969                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
5970 
5971                 fis_xferlen     = kstp->sgl.sge64[0].length;
5972                 data_xferlen    = kstp->sgl.sge64[1].length;
5973 
5974                 fis_ubuf        = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
5975                 data_ubuf       = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
5976 #endif
5977         }
5978 
5979 
5980         if (fis_xferlen) {
5981                 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
5982                     "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
5983 
5984                 /* means IOCTL requires DMA */
5985                 /* allocate the data transfer buffer */
5986                 /* fis_dma_obj.size = fis_xferlen; */
5987                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
5988                     new_xfer_length1, PAGESIZE);
5989                 fis_dma_obj.size = new_xfer_length1;
5990                 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
5991                 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5992                 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5993                 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
5994                 fis_dma_obj.dma_attr.dma_attr_align = 1;
5995 
5996                 /* allocate kernel buffer for DMA */
5997                 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
5998                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5999                         con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6000                             "could not allocate data transfer buffer."));
6001                         return (DDI_FAILURE);
6002                 }
6003                 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6004 
6005                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6006                 for (i = 0; i < fis_xferlen; i++) {
6007                         if (ddi_copyin((uint8_t *)fis_ubuf + i,
6008                             (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6009                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6010                                     "copy from user space failed"));
6011                                 return (DDI_FAILURE);
6012                         }
6013                 }
6014         }
6015 
6016         if (data_xferlen) {
6017                 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6018                     "data_xferlen = %x", data_ubuf, data_xferlen));
6019 
6020                 /* means IOCTL requires DMA */
6021                 /* allocate the data transfer buffer */
6022                 /* data_dma_obj.size = data_xferlen; */
6023                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6024                     PAGESIZE);
6025                 data_dma_obj.size = new_xfer_length2;
6026                 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6027                 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6028                 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6029                 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6030                 data_dma_obj.dma_attr.dma_attr_align = 1;
6031 
6032                 /* allocate kernel buffer for DMA */
6033                 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6034                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6035                         con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6036                             "could not allocate data transfer buffer."));
6037                         return (DDI_FAILURE);
6038                 }
6039                 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6040 
6041                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6042                 for (i = 0; i < data_xferlen; i++) {
6043                         if (ddi_copyin((uint8_t *)data_ubuf + i,
6044                             (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6045                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6046                                     "copy from user space failed"));
6047                                 return (DDI_FAILURE);
6048                         }
6049                 }
6050         }
6051 
6052         ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6053         ddi_put8(acc_handle, &stp->cmd_status, 0);
6054         ddi_put8(acc_handle, &stp->connection_status, 0);
6055         ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6056         ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6057 
6058         ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6059         ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6060 
6061         ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6062             DDI_DEV_AUTOINCR);
6063 
6064         ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6065         ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6066         ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6067         ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6068             fis_dma_obj.dma_cookie[0].dmac_address);
6069         ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6070         ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6071             data_dma_obj.dma_cookie[0].dmac_address);
6072 
6073         cmd->sync_cmd = MRSAS_TRUE;
6074         cmd->frame_count = 1;
6075 
6076         if (instance->tbolt) {
6077                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6078         }
6079 
6080         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6081                 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6082         } else {
6083 
6084                 if (fis_xferlen) {
6085                         for (i = 0; i < fis_xferlen; i++) {
6086                                 if (ddi_copyout(
6087                                     (uint8_t *)fis_dma_obj.buffer + i,
6088                                     (uint8_t *)fis_ubuf + i, 1, mode)) {
6089                                         con_log(CL_ANN, (CE_WARN,
6090                                             "issue_mfi_stp : copy to "
6091                                             "user space failed"));
6092                                         return (DDI_FAILURE);
6093                                 }
6094                         }
6095                 }
6096         }
6097         if (data_xferlen) {
6098                 for (i = 0; i < data_xferlen; i++) {
6099                         if (ddi_copyout(
6100                             (uint8_t *)data_dma_obj.buffer + i,
6101                             (uint8_t *)data_ubuf + i, 1, mode)) {
6102                                 con_log(CL_ANN, (CE_WARN,
6103                                     "issue_mfi_stp : copy to"
6104                                     " user space failed"));
6105                                 return (DDI_FAILURE);
6106                         }
6107                 }
6108         }
6109 
6110         kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6111         con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6112             kstp->cmd_status));
6113         DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6114 
6115         if (fis_xferlen) {
6116                 /* free kernel buffer */
6117                 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6118                         return (DDI_FAILURE);
6119         }
6120 
6121         if (data_xferlen) {
6122                 /* free kernel buffer */
6123                 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6124                         return (DDI_FAILURE);
6125         }
6126 
6127         return (DDI_SUCCESS);
6128 }
6129 
6130 /*
6131  * fill_up_drv_ver
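      *
      * Fill in the driver version structure that is returned by the
      * MRSAS_DRIVER_IOCTL_DRIVER_VERSION driver-private ioctl.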
6132  */
6133 void
6134 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6135 {
6136         (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6137 
6138         (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6139         (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6140         (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6141         (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6142         (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6143             strlen(MRSAS_RELDATE));
6144 
6145 }
6146 
6147 /*
6148  * handle_drv_ioctl
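      *
      * Service driver-private ioctls that never reach the firmware:
      * return either the driver version or the adapter's PCI address and
      * configuration header directly to the caller's buffer.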
6149  */
6150 static int
6151 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6152     int mode)
6153 {
6154         int     i;
6155         int     rval = DDI_SUCCESS;
6156         int     *props = NULL;
6157         void    *ubuf;
6158 
6159         uint8_t         *pci_conf_buf;
6160         uint32_t        xferlen;
6161         uint32_t        num_props;
6162         uint_t          model;
6163         struct mrsas_dcmd_frame *kdcmd;
6164         struct mrsas_drv_ver    dv;
6165         struct mrsas_pci_information pi;
6166 
6167         kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6168 
6169         model = ddi_model_convert_from(mode & FMODELS);
6170         if (model == DDI_MODEL_ILP32) {
6171                 con_log(CL_ANN1, (CE_CONT,
6172                     "handle_drv_ioctl: DDI_MODEL_ILP32"));
6173 
6174                 xferlen = kdcmd->sgl.sge32[0].length;
6175 
6176                 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6177         } else {
6178 #ifdef _ILP32
6179                 con_log(CL_ANN1, (CE_CONT,
6180                     "handle_drv_ioctl: DDI_MODEL_ILP32"));
6181                 xferlen = kdcmd->sgl.sge32[0].length;
6182                 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6183 #else
6184                 con_log(CL_ANN1, (CE_CONT,
6185                     "handle_drv_ioctl: DDI_MODEL_LP64"));
6186                 xferlen = kdcmd->sgl.sge64[0].length;
6187                 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6188 #endif
6189         }
6190         con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6191             "dataBuf=%p size=%d bytes", ubuf, xferlen));
6192 
6193         switch (kdcmd->opcode) {
6194         case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6195                 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6196                     "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6197 
6198                 fill_up_drv_ver(&dv);
6199 
6200                 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6201                         con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6202                             "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6203                             "copy to user space failed"));
6204                         kdcmd->cmd_status = 1;
6205                         rval = 1;
6206                 } else {
6207                         kdcmd->cmd_status = 0;
6208                 }
6209                 break;
6210         case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6211                 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6212                     "MRSAS_DRIVER_IOCTL_PCI_INFORMATION"));
6213 
6214                 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6215                     0, "reg", &props, &num_props)) {
6216                         con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6217                             "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6218                             "ddi_prop_look_int_array failed"));
6219                         rval = DDI_FAILURE;
6220                 } else {
6221 
6222                         pi.busNumber = (props[0] >> 16) & 0xFF;
6223                         pi.deviceNumber = (props[0] >> 11) & 0x1f;
6224                         pi.functionNumber = (props[0] >> 8) & 0x7;
6225                         ddi_prop_free((void *)props);
6226                 }
6227 
6228                 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6229 
6230                 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6231                     offsetof(struct mrsas_pci_information, pciHeaderInfo));
6232                     i++) {
6233                         pci_conf_buf[i] =
6234                             pci_config_get8(instance->pci_handle, i);
6235                 }
6236 
6237                 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6238                         con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6239                             "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6240                             "copy to user space failed"));
6241                         kdcmd->cmd_status = 1;
6242                         rval = 1;
6243                 } else {
6244                         kdcmd->cmd_status = 0;
6245                 }
6246                 break;
6247         default:
6248                 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6249                     "invalid driver specific IOCTL opcode = 0x%x",
6250                     kdcmd->opcode));
6251                 kdcmd->cmd_status = 1;
6252                 rval = DDI_FAILURE;
6253                 break;
6254         }
6255 
6256         return (rval);
6257 }
6258 
6259 /*
6260  * handle_mfi_ioctl
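      *
      * Dispatch a firmware-bound ioctl: grab an MFI packet (a RAID-message
      * MFI packet on tbolt adapters), then hand the frame to the DCMD,
      * SMP, STP or pass-through helper based on the opcode in its header.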
6261  */
6262 static int
6263 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6264     int mode)
6265 {
6266         int     rval = DDI_SUCCESS;
6267 
6268         struct mrsas_header     *hdr;
6269         struct mrsas_cmd        *cmd;
6270 
6271         if (instance->tbolt) {
6272                 cmd = get_raid_msg_mfi_pkt(instance);
6273         } else {
6274                 cmd = mrsas_get_mfi_pkt(instance);
6275         }
6276         if (!cmd) {
6277                 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6278                     "failed to get a cmd packet"));
6279                 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6280                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6281                 return (DDI_FAILURE);
6282         }
6283 
6284         /* Clear the frame buffer and assign back the context id */
6285         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6286         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6287             cmd->index);
6288 
6289         hdr = (struct mrsas_header *)&ioctl->frame[0];
6290 
6291         switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6292         case MFI_CMD_OP_DCMD:
6293                 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6294                 break;
6295         case MFI_CMD_OP_SMP:
6296                 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6297                 break;
6298         case MFI_CMD_OP_STP:
6299                 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6300                 break;
6301         case MFI_CMD_OP_LD_SCSI:
6302         case MFI_CMD_OP_PD_SCSI:
6303                 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6304                 break;
6305         default:
6306                 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6307                     "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6308                 rval = DDI_FAILURE;
6309                 break;
6310         }
6311 
6312         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6313                 rval = DDI_FAILURE;
6314 
6315         if (instance->tbolt) {
6316                 return_raid_msg_mfi_pkt(instance, cmd);
6317         } else {
6318                 mrsas_return_mfi_pkt(instance, cmd);
6319         }
6320 
6321         return (rval);
6322 }
6323 
6324 /*
6325  * AEN
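      *
      * handle_mfi_aen() services the AEN registration ioctl by passing the
      * caller's class/locale word and the current sequence number on to
      * register_mfi_aen().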
6326  */
6327 static int
6328 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6329 {
6330         int     rval = 0;
6331 
6332         rval = register_mfi_aen(instance, instance->aen_seq_num,
6333             aen->class_locale_word);
6334 
6335         aen->cmd_status = (uint8_t)rval;
6336 
6337         return (rval);
6338 }
6339 
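     /*
      * register_mfi_aen
      *
      * Register with the firmware for asynchronous event notification.
      * If an AEN command is already outstanding and its class/locale does
      * not already cover the new request, that command is aborted and a
      * new registration is issued with the merged class/locale.
      */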
6340 static int
6341 register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
6342     uint32_t class_locale_word)
6343 {
6344         int     ret_val;
6345 
6346         struct mrsas_cmd        *cmd, *aen_cmd;
6347         struct mrsas_dcmd_frame *dcmd;
6348         union mrsas_evt_class_locale    curr_aen;
6349         union mrsas_evt_class_locale    prev_aen;
6350 
6351         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6352         /*
6353          * If there is an AEN pending already (aen_cmd), check whether
6354          * the class_locale of that pending AEN is inclusive of the new
6355          * AEN request we currently have. If it is, then we don't have
6356          * to do anything; whichever events the current AEN request is
6357          * subscribing to have already been subscribed to.
6358          *
6359          * If the old command is _not_ inclusive, then we have to abort
6360          * that command, form a class_locale that is a superset of both
6361          * the old and the current one, and re-issue the registration
6362          * to the FW.
6363          */
6364 
6365         curr_aen.word = LE_32(class_locale_word);
6366         curr_aen.members.locale = LE_16(curr_aen.members.locale);
6367         aen_cmd = instance->aen_cmd;
6368         if (aen_cmd) {
6369                 prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
6370                     &aen_cmd->frame->dcmd.mbox.w[1]);
6371                 prev_aen.word = LE_32(prev_aen.word);
6372                 prev_aen.members.locale = LE_16(prev_aen.members.locale);
6373                 /*
6374                  * A class whose enum value is smaller is inclusive of all
6375                  * higher values. If a PROGRESS (= -1) was previously
6376                  * registered, then a new registration requests for higher
6377                  * classes need not be sent to FW. They are automatically
6378                  * included.
6379                  *
6380                  * Locale numbers don't have such hierarchy. They are bitmap
6381                  * values
6382                  */
6383                 if ((prev_aen.members.class <= curr_aen.members.class) &&
6384                     !((prev_aen.members.locale & curr_aen.members.locale) ^
6385                     curr_aen.members.locale)) {
6386                         /*
6387                          * Previously issued event registration includes
6388                          * current request. Nothing to do.
6389                          */
6390 
6391                         return (0);
6392                 } else {
6393                         curr_aen.members.locale |= prev_aen.members.locale;
6394 
6395                         if (prev_aen.members.class < curr_aen.members.class)
6396                                 curr_aen.members.class = prev_aen.members.class;
6397 
6398                         ret_val = abort_aen_cmd(instance, aen_cmd);
6399 
6400                         if (ret_val) {
6401                                 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
6402                                     "failed to abort previous AEN command"));
6403 
6404                                 return (ret_val);
6405                         }
6406                 }
6407         } else {
6408                 curr_aen.word = LE_32(class_locale_word);
6409                 curr_aen.members.locale = LE_16(curr_aen.members.locale);
6410         }
6411 
6412         if (instance->tbolt) {
6413                 cmd = get_raid_msg_mfi_pkt(instance);
6414         } else {
6415                 cmd = mrsas_get_mfi_pkt(instance);
6416         }
6417 
6418         if (!cmd) {
6419                 DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
6420                     uint16_t, instance->max_fw_cmds);
6421                 return (ENOMEM);
6422         }
6423 
6424         /* Clear the frame buffer and assign back the context id */
6425         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6426         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6427             cmd->index);
6428 
6429         dcmd = &cmd->frame->dcmd;
6430 
6431         /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
6432         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
6433 
6434         (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
6435             sizeof (struct mrsas_evt_detail));
6436 
6437         /* Prepare DCMD for aen registration */
6438         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
6439         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
6440         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
6441         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
6442             MFI_FRAME_DIR_READ);
6443         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
6444         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
6445             sizeof (struct mrsas_evt_detail));
6446         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
6447             MR_DCMD_CTRL_EVENT_WAIT);
6448         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
6449         curr_aen.members.locale = LE_16(curr_aen.members.locale);
6450         curr_aen.word = LE_32(curr_aen.word);
6451         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
6452             curr_aen.word);
6453         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
6454             instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
6455         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
6456             sizeof (struct mrsas_evt_detail));
6457 
6458         instance->aen_seq_num = seq_num;
6459 
6460 
6461         /*
6462          * Store reference to the cmd used to register for AEN. When an
6463          * application wants us to register for AEN, we have to abort this
6464          * cmd and re-register with a new EVENT LOCALE supplied by that app
6465          */
6466         instance->aen_cmd = cmd;
6467 
6468         cmd->frame_count = 1;
6469 
6470         /* Issue the aen registration frame */
6471         /* atomic_add_16 (&instance->fw_outstanding, 1); */
6472         if (instance->tbolt) {
6473                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6474         }
6475         instance->func_ptr->issue_cmd(cmd, instance);
6476 
6477         return (0);
6478 }
6479 
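     /*
      * display_scsi_inquiry
      *
      * Format the vendor, model, revision and device type from a standard
      * SCSI INQUIRY response into a single buffer and emit it via con_log()
      * at debug level CL_DLEVEL2.
      */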
6480 void
6481 display_scsi_inquiry(caddr_t scsi_inq)
6482 {
6483 #define MAX_SCSI_DEVICE_CODE    14
6484         int             i;
6485         char            inquiry_buf[256] = {0};
6486         int             len;
6487         const char      *const scsi_device_types[] = {
6488                 "Direct-Access    ",
6489                 "Sequential-Access",
6490                 "Printer          ",
6491                 "Processor        ",
6492                 "WORM             ",
6493                 "CD-ROM           ",
6494                 "Scanner          ",
6495                 "Optical Device   ",
6496                 "Medium Changer   ",
6497                 "Communications   ",
6498                 "Unknown          ",
6499                 "Unknown          ",
6500                 "Unknown          ",
6501                 "Enclosure        ",
6502         };
6503 
6504         len = 0;
6505 
6506         len += snprintf(inquiry_buf + len, 256 - len, "  Vendor: ");
6507         for (i = 8; i < 16; i++) {
6508                 len += snprintf(inquiry_buf + len, 256 - len, "%c",
6509                     scsi_inq[i]);
6510         }
6511 
6512         len += snprintf(inquiry_buf + len, 256 - len, "  Model: ");
6513 
6514         for (i = 16; i < 32; i++) {
6515                 len += snprintf(inquiry_buf + len, 256 - len, "%c",
6516                     scsi_inq[i]);
6517         }
6518 
6519         len += snprintf(inquiry_buf + len, 256 - len, "  Rev: ");
6520 
6521         for (i = 32; i < 36; i++) {
6522                 len += snprintf(inquiry_buf + len, 256 - len, "%c",
6523                     scsi_inq[i]);
6524         }
6525 
6526         len += snprintf(inquiry_buf + len, 256 - len, "\n");
6527 
6528 
6529         i = scsi_inq[0] & 0x1f;
6530 
6531 
6532         len += snprintf(inquiry_buf + len, 256 - len, "  Type:   %s ",
6533             i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6534             "Unknown          ");
6535 
6536 
6537         len += snprintf(inquiry_buf + len, 256 - len,
6538             "                 ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6539 
6540         if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6541                 len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
6542         } else {
6543                 len += snprintf(inquiry_buf + len, 256 - len, "\n");
6544         }
6545 
6546         con_log(CL_DLEVEL2, (CE_CONT, "%s", inquiry_buf));
6547 }
6548 
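     /*
      * Watchdog scheduled every MRSAS_1_SECOND via timeout(9F).  Walks
      * cmd_pend_list and decrements each command's drv_pkt_time; if a
      * command has expired, an online controller reset (OCR) is attempted,
      * or the adapter is killed when OCR is disabled by the firmware or has
      * already been retried more than IO_RETRY_COUNT times.  A firmware
      * fault also triggers an OCR.  The routine rearms itself unless the
      * adapter has been killed.
      */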
6549 static void
6550 io_timeout_checker(void *arg)
6551 {
6552         struct scsi_pkt *pkt = NULL;
6553         struct mrsas_instance *instance = arg;
6554         struct mrsas_cmd        *cmd = NULL;
6555         struct mrsas_header     *hdr;
6556         int time = 0;
6557         int counter = 0;
6558         struct mlist_head       *pos, *next;
6559         mlist_t                 process_list;
6560 
6561         if (instance->adapterresetinprogress == 1) {
6562                 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6563                     " reset in progress"));
6564 
6565                 instance->timeout_id = timeout(io_timeout_checker,
6566                     (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6567                 return;
6568         }
6569 
6570         /* See if this check needs to be in the beginning or last in ISR */
6571         if (mrsas_initiate_ocr_if_fw_is_faulty(instance) ==  1) {
6572                 dev_err(instance->dip, CE_WARN, "io_timeout_checker: "
6573                     "FW Fault, calling reset adapter");
6574                 dev_err(instance->dip, CE_CONT, "io_timeout_checker: "
6575                     "fw_outstanding 0x%X max_fw_cmds 0x%X",
6576                     instance->fw_outstanding, instance->max_fw_cmds);
6577                 if (instance->adapterresetinprogress == 0) {
6578                         instance->adapterresetinprogress = 1;
6579                         if (instance->tbolt)
6580                                 (void) mrsas_tbolt_reset_ppc(instance);
6581                         else
6582                                 (void) mrsas_reset_ppc(instance);
6583                         instance->adapterresetinprogress = 0;
6584                 }
6585                 instance->timeout_id = timeout(io_timeout_checker,
6586                     (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6587                 return;
6588         }
6589 
6590         INIT_LIST_HEAD(&process_list);
6591 
6592         mutex_enter(&instance->cmd_pend_mtx);
6593         mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6594                 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6595 
6596                 if (cmd == NULL) {
6597                         continue;
6598                 }
6599 
6600                 if (cmd->sync_cmd == MRSAS_TRUE) {
6601                         hdr = (struct mrsas_header *)&cmd->frame->hdr;
6602                         if (hdr == NULL) {
6603                                 continue;
6604                         }
6605                         time = --cmd->drv_pkt_time;
6606                 } else {
6607                         pkt = cmd->pkt;
6608                         if (pkt == NULL) {
6609                                 continue;
6610                         }
6611                         time = --cmd->drv_pkt_time;
6612                 }
6613                 if (time <= 0) {
6614                         dev_err(instance->dip, CE_WARN, "%llx: "
6615                             "io_timeout_checker: TIMING OUT: pkt: %p, "
6616                             "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X",
6617                             gethrtime(), (void *)pkt, (void *)cmd,
6618                             instance->fw_outstanding, instance->max_fw_cmds);
6619 
6620                         counter++;
6621                         break;
6622                 }
6623         }
6624         mutex_exit(&instance->cmd_pend_mtx);
6625 
6626         if (counter) {
6627                 if (instance->disable_online_ctrl_reset == 1) {
6628                         dev_err(instance->dip, CE_WARN, "%s(): OCR is NOT "
6629                             "supported by Firmware, KILL adapter!!!",
6630                             __func__);
6631 
6632                         if (instance->tbolt)
6633                                 mrsas_tbolt_kill_adapter(instance);
6634                         else
6635                                 (void) mrsas_kill_adapter(instance);
6636 
6637                         return;
6638                 } else {
6639                         if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6640                                 if (instance->adapterresetinprogress == 0) {
6641                                         if (instance->tbolt) {
6642                                                 (void) mrsas_tbolt_reset_ppc(
6643                                                     instance);
6644                                         } else {
6645                                                 (void) mrsas_reset_ppc(
6646                                                     instance);
6647                                         }
6648                                 }
6649                         } else {
6650                                 dev_err(instance->dip, CE_WARN,
6651                                     "io_timeout_checker: "
6652                                     "cmd %p cmd->index %d "
6653                                     "timed out even after 3 resets: "
6654                                     "so KILL adapter", (void *)cmd, cmd->index);
6655 
6656                                 mrsas_print_cmd_details(instance, cmd, 0xDD);
6657 
6658                                 if (instance->tbolt)
6659                                         mrsas_tbolt_kill_adapter(instance);
6660                                 else
6661                                         (void) mrsas_kill_adapter(instance);
6662                                 return;
6663                         }
6664                 }
6665         }
6666         con_log(CL_ANN, (CE_NOTE, "mrsas: "
6667             "scheduling next timeout check\n"));
6669         instance->timeout_id =
6670             timeout(io_timeout_checker, (void *)instance,
6671             drv_usectohz(MRSAS_1_SECOND));
6672 }
6673 
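     /*
      * Return the firmware status/state word from outbound scratch pad 0.
      */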
6674 static uint32_t
6675 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6676 {
6677         return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6678 }
6679 
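     /*
      * Hand an MFI frame to the controller through the inbound queue port.
      * The value written is the frame physical address with
      * ((frame_count - 1) << 1) | 1 folded into the low-order bits, the same
      * encoding used by the other MFI issue paths below.  fw_outstanding is
      * bumped for every frame issued, and commands carrying a scsi_pkt are
      * pushed onto the pending list unless an adapter reset is in progress.
      */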
6680 static void
6681 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
6682 {
6683         struct scsi_pkt *pkt;
6684         atomic_inc_16(&instance->fw_outstanding);
6685 
6686         pkt = cmd->pkt;
6687         if (pkt) {
6688                 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc: "
6689                     "ISSUED CMD TO FW : cmd: %p instance: %p pkt: %p "
6690                     "pkt_time: %x\n",
6691                     gethrtime(), (void *)cmd, (void *)instance,
6692                     (void *)pkt, cmd->drv_pkt_time));
6693                 if (instance->adapterresetinprogress) {
6694                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6695                         con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6696                 } else {
6697                         push_pending_mfi_pkt(instance, cmd);
6698                 }
6699 
6700         } else {
6701                 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc: "
6702                     "ISSUED CMD TO FW : cmd : %p, instance: %p "
6703                     "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6704         }
6705 
6706         mutex_enter(&instance->reg_write_mtx);
6707         /* Issue the command to the FW */
6708         WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6709             (((cmd->frame_count - 1) << 1) | 1), instance);
6710         mutex_exit(&instance->reg_write_mtx);
6711 
6712 }
6713 
6714 /*
6715  * issue_cmd_in_sync_mode - issue an MFI frame and wait for its completion
6716  */
6717 static int
6718 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6719     struct mrsas_cmd *cmd)
6720 {
6721         int     i;
6722         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6723         struct mrsas_header *hdr = &cmd->frame->hdr;
6724 
6725         con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6726 
6727         if (instance->adapterresetinprogress) {
6728                 cmd->drv_pkt_time = ddi_get16(
6729                     cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6730                 if (cmd->drv_pkt_time < debug_timeout_g)
6731                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6732 
6733                 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6734                     "issue and return in reset case\n"));
6735                 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6736                     (((cmd->frame_count - 1) << 1) | 1), instance);
6737 
6738                 return (DDI_SUCCESS);
6739         } else {
6740                 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6741                 push_pending_mfi_pkt(instance, cmd);
6742         }
6743 
6744         cmd->cmd_status      = ENODATA;
6745 
6746         mutex_enter(&instance->reg_write_mtx);
6747         /* Issue the command to the FW */
6748         WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6749             (((cmd->frame_count - 1) << 1) | 1), instance);
6750         mutex_exit(&instance->reg_write_mtx);
6751 
6752         mutex_enter(&instance->int_cmd_mtx);
6753         for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6754                 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6755         }
6756         mutex_exit(&instance->int_cmd_mtx);
6757 
6758         con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6759 
6760         if (i < (msecs - 1)) {
6761                 return (DDI_SUCCESS);
6762         } else {
6763                 return (DDI_FAILURE);
6764         }
6765 }
6766 
6767 /*
6768  * issue_cmd_in_poll_mode - issue a frame and poll cmd_status until done
6769  */
6770 static int
6771 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6772     struct mrsas_cmd *cmd)
6773 {
6774         int             i;
6775         uint16_t        flags;
6776         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6777         struct mrsas_header *frame_hdr;
6778 
6779         con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6780 
6781         frame_hdr = (struct mrsas_header *)cmd->frame;
6782         ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6783             MFI_CMD_STATUS_POLL_MODE);
6784         flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6785         flags   |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6786 
6787         ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6788 
6789         /* issue the frame using inbound queue port */
6790         WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6791             (((cmd->frame_count - 1) << 1) | 1), instance);
6792 
6793         /* wait for cmd_status to change from 0xFF */
6794         for (i = 0; i < msecs && (
6795             ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6796             == MFI_CMD_STATUS_POLL_MODE); i++) {
6797                 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6798         }
6799 
6800         if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6801             == MFI_CMD_STATUS_POLL_MODE) {
6802                 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6803                     "cmd polling timed out"));
6804                 return (DDI_FAILURE);
6805         }
6806 
6807         return (DDI_SUCCESS);
6808 }
6809 
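     /*
      * Unmask controller interrupts.  Skinny controllers take the mask value
      * used by other MFI drivers; 2108-class controllers first clear the
      * outbound doorbell and then unmask the reply-message interrupt.  The
      * trailing read of the mask register flushes the writes.
      */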
6810 static void
6811 enable_intr_ppc(struct mrsas_instance *instance)
6812 {
6813         uint32_t        mask;
6814 
6815         con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6816 
6817         if (instance->skinny) {
6818                 /* For SKINNY, write ~0x1, from BSD's mfi driver. */
6819                 WR_OB_INTR_MASK(0xfffffffe, instance);
6820         } else {
6821                 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6822                 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6823 
6824                 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6825                 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6826         }
6827 
6828         /* dummy read to force PCI flush */
6829         mask = RD_OB_INTR_MASK(instance);
6830 
6831         con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6832             "outbound_intr_mask = 0x%x", mask));
6833 }
6834 
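     /*
      * Mask all controller interrupts by writing OB_INTR_MASK to the
      * outbound interrupt mask register; the dummy read flushes the write.
      */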
6835 static void
6836 disable_intr_ppc(struct mrsas_instance *instance)
6837 {
6838         con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6839 
6840         con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6841             "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6842 
6843         /* For now, assume there are no extras needed for Skinny support. */
6844 
6845         WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6846 
6847         con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6848             "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6849 
6850         /* dummy read to force PCI flush */
6851         (void) RD_OB_INTR_MASK(instance);
6852 }
6853 
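     /*
      * Acknowledge an interrupt.  Returns DDI_INTR_UNCLAIMED if the outbound
      * status register shows the interrupt is not ours or the register
      * access handle has faulted; otherwise the interrupt is cleared by
      * writing the status back (Skinny) or to the doorbell-clear register
      * (2108) and DDI_INTR_CLAIMED is returned.
      */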
6854 static int
6855 intr_ack_ppc(struct mrsas_instance *instance)
6856 {
6857         uint32_t        status;
6858         int ret = DDI_INTR_CLAIMED;
6859 
6860         con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6861 
6862         /* check if it is our interrupt */
6863         status = RD_OB_INTR_STATUS(instance);
6864 
6865         con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6866 
6867         /*
6868          * NOTE:  Some drivers call out SKINNY here, but the return is the same
6869          * for SKINNY and 2108.
6870          */
6871         if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6872                 ret = DDI_INTR_UNCLAIMED;
6873         }
6874 
6875         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
6876                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
6877                 ret = DDI_INTR_UNCLAIMED;
6878         }
6879 
6880         if (ret == DDI_INTR_UNCLAIMED) {
6881                 return (ret);
6882         }
6883 
6884         /*
6885          * Clear the interrupt by writing back the same value.
6886          * Another case where SKINNY is slightly different.
6887          */
6888         if (instance->skinny) {
6889                 WR_OB_INTR_STATUS(status, instance);
6890         } else {
6891                 WR_OB_DOORBELL_CLEAR(status, instance);
6892         }
6893 
6894         /* dummy READ */
6895         status = RD_OB_INTR_STATUS(instance);
6896 
6897         con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6898 
6899         return (ret);
6900 }
6901 
6902 /*
6903  * Mark the HBA as bad.  This will be called either when an I/O packet
6904  * times out even after 3 FW resets, or when the FW is found to be
6905  * faulted even after 3 consecutive resets.
6906  */
6907 
6908 static int
6909 mrsas_kill_adapter(struct mrsas_instance *instance)
6910 {
6911         if (instance->deadadapter == 1)
6912                 return (DDI_FAILURE);
6913 
6914         con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
6915             "Writing to doorbell with MFI_STOP_ADP "));
6916         mutex_enter(&instance->ocr_flags_mtx);
6917         instance->deadadapter = 1;
6918         mutex_exit(&instance->ocr_flags_mtx);
6919         instance->func_ptr->disable_intr(instance);
6920         WR_IB_DOORBELL(MFI_STOP_ADP, instance);
6921         (void) mrsas_complete_pending_cmds(instance);
6922         return (DDI_SUCCESS);
6923 }
6924 
6925 
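     /*
      * Online controller reset (OCR) for MFI (2108-class) controllers.
      * Writes the diagnostic write-enable magic sequence, requests the
      * adapter reset and waits for it to complete, brings the firmware back
      * to the READY state, reinitializes the producer/consumer indexes and
      * the MFI layer, and finally re-issues the pending commands and the
      * AEN registration command.  If the firmware keeps faulting after
      * MAX_FW_RESET_COUNT attempts, the adapter is killed.
      */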
6926 static int
6927 mrsas_reset_ppc(struct mrsas_instance *instance)
6928 {
6929         uint32_t status;
6930         uint32_t retry = 0;
6931         uint32_t cur_abs_reg_val;
6932         uint32_t fw_state;
6933 
6934         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6935 
6936         if (instance->deadadapter == 1) {
6937                 dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: "
6938                     "no more resets as HBA has been marked dead ");
6939                 return (DDI_FAILURE);
6940         }
6941         mutex_enter(&instance->ocr_flags_mtx);
6942         instance->adapterresetinprogress = 1;
6943         mutex_exit(&instance->ocr_flags_mtx);
6944         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adapterresetinprogress "
6945             "flag set, time %llx", gethrtime()));
6946 
6947         instance->func_ptr->disable_intr(instance);
6948 retry_reset:
6949         WR_IB_WRITE_SEQ(0, instance);
6950         WR_IB_WRITE_SEQ(4, instance);
6951         WR_IB_WRITE_SEQ(0xb, instance);
6952         WR_IB_WRITE_SEQ(2, instance);
6953         WR_IB_WRITE_SEQ(7, instance);
6954         WR_IB_WRITE_SEQ(0xd, instance);
6955         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
6956             "to write sequence register\n"));
6957         delay(100 * drv_usectohz(MILLISEC));
6958         status = RD_OB_DRWE(instance);
6959 
6960         while (!(status & DIAG_WRITE_ENABLE)) {
6961                 delay(100 * drv_usectohz(MILLISEC));
6962                 status = RD_OB_DRWE(instance);
6963                 if (retry++ == 100) {
6964                         dev_err(instance->dip, CE_WARN,
6965                             "mrsas_reset_ppc: DRWE bit "
6966                             "check retry count %d", retry);
6967                         return (DDI_FAILURE);
6968                 }
6969         }
6970         WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
6971         delay(100 * drv_usectohz(MILLISEC));
6972         status = RD_OB_DRWE(instance);
6973         while (status & DIAG_RESET_ADAPTER) {
6974                 delay(100 * drv_usectohz(MILLISEC));
6975                 status = RD_OB_DRWE(instance);
6976                 if (retry++ == 100) {
6977                         dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: "
6978                             "RESET FAILED. KILL adapter called.");
6979 
6980                         (void) mrsas_kill_adapter(instance);
6981                         return (DDI_FAILURE);
6982                 }
6983         }
6984         con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
6985         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6986             "Calling mfi_state_transition_to_ready"));
6987 
6988         /* Mark HBA as bad, if FW is fault after 3 continuous resets */
6989         if (mfi_state_transition_to_ready(instance) ||
6990             debug_fw_faults_after_ocr_g == 1) {
6991                 cur_abs_reg_val =
6992                     instance->func_ptr->read_fw_status_reg(instance);
6993                 fw_state        = cur_abs_reg_val & MFI_STATE_MASK;
6994 
6995 #ifdef OCRDEBUG
6996                 con_log(CL_ANN1, (CE_NOTE,
6997                     "mrsas_reset_ppc: before fake: FW is not ready, "
6998                     "FW state = 0x%x", fw_state));
6999                 if (debug_fw_faults_after_ocr_g == 1)
7000                         fw_state = MFI_STATE_FAULT;
7001 #endif
7002 
7003                 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: FW is not ready, "
7004                     "FW state = 0x%x", fw_state));
7005 
7006                 if (fw_state == MFI_STATE_FAULT) {
7007                         /* increment the count */
7008                         instance->fw_fault_count_after_ocr++;
7009                         if (instance->fw_fault_count_after_ocr
7010                             < MAX_FW_RESET_COUNT) {
7011                                 dev_err(instance->dip, CE_WARN,
7012                                     "mrsas_reset_ppc: "
7013                                     "FW is in fault after OCR count %d "
7014                                     "Retry Reset",
7015                                     instance->fw_fault_count_after_ocr);
7016                                 goto retry_reset;
7017 
7018                         } else {
7019                                 dev_err(instance->dip, CE_WARN,
7020                                     "mrsas_reset_ppc: "
7021                                     "Max Reset Count exceeded >%d, "
7022                                     "Mark HBA as bad, KILL adapter",
7023                                     MAX_FW_RESET_COUNT);
7024 
7025                                 (void) mrsas_kill_adapter(instance);
7026                                 return (DDI_FAILURE);
7027                         }
7028                 }
7029         }
7030         /* reset the counter as FW is up after OCR */
7031         instance->fw_fault_count_after_ocr = 0;
7032 
7033 
7034         ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7035             instance->producer, 0);
7036 
7037         ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7038             instance->consumer, 0);
7039 
7040         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7041             "after resetting producer/consumer, check indexes: "
7042             "producer %x consumer %x", *instance->producer,
7043             *instance->consumer));
7044 
7045         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7046             "Calling mrsas_issue_init_mfi"));
7047         (void) mrsas_issue_init_mfi(instance);
7048         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7049             "mrsas_issue_init_mfi Done"));
7050 
7051         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7052             "Calling mrsas_print_pending_cmd\n"));
7053         (void) mrsas_print_pending_cmds(instance);
7054         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7055             "mrsas_print_pending_cmd done\n"));
7056 
7057         instance->func_ptr->enable_intr(instance);
7058         instance->fw_outstanding = 0;
7059 
7060         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7061             "Calling mrsas_issue_pending_cmds"));
7062         (void) mrsas_issue_pending_cmds(instance);
7063         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7064             "issue_pending_cmds done.\n"));
7065 
7066         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7067             "Calling aen registration"));
7068 
7069 
7070         instance->aen_cmd->retry_count_for_ocr = 0;
7071         instance->aen_cmd->drv_pkt_time = 0;
7072 
7073         instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
7074         con_log(CL_ANN1, (CE_NOTE,
7075             "Unsetting adapterresetinprogress flag.\n"));
7076         mutex_enter(&instance->ocr_flags_mtx);
7077         instance->adapterresetinprogress = 0;
7078         mutex_exit(&instance->ocr_flags_mtx);
7079         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7080             "adapterresetinprogress flag unset"));
7081 
7082         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
7083         return (DDI_SUCCESS);
7084 }
7085 
7086 /*
7087  * FMA functions.
7088  */
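
     /*
      * Check the command's frame DMA handle and the instance's internal DMA
      * and register access handles; on any fault, report
      * DDI_SERVICE_UNAFFECTED, mark the packet (if any) with CMD_TRAN_ERR
      * and return DDI_FAILURE.
      */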
7089 int
7090 mrsas_common_check(struct mrsas_instance *instance, struct  mrsas_cmd *cmd)
7091 {
7092         int ret = DDI_SUCCESS;
7093 
7094         if (cmd != NULL &&
7095             mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7096             DDI_SUCCESS) {
7097                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7098                 if (cmd->pkt != NULL) {
7099                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7100                         cmd->pkt->pkt_statistics = 0;
7101                 }
7102                 ret = DDI_FAILURE;
7103         }
7104         if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7105             != DDI_SUCCESS) {
7106                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7107                 if (cmd != NULL && cmd->pkt != NULL) {
7108                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7109                         cmd->pkt->pkt_statistics = 0;
7110                 }
7111                 ret = DDI_FAILURE;
7112         }
7113         if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7114             DDI_SUCCESS) {
7115                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7116                 if (cmd != NULL && cmd->pkt != NULL) {
7117                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7118                         cmd->pkt->pkt_statistics = 0;
7119                 }
7120                 ret = DDI_FAILURE;
7121         }
7122         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7123                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7124 
7125                 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7126 
7127                 if (cmd != NULL && cmd->pkt != NULL) {
7128                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7129                         cmd->pkt->pkt_statistics = 0;
7130                 }
7131                 ret = DDI_FAILURE;
7132         }
7133 
7134         return (ret);
7135 }
7136 
7137 /*ARGSUSED*/
7138 static int
7139 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7140 {
7141         /*
7142          * as the driver can always deal with an error in any dma or
7143          * access handle, we can just return the fme_status value.
7144          */
7145         pci_ereport_post(dip, err, NULL);
7146         return (err->fme_status);
7147 }
7148 
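     /*
      * If the instance has any FMA capability, register it with the I/O
      * Fault Services framework and switch the access/DMA attributes to
      * DDI_FLAGERR_ACC/DDI_DMA_FLAGERR so handle errors are flagged back to
      * the driver; otherwise the default attributes are restored.
      */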
7149 static void
7150 mrsas_fm_init(struct mrsas_instance *instance)
7151 {
7152         /* Need to change iblock to priority for new MSI intr */
7153         ddi_iblock_cookie_t fm_ibc;
7154 
7155         /* Only register with IO Fault Services if we have some capability */
7156         if (instance->fm_capabilities) {
7157                 /* Adjust access and dma attributes for FMA */
7158                 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7159                 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7160 
7161                 /*
7162                  * Register capabilities with IO Fault Services.
7163                  * fm_capabilities will be updated to indicate
7164                  * capabilities actually supported (not requested.)
7165                  */
7166 
7167                 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7168 
7169                 /*
7170                  * Initialize pci ereport capabilities if ereport
7171                  * capable (should always be.)
7172                  */
7173 
7174                 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7175                     DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7176                         pci_ereport_setup(instance->dip);
7177                 }
7178 
7179                 /*
7180                  * Register error callback if error callback capable.
7181                  */
7182                 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7183                         ddi_fm_handler_register(instance->dip,
7184                             mrsas_fm_error_cb, (void*) instance);
7185                 }
7186         } else {
7187                 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7188                 mrsas_generic_dma_attr.dma_attr_flags = 0;
7189         }
7190 }
7191 
7192 static void
7193 mrsas_fm_fini(struct mrsas_instance *instance)
7194 {
7195         /* Only unregister FMA capabilities if registered */
7196         if (instance->fm_capabilities) {
7197                 /*
7198                  * Un-register error callback if error callback capable.
7199                  */
7200                 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7201                         ddi_fm_handler_unregister(instance->dip);
7202                 }
7203 
7204                 /*
7205                  * Release any resources allocated by pci_ereport_setup()
7206                  */
7207                 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7208                     DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7209                         pci_ereport_teardown(instance->dip);
7210                 }
7211 
7212                 /* Unregister from IO Fault Services */
7213                 ddi_fm_fini(instance->dip);
7214 
7215                 /* Adjust access and dma attributes for FMA */
7216                 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7217                 mrsas_generic_dma_attr.dma_attr_flags = 0;
7218         }
7219 }
7220 
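     /*
      * Return the FMA error status (fme_status) of a register access
      * handle, or DDI_FAILURE if no handle was set up.  The DMA variant
      * below does the same for DMA handles.
      */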
7221 int
7222 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7223 {
7224         ddi_fm_error_t de;
7225 
7226         if (handle == NULL) {
7227                 return (DDI_FAILURE);
7228         }
7229 
7230         ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7231 
7232         return (de.fme_status);
7233 }
7234 
7235 int
7236 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7237 {
7238         ddi_fm_error_t de;
7239 
7240         if (handle == NULL) {
7241                 return (DDI_FAILURE);
7242         }
7243 
7244         ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7245 
7246         return (de.fme_status);
7247 }
7248 
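     /*
      * Post an ereport in the DDI_FM_DEVICE class with the supplied detail
      * string, provided the instance registered ereport capability.
      */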
7249 void
7250 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7251 {
7252         uint64_t ena;
7253         char buf[FM_MAX_CLASS];
7254 
7255         (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7256         ena = fm_ena_generate(0, FM_ENA_FMT1);
7257         if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7258                 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7259                     FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7260         }
7261 }
7262 
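     /*
      * Allocate and enable interrupts of the requested type (FIXED, MSI or
      * MSI-X).  The driver services a single vector, so the count is capped
      * at one, and high-level interrupts are rejected.  On any failure the
      * partially allocated resources are released and DDI_FAILURE is
      * returned.
      */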
7263 static int
7264 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7265 {
7266 
7267         dev_info_t *dip = instance->dip;
7268         int     avail, actual, count;
7269         int     i, flag, ret;
7270 
7271         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7272             intr_type));
7273 
7274         /* Get number of interrupts */
7275         ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7276         if ((ret != DDI_SUCCESS) || (count == 0)) {
7277                 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed: "
7278                     "ret %d count %d", ret, count));
7279 
7280                 return (DDI_FAILURE);
7281         }
7282 
7283         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7284 
7285         /* Get number of available interrupts */
7286         ret = ddi_intr_get_navail(dip, intr_type, &avail);
7287         if ((ret != DDI_SUCCESS) || (avail == 0)) {
7288                 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed: "
7289                     "ret %d avail %d", ret, avail));
7290 
7291                 return (DDI_FAILURE);
7292         }
7293         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7294 
7295         /* Only one interrupt routine. So limit the count to 1 */
7296         if (count > 1) {
7297                 count = 1;
7298         }
7299 
7300         /*
7301          * Allocate an array of interrupt handlers. Currently we support
7302          * only one interrupt. The framework can be extended later.
7303          */
7304         instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7305         instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7306             KM_SLEEP);
7307         ASSERT(instance->intr_htable);
7308 
7309         flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7310             (intr_type == DDI_INTR_TYPE_MSIX)) ?
7311             DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7312 
7313         /* Allocate interrupt */
7314         ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7315             count, &actual, flag);
7316 
7317         if ((ret != DDI_SUCCESS) || (actual == 0)) {
7318                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7319                     "ddi_intr_alloc() failed: ret %d", ret));
7320                 goto mrsas_free_htable;
7321         }
7322 
7323         if (actual < count) {
7324                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7325                     "Requested = %d  Received = %d", count, actual));
7326         }
7327         instance->intr_cnt = actual;
7328 
7329         /*
7330          * Get the priority of the interrupt allocated.
7331          */
7332         if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7333             &instance->intr_pri)) != DDI_SUCCESS) {
7334                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7335                     "get priority call failed"));
7336                 goto mrsas_free_handles;
7337         }
7338 
7339         /*
7340          * Test for high level mutex. we don't support them.
7341          */
7342         if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7343                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7344                     "High level interrupts not supported."));
7345                 goto mrsas_free_handles;
7346         }
7347 
7348         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7349             instance->intr_pri));
7350 
7351         /* Call ddi_intr_add_handler() */
7352         for (i = 0; i < actual; i++) {
7353                 ret = ddi_intr_add_handler(instance->intr_htable[i],
7354                     (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7355                     (caddr_t)(uintptr_t)i);
7356 
7357                 if (ret != DDI_SUCCESS) {
7358                         con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7359                             "failed %d", ret));
7360                         goto mrsas_free_handles;
7361                 }
7362 
7363         }
7364 
7365         con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7366 
7367         if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7368             &instance->intr_cap)) != DDI_SUCCESS) {
7369                 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7370                     ret));
7371                 goto mrsas_free_handlers;
7372         }
7373 
7374         if (instance->intr_cap &  DDI_INTR_FLAG_BLOCK) {
7375                 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block_enable"));
7376 
7377                 (void) ddi_intr_block_enable(instance->intr_htable,
7378                     instance->intr_cnt);
7379         } else {
7380                 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7381 
7382                 for (i = 0; i < instance->intr_cnt; i++) {
7383                         (void) ddi_intr_enable(instance->intr_htable[i]);
7384                         con_log(CL_ANN, (CE_NOTE, "ddi_intr_enable() done "
7385                             "for intr %d", i));
7386                 }
7387         }
7388 
7389         return (DDI_SUCCESS);
7390 
7391 mrsas_free_handlers:
7392         for (i = 0; i < actual; i++)
7393                 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7394 
7395 mrsas_free_handles:
7396         for (i = 0; i < actual; i++)
7397                 (void) ddi_intr_free(instance->intr_htable[i]);
7398 
7399 mrsas_free_htable:
7400         if (instance->intr_htable != NULL)
7401                 kmem_free(instance->intr_htable, instance->intr_htable_size);
7402 
7403         instance->intr_htable = NULL;
7404         instance->intr_htable_size = 0;
7405 
7406         return (DDI_FAILURE);
7407 
7408 }
7409 
7410 
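     /*
      * Disable, remove and free every interrupt handle allocated by
      * mrsas_add_intrs().
      */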
7411 static void
7412 mrsas_rem_intrs(struct mrsas_instance *instance)
7413 {
7414         int i;
7415 
7416         con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7417 
7418         /* Disable all interrupts first */
7419         if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7420                 (void) ddi_intr_block_disable(instance->intr_htable,
7421                     instance->intr_cnt);
7422         } else {
7423                 for (i = 0; i < instance->intr_cnt; i++) {
7424                         (void) ddi_intr_disable(instance->intr_htable[i]);
7425                 }
7426         }
7427 
7428         /* Remove all the handlers */
7429 
7430         for (i = 0; i < instance->intr_cnt; i++) {
7431                 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7432                 (void) ddi_intr_free(instance->intr_htable[i]);
7433         }
7434 
7435         if (instance->intr_htable != NULL)
7436                 kmem_free(instance->intr_htable, instance->intr_htable_size);
7437 
7438         instance->intr_htable = NULL;
7439         instance->intr_htable_size = 0;
7440 
7441 }
7442 
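     /*
      * Bus config entry point.  For BUS_CONFIG_ONE the target/lun is parsed
      * out of the child name and the matching logical drive (lun 0) or, on
      * TBOLT/Skinny controllers, physical drive is configured;
      * BUS_CONFIG_DRIVER and BUS_CONFIG_ALL enumerate everything.  The
      * nexus is held with ndi_devi_enter() for the duration.
      */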
7443 static int
7444 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7445     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7446 {
7447         struct mrsas_instance *instance;
7448         int config;
7449         int rval  = NDI_SUCCESS;
7450 
7451         char *ptr = NULL;
7452         int tgt, lun;
7453 
7454         con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7455 
7456         if ((instance = ddi_get_soft_state(mrsas_state,
7457             ddi_get_instance(parent))) == NULL) {
7458                 return (NDI_FAILURE);
7459         }
7460 
7461         /* Hold nexus during bus_config */
7462         ndi_devi_enter(parent, &config);
7463         switch (op) {
7464         case BUS_CONFIG_ONE: {
7465 
7466                 /* parse wwid/target name out of name given */
7467                 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7468                         rval = NDI_FAILURE;
7469                         break;
7470                 }
7471                 ptr++;
7472 
7473                 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7474                         rval = NDI_FAILURE;
7475                         break;
7476                 }
7477 
7478                 if (lun == 0) {
7479                         rval = mrsas_config_ld(instance, tgt, lun, childp);
7480                 } else if ((instance->tbolt || instance->skinny) && lun != 0) {
7481                         rval = mrsas_tbolt_config_pd(instance,
7482                             tgt, lun, childp);
7483                 } else {
7484                         rval = NDI_FAILURE;
7485                 }
7486 
7487                 break;
7488         }
7489         case BUS_CONFIG_DRIVER:
7490         case BUS_CONFIG_ALL: {
7491 
7492                 (void) mrsas_config_all_devices(instance);
7493 
7494                 rval = NDI_SUCCESS;
7495                 break;
7496         }
7497         }
7498 
7499         if (rval == NDI_SUCCESS) {
7500                 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7501 
7502         }
7503         ndi_devi_exit(parent, config);
7504 
7505         con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7506             rval));
7507         return (rval);
7508 }
7509 
7510 static int
7511 mrsas_config_all_devices(struct mrsas_instance *instance)
7512 {
7513         int rval, tgt;
7514 
7515         for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7516                 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7517 
7518         }
7519 
7520         /* Config PD devices connected to the card */
7521         if (instance->tbolt || instance->skinny) {
7522                 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7523                         (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7524                 }
7525         }
7526 
7527         rval = NDI_SUCCESS;
7528         return (rval);
7529 }
7530 
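     /*
      * Split a child name of the form "name@target,lun[:minor]" and return
      * the target and lun, both parsed as hexadecimal.
      */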
7531 static int
7532 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7533 {
7534         char devbuf[SCSI_MAXNAMELEN];
7535         char *addr;
7536         char *p,  *tp, *lp;
7537         long num;
7538 
7539         /* Parse dev name and address */
7540         (void) strcpy(devbuf, devnm);
7541         addr = "";
7542         for (p = devbuf; *p != '\0'; p++) {
7543                 if (*p == '@') {
7544                         addr = p + 1;
7545                         *p = '\0';
7546                 } else if (*p == ':') {
7547                         *p = '\0';
7548                         break;
7549                 }
7550         }
7551 
7552         /* Parse target and lun */
7553         for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7554                 if (*p == ',') {
7555                         lp = p + 1;
7556                         *p = '\0';
7557                         break;
7558                 }
7559         }
7560         if (tgt && tp) {
7561                 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7562                         return (DDI_FAILURE); /* Can declare this as constant */
7563                 }
7564                 *tgt = (int)num;
7565         }
7566         if (lun && lp) {
7567                 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7568                         return (DDI_FAILURE);
7569                 }
7570                 *lun = (int)num;
7571         }
7572         return (DDI_SUCCESS);  /* Success case */
7573 }
7574 
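     /*
      * Configure the logical drive at the given target.  An existing child
      * node is reused, unless the target is no longer valid, in which case
      * an unconfig service event is queued to tear it down.  Otherwise the
      * target is probed and a child node is created through
      * mrsas_config_scsi_device().
      */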
7575 static int
7576 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7577     uint8_t lun, dev_info_t **ldip)
7578 {
7579         struct scsi_device *sd;
7580         dev_info_t *child;
7581         int rval;
7582 
7583         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7584             tgt, lun));
7585 
7586         if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7587                 if (ldip) {
7588                         *ldip = child;
7589                 }
7590                 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7591                         rval = mrsas_service_evt(instance, tgt, 0,
7592                             MRSAS_EVT_UNCONFIG_TGT, NULL);
7593                         con_log(CL_ANN1, (CE_WARN,
7594                             "mr_sas: DELETING STALE ENTRY rval = %d "
7595                             "tgt id = %d ", rval, tgt));
7596                         return (NDI_FAILURE);
7597                 }
7598                 return (NDI_SUCCESS);
7599         }
7600 
7601         sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7602         sd->sd_address.a_hba_tran = instance->tran;
7603         sd->sd_address.a_target = (uint16_t)tgt;
7604         sd->sd_address.a_lun = (uint8_t)lun;
7605 
7606         if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7607                 rval = mrsas_config_scsi_device(instance, sd, ldip);
7608         else
7609                 rval = NDI_FAILURE;
7610 
7611         /* sd_unprobe is blank now. Free buffer manually */
7612         if (sd->sd_inq) {
7613                 kmem_free(sd->sd_inq, SUN_INQSIZE);
7614                 sd->sd_inq = (struct scsi_inquiry *)NULL;
7615         }
7616 
7617         kmem_free(sd, sizeof (struct scsi_device));
7618         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7619             rval));
7620         return (rval);
7621 }
7622 
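     /*
      * Allocate a child devinfo node for the probed scsi_device, attach the
      * "target", "lun" and "compatible" properties and online it.  The new
      * node, if any, is returned through dipp.
      */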
7623 int
7624 mrsas_config_scsi_device(struct mrsas_instance *instance,
7625     struct scsi_device *sd, dev_info_t **dipp)
7626 {
7627         char *nodename = NULL;
7628         char **compatible = NULL;
7629         int ncompatible = 0;
7630         char *childname;
7631         dev_info_t *ldip = NULL;
7632         int tgt = sd->sd_address.a_target;
7633         int lun = sd->sd_address.a_lun;
7634         int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7635         int rval;
7636 
7637         con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7638         scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7639             NULL, &nodename, &compatible, &ncompatible);
7640 
7641         if (nodename == NULL) {
7642                 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7643                     "for t%dL%d", tgt, lun));
7644                 rval = NDI_FAILURE;
7645                 goto finish;
7646         }
7647 
7648         childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7649         con_log(CL_DLEVEL1, (CE_NOTE,
7650             "mr_sas: Childname = %s nodename = %s", childname, nodename));
7651 
7652         /* Create a dev node */
7653         rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7654         con_log(CL_DLEVEL1, (CE_NOTE,
7655             "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7656         if (rval == NDI_SUCCESS) {
7657                 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7658                     DDI_PROP_SUCCESS) {
7659                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7660                             "property for t%dl%d target", tgt, lun));
7661                         rval = NDI_FAILURE;
7662                         goto finish;
7663                 }
7664                 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7665                     DDI_PROP_SUCCESS) {
7666                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7667                             "property for t%dl%d lun", tgt, lun));
7668                         rval = NDI_FAILURE;
7669                         goto finish;
7670                 }
7671 
7672                 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7673                     "compatible", compatible, ncompatible) !=
7674                     DDI_PROP_SUCCESS) {
7675                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7676                             "property for t%dl%d compatible", tgt, lun));
7677                         rval = NDI_FAILURE;
7678                         goto finish;
7679                 }
7680 
7681                 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7682                 if (rval != NDI_SUCCESS) {
7683                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7684                             "t%dl%d", tgt, lun));
7685                         ndi_prop_remove_all(ldip);
7686                         (void) ndi_devi_free(ldip);
7687                 } else {
7688                         con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done: "
7689                             "t%dl%d", tgt, lun));
7690                 }
7691 
7692         }
7693 finish:
7694         if (dipp) {
7695                 *dipp = ldip;
7696         }
7697 
7698         con_log(CL_DLEVEL1, (CE_NOTE,
7699             "mr_sas: config_scsi_device rval = %d t%dL%d",
7700             rval, tgt, lun));
7701         scsi_hba_nodename_compatible_free(nodename, compatible);
7702         return (rval);
7703 }
7704 
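     /*
      * Queue a config/unconfig event for asynchronous handling on the
      * driver's taskq; the devinfo work itself is done by
      * mrsas_issue_evt_taskq().
      */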
7705 /*ARGSUSED*/
7706 int
7707 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7708     uint64_t wwn)
7709 {
7710         struct mrsas_eventinfo *mrevt = NULL;
7711 
7712         con_log(CL_ANN1, (CE_NOTE,
7713             "mrsas_service_evt called for t%dl%d event = %d",
7714             tgt, lun, event));
7715 
7716         if ((instance->taskq == NULL) || (mrevt =
7717             kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7718                 return (ENOMEM);
7719         }
7720 
7721         mrevt->instance = instance;
7722         mrevt->tgt = tgt;
7723         mrevt->lun = lun;
7724         mrevt->event = event;
7725         mrevt->wwn = wwn;
7726 
7727         if ((ddi_taskq_dispatch(instance->taskq,
7728             (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7729             DDI_SUCCESS) {
7730                 con_log(CL_ANN1, (CE_NOTE,
7731                     "mr_sas: Event task failed for t%dl%d event = %d",
7732                     tgt, lun, event));
7733                 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7734                 return (DDI_FAILURE);
7735         }
7736         DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7737         return (DDI_SUCCESS);
7738 }
7739 
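     /*
      * Taskq callback for mrsas_service_evt().  Configures or unconfigures
      * the child node of the target named in the event, cleaning up the
      * devfs namespace before offlining an attached node.
      */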
7740 static void
7741 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7742 {
7743         struct mrsas_instance *instance = mrevt->instance;
7744         dev_info_t *dip, *pdip;
7745         int circ1 = 0;
7746         char *devname;
7747 
7748         con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7749             " tgt %d lun %d event %d",
7750             mrevt->tgt, mrevt->lun, mrevt->event));
7751 
7752         if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7753                 mutex_enter(&instance->config_dev_mtx);
7754                 dip = instance->mr_ld_list[mrevt->tgt].dip;
7755                 mutex_exit(&instance->config_dev_mtx);
7756         } else {
7757                 mutex_enter(&instance->config_dev_mtx);
7758                 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7759                 mutex_exit(&instance->config_dev_mtx);
7760         }
7761 
7762 
7763         ndi_devi_enter(instance->dip, &circ1);
7764         switch (mrevt->event) {
7765         case MRSAS_EVT_CONFIG_TGT:
7766                 if (dip == NULL) {
7767 
7768                         if (mrevt->lun == 0) {
7769                                 (void) mrsas_config_ld(instance, mrevt->tgt,
7770                                     0, NULL);
7771                         } else if (instance->tbolt || instance->skinny) {
7772                                 (void) mrsas_tbolt_config_pd(instance,
7773                                     mrevt->tgt,
7774                                     1, NULL);
7775                         }
7776                         con_log(CL_ANN1, (CE_NOTE,
7777                             "mr_sas: EVT_CONFIG_TGT called:"
7778                             " for tgt %d lun %d event %d",
7779                             mrevt->tgt, mrevt->lun, mrevt->event));
7780 
7781                 } else {
7782                         con_log(CL_ANN1, (CE_NOTE,
7783                             "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7784                             " for tgt %d lun %d event %d",
7785                             mrevt->tgt, mrevt->lun, mrevt->event));
7786                 }
7787                 break;
7788         case MRSAS_EVT_UNCONFIG_TGT:
7789                 if (dip) {
7790                         if (i_ddi_devi_attached(dip)) {
7791 
7792                                 pdip = ddi_get_parent(dip);
7793 
7794                                 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7795                                 (void) ddi_deviname(dip, devname);
7796 
7797                                 (void) devfs_clean(pdip, devname + 1,
7798                                     DV_CLEAN_FORCE);
7799                                 kmem_free(devname, MAXNAMELEN + 1);
7800                         }
7801                         (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7802                         con_log(CL_ANN1, (CE_NOTE,
7803                             "mr_sas: EVT_UNCONFIG_TGT called:"
7804                             " for tgt %d lun %d event %d",
7805                             mrevt->tgt, mrevt->lun, mrevt->event));
7806                 } else {
7807                         con_log(CL_ANN1, (CE_NOTE,
7808                             "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7809                             " for tgt %d lun %d event %d",
7810                             mrevt->tgt, mrevt->lun, mrevt->event));
7811                 }
7812                 break;
7813         }
7814         kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7815         ndi_devi_exit(instance->dip, circ1);
7816 }
7817 
7818 
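     /*
      * Emulate MODE SENSE for page 0x3 (format) and page 0x4 (rigid disk
      * geometry) by filling the caller's buffer with fixed values (512-byte
      * sectors, 63 sectors per track, 255 heads, 10000 rpm).  Other pages
      * are returned zeroed.
      */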
7819 int
7820 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7821 {
7822         union scsi_cdb          *cdbp;
7823         uint16_t                page_code;
7824         struct scsa_cmd         *acmd;
7825         struct buf              *bp;
7826         struct mode_header      *modehdrp;
7827 
7828         cdbp = (void *)pkt->pkt_cdbp;
7829         page_code = cdbp->cdb_un.sg.scsi[0];
7830         acmd = PKT2CMD(pkt);
7831         bp = acmd->cmd_buf;
7832         if (bp == NULL || bp->b_bcount == 0 || acmd->cmd_dmacount == 0) {
7833                 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7834                 /* ADD pkt statistics as Command failed. */
7835                 return (0);
7836         }
7837 
7838         bp_mapin(bp);
7839         bzero(bp->b_un.b_addr, bp->b_bcount);
7840 
7841         switch (page_code) {
7842                 case 0x3: {
7843                         struct mode_format *page3p = NULL;
7844                         modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7845                         modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7846 
7847                         page3p = (void *)((caddr_t)modehdrp +
7848                             MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7849                         page3p->mode_page.code = 0x3;
7850                         page3p->mode_page.length =
7851                             (uchar_t)(sizeof (struct mode_format));
7852                         page3p->data_bytes_sect = 512;
7853                         page3p->sect_track = 63;
7854                         break;
7855                 }
7856                 case 0x4: {
7857                         struct mode_geometry *page4p = NULL;
7858                         modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7859                         modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7860 
7861                         page4p = (void *)((caddr_t)modehdrp +
7862                             MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7863                         page4p->mode_page.code = 0x4;
7864                         page4p->mode_page.length =
7865                             (uchar_t)(sizeof (struct mode_geometry));
7866                         page4p->heads = 255;
7867                         page4p->rpm = 10000;
7868                         break;
7869                 }
7870                 default:
7871                         break;
7872         }
7873         return (0);
7874 }