1 /*
   2  * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
   3  * i.e. Thunderbolt and Invader
   4  *
   5  * Solaris MegaRAID device driver for SAS2.0 controllers
   6  * Copyright (c) 2008-2012, LSI Logic Corporation.
   7  * All rights reserved.
   8  *
   9  * Version:
  10  * Author:
  11  *              Swaminathan K S
  12  *              Arun Chandrashekhar
  13  *              Manju R
  14  *              Rasheed
  15  *              Shakeel Bukhari
  16  */
  17 
  18 /*
  19  * Copyright 2018 Nexenta Systems, Inc.
  20  * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
  21  * Copyright 2015 Garrett D'Amore <garrett@damore.org>
  22  */
  23 
  24 
  25 #include <sys/types.h>
  26 #include <sys/file.h>
  27 #include <sys/atomic.h>
  28 #include <sys/scsi/scsi.h>
  29 #include <sys/byteorder.h>
  30 #include <sys/sdt.h>
  31 #include "ld_pd_map.h"
  32 #include "mr_sas.h"
  33 #include "fusion.h"
  34 
  35 /*
  36  * FMA header files
  37  */
  38 #include <sys/ddifm.h>
  39 #include <sys/fm/protocol.h>
  40 #include <sys/fm/util.h>
  41 #include <sys/fm/io/ddi.h>
  42 
  43 
  44 /* Pre-TB command size and TB command size. */
  45 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
  46 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
  47 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
  48 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
  49 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
  50 extern ddi_dma_attr_t mrsas_generic_dma_attr;
  51 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
  52 extern struct ddi_device_acc_attr endian_attr;
  53 extern int      debug_level_g;
  54 extern unsigned int     enable_fp;
  55 volatile int dump_io_wait_time = 90;
  56 extern volatile int  debug_timeout_g;
  57 extern int      mrsas_issue_pending_cmds(struct mrsas_instance *);
  58 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
  59 extern void     push_pending_mfi_pkt(struct mrsas_instance *,
  60                         struct mrsas_cmd *);
  61 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
  62             MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
  63 
  64 /* Local static prototypes. */
  65 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
  66     struct scsi_address *, struct scsi_pkt *, uchar_t *);
  67 static void mrsas_tbolt_set_pd_lba(U8 *, size_t, uint8_t *, U64, U32);
  68 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
  69 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
  70 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
  71 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
  72 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
  73     struct mrsas_tbolt_pd_info *, int);
  74 
  75 static int mrsas_debug_tbolt_fw_faults_after_ocr = 0;
  76 
  77 /*
  78  * destroy_mfi_mpi_frame_pool
  79  */
  80 void
  81 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
  82 {
  83         int     i;
  84 
  85         struct mrsas_cmd        *cmd;
  86 
  87         /* return all mfi frames to pool */
  88         for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
  89                 cmd = instance->cmd_list[i];
  90                 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
  91                         (void) mrsas_free_dma_obj(instance,
  92                             cmd->frame_dma_obj);
  93                 }
  94                 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
  95         }
  96 }
  97 
  98 /*
  99  * destroy_mpi2_frame_pool
 100  */
 101 void
 102 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
 103 {
 104 
 105         if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
 106                 (void) mrsas_free_dma_obj(instance,
 107                     instance->mpi2_frame_pool_dma_obj);
 108                 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
 109         }
 110 }
 111 
 112 
 113 /*
 114  * mrsas_tbolt_free_additional_dma_buffer
 115  */
 116 void
 117 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
 118 {
 119         int i;
 120 
 121         if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
 122                 (void) mrsas_free_dma_obj(instance,
 123                     instance->mfi_internal_dma_obj);
 124                 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
 125         }
 126         if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
 127                 (void) mrsas_free_dma_obj(instance,
 128                     instance->mfi_evt_detail_obj);
 129                 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
 130         }
 131 
 132         for (i = 0; i < 2; i++) {
 133                 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
 134                         (void) mrsas_free_dma_obj(instance,
 135                             instance->ld_map_obj[i]);
 136                         instance->ld_map_obj[i].status = DMA_OBJ_FREED;
 137                 }
 138         }
 139 }
 140 
 141 
 142 /*
 143  * free_req_desc_pool
 144  */
 145 void
 146 free_req_rep_desc_pool(struct mrsas_instance *instance)
 147 {
 148         if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 149                 (void) mrsas_free_dma_obj(instance,
 150                     instance->request_desc_dma_obj);
 151                 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
 152         }
 153 
 154         if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 155                 (void) mrsas_free_dma_obj(instance,
 156                     instance->reply_desc_dma_obj);
 157                 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
 158         }
 159 
 160 
 161 }
 162 
 163 
 164 /*
 165  * ThunderBolt(TB) Request Message Frame Pool
 166  */
 167 int
 168 create_mpi2_frame_pool(struct mrsas_instance *instance)
 169 {
 170         int             i = 0;
 171         uint16_t        max_cmd;
 172         uint32_t        sgl_sz;
 173         uint32_t        raid_msg_size;
 174         uint32_t        total_size;
 175         uint32_t        offset;
 176         uint32_t        io_req_base_phys;
 177         uint8_t         *io_req_base;
 178         struct mrsas_cmd        *cmd;
 179 
 180         max_cmd = instance->max_fw_cmds;
 181 
 182         sgl_sz          = 1024;
 183         raid_msg_size   = MRSAS_THUNDERBOLT_MSG_SIZE;
 184 
 185         /* Allocating additional 256 bytes to accomodate SMID 0. */
 186         total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
 187             (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
 188 
 189         con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
 190             "max_cmd %x", max_cmd));
 191 
 192         con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
 193             "request message frame pool size %x", total_size));
 194 
 195         /*
 196          * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
 197          * and then split the memory to 1024 commands. Each command should be
 198          * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
 199          * within it. Further refer the "alloc_req_rep_desc" function where
 200          * we allocate request/reply descriptors queues for a clue.
 201          */
 202 
 203         instance->mpi2_frame_pool_dma_obj.size = total_size;
 204         instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
 205         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
 206             0xFFFFFFFFU;
 207         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
 208             0xFFFFFFFFU;
 209         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
 210         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
 211 
 212         if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
 213             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 214                 dev_err(instance->dip, CE_WARN,
 215                     "could not alloc mpi2 frame pool");
 216                 return (DDI_FAILURE);
 217         }
 218 
 219         bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
 220         instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
 221 
 222         instance->io_request_frames =
 223             (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
 224         instance->io_request_frames_phy =
 225             (uint32_t)
 226             instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
 227 
 228         con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
 229             (void *)instance->io_request_frames));
 230 
 231         con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
 232             instance->io_request_frames_phy));
 233 
 234         io_req_base = (uint8_t *)instance->io_request_frames +
 235             MRSAS_THUNDERBOLT_MSG_SIZE;
 236         io_req_base_phys = instance->io_request_frames_phy +
 237             MRSAS_THUNDERBOLT_MSG_SIZE;
 238 
 239         con_log(CL_DLEVEL3, (CE_NOTE,
 240             "io req_base_phys 0x%x", io_req_base_phys));
 241 
 242         for (i = 0; i < max_cmd; i++) {
 243                 cmd = instance->cmd_list[i];
 244 
 245                 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
 246 
 247                 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
 248                     ((uint8_t *)io_req_base + offset);
 249                 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
 250 
 251                 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
 252                     (max_cmd * raid_msg_size) + i * sgl_sz);
 253 
 254                 cmd->sgl_phys_addr = (io_req_base_phys +
 255                     (max_cmd * raid_msg_size) + i * sgl_sz);
 256 
 257                 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
 258                     (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
 259                     (i * SENSE_LENGTH));
 260 
 261                 cmd->sense_phys_addr1 = (io_req_base_phys +
 262                     (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
 263                     (i * SENSE_LENGTH));
 264 
 265 
 266                 cmd->SMID = i + 1;
 267 
 268                 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
 269                     cmd->index, (void *)cmd->scsi_io_request));
 270 
 271                 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
 272                     cmd->index, cmd->scsi_io_request_phys_addr));
 273 
 274                 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
 275                     cmd->index, (void *)cmd->sense1));
 276 
 277                 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
 278                     cmd->index, cmd->sense_phys_addr1));
 279 
 280                 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
 281                     cmd->index, (void *)cmd->sgl));
 282 
 283                 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
 284                     cmd->index, cmd->sgl_phys_addr));
 285         }
 286 
 287         return (DDI_SUCCESS);
 288 
 289 }
 290 
 291 
 292 /*
 293  * alloc_additional_dma_buffer for AEN
 294  */
 295 int
 296 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
 297 {
 298         uint32_t        internal_buf_size = PAGESIZE*2;
 299         int i;
 300 
 301         /* Initialize buffer status as free */
 302         instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
 303         instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
 304         instance->ld_map_obj[0].status = DMA_OBJ_FREED;
 305         instance->ld_map_obj[1].status = DMA_OBJ_FREED;
 306 
 307 
 308         instance->mfi_internal_dma_obj.size = internal_buf_size;
 309         instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
 310         instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 311         instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
 312             0xFFFFFFFFU;
 313         instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
 314 
 315         if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
 316             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 317                 dev_err(instance->dip, CE_WARN,
 318                     "could not alloc reply queue");
 319                 return (DDI_FAILURE);
 320         }
 321 
 322         bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
 323 
 324         instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
 325         instance->internal_buf =
 326             (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
 327         instance->internal_buf_dmac_add =
 328             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
 329         instance->internal_buf_size = internal_buf_size;
 330 
 331         /* allocate evt_detail */
 332         instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
 333         instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
 334         instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 335         instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 336         instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
 337         instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
 338 
 339         if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
 340             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 341                 dev_err(instance->dip, CE_WARN,
 342                     "mrsas_tbolt_alloc_additional_dma_buffer: "
 343                     "could not allocate data transfer buffer.");
 344                 goto fail_tbolt_additional_buff;
 345         }
 346 
 347         bzero(instance->mfi_evt_detail_obj.buffer,
 348             sizeof (struct mrsas_evt_detail));
 349 
 350         instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
 351 
 352         instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
 353             (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
 354 
 355         for (i = 0; i < 2; i++) {
 356                 /* allocate the data transfer buffer */
 357                 instance->ld_map_obj[i].size = instance->size_map_info;
 358                 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
 359                 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 360                 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
 361                     0xFFFFFFFFU;
 362                 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
 363                 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
 364 
 365                 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
 366                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 367                         dev_err(instance->dip, CE_WARN,
 368                             "could not allocate data transfer buffer.");
 369                         goto fail_tbolt_additional_buff;
 370                 }
 371 
 372                 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
 373 
 374                 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
 375 
 376                 instance->ld_map[i] =
 377                     (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
 378                 instance->ld_map_phy[i] = (uint32_t)instance->
 379                     ld_map_obj[i].dma_cookie[0].dmac_address;
 380 
 381                 con_log(CL_DLEVEL3, (CE_NOTE,
 382                     "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
 383 
 384                 con_log(CL_DLEVEL3, (CE_NOTE,
 385                     "size_map_info 0x%x", instance->size_map_info));
 386         }
 387 
 388         return (DDI_SUCCESS);
 389 
 390 fail_tbolt_additional_buff:
 391         mrsas_tbolt_free_additional_dma_buffer(instance);
 392 
 393         return (DDI_FAILURE);
 394 }
 395 
/*
 * mr_sas_get_request_descriptor
 *
 * Return a pointer to the request descriptor slot at 'index' within
 * the request message pool, or NULL if the index is out of range.
 *
 * NOTE(review): the bounds check accepts index == max_fw_cmds, which
 * addresses one slot past the request_q_sz allocated in
 * alloc_req_rep_desc() (8 * max_fw_cmds bytes).  Confirm whether any
 * caller can pass that value; if not, ">=" would be the tighter check.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
{
        MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

        if (index > instance->max_fw_cmds) {
                con_log(CL_ANN1, (CE_NOTE,
                    "Invalid SMID 0x%x request for descriptor", index));
                con_log(CL_ANN1, (CE_NOTE,
                    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
                return (NULL);
        }

        /* Descriptors are a flat array of 8-byte unions in the pool. */
        req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
            ((char *)instance->request_message_pool +
            (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));

        con_log(CL_ANN1, (CE_NOTE,
            "request descriptor : 0x%08lx", (unsigned long)req_desc));

        con_log(CL_ANN1, (CE_NOTE,
            "request descriptor base phy : 0x%08lx",
            (unsigned long)instance->request_message_pool_phy));

        return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}
 422 
 423 
 424 /*
 425  * Allocate Request and Reply  Queue Descriptors.
 426  */
 427 int
 428 alloc_req_rep_desc(struct mrsas_instance *instance)
 429 {
 430         uint32_t        request_q_sz, reply_q_sz;
 431         int             i, max_reply_q_sz;
 432         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
 433 
 434         /*
 435          * ThunderBolt(TB) There's no longer producer consumer mechanism.
 436          * Once we have an interrupt we are supposed to scan through the list of
 437          * reply descriptors and process them accordingly. We would be needing
 438          * to allocate memory for 1024 reply descriptors
 439          */
 440 
 441         /* Allocate Reply Descriptors */
 442         con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
 443             (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
 444 
 445         /* reply queue size should be multiple of 16 */
 446         max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
 447 
 448         reply_q_sz = 8 * max_reply_q_sz;
 449 
 450 
 451         con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
 452             (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
 453 
 454         instance->reply_desc_dma_obj.size = reply_q_sz;
 455         instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
 456         instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 457         instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 458         instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
 459         instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
 460 
 461         if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
 462             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 463                 dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
 464                 return (DDI_FAILURE);
 465         }
 466 
 467         bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
 468         instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
 469 
 470         /* virtual address of  reply queue */
 471         instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
 472             instance->reply_desc_dma_obj.buffer);
 473 
 474         instance->reply_q_depth = max_reply_q_sz;
 475 
 476         con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
 477             instance->reply_q_depth));
 478 
 479         con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
 480             (void *)instance->reply_frame_pool));
 481 
 482         /* initializing reply address to 0xFFFFFFFF */
 483         reply_desc = instance->reply_frame_pool;
 484 
 485         for (i = 0; i < instance->reply_q_depth; i++) {
 486                 reply_desc->Words = (uint64_t)~0;
 487                 reply_desc++;
 488         }
 489 
 490 
 491         instance->reply_frame_pool_phy =
 492             (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
 493 
 494         con_log(CL_ANN1, (CE_NOTE,
 495             "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
 496 
 497 
 498         instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
 499             reply_q_sz);
 500 
 501         con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
 502             instance->reply_pool_limit_phy));
 503 
 504 
 505         con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
 506             (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
 507 
 508         /* Allocate Request Descriptors */
 509         con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
 510             (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
 511 
 512         request_q_sz = 8 *
 513             (instance->max_fw_cmds);
 514 
 515         instance->request_desc_dma_obj.size = request_q_sz;
 516         instance->request_desc_dma_obj.dma_attr      = mrsas_generic_dma_attr;
 517         instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 518         instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
 519             0xFFFFFFFFU;
 520         instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen      = 1;
 521         instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
 522 
 523         if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
 524             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 525                 dev_err(instance->dip, CE_WARN,
 526                     "could not alloc request queue desc");
 527                 goto fail_undo_reply_queue;
 528         }
 529 
 530         bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
 531         instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
 532 
 533         /* virtual address of  request queue desc */
 534         instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
 535             (instance->request_desc_dma_obj.buffer);
 536 
 537         instance->request_message_pool_phy =
 538             (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
 539 
 540         return (DDI_SUCCESS);
 541 
 542 fail_undo_reply_queue:
 543         if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 544                 (void) mrsas_free_dma_obj(instance,
 545                     instance->reply_desc_dma_obj);
 546                 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
 547         }
 548 
 549         return (DDI_FAILURE);
 550 }
 551 
 552 /*
 553  * mrsas_alloc_cmd_pool_tbolt
 554  *
 555  * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
 556  * routine
 557  */
 558 int
 559 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
 560 {
 561         int             i;
 562         int             count;
 563         uint32_t        max_cmd;
 564         uint32_t        reserve_cmd;
 565         size_t          sz;
 566 
 567         struct mrsas_cmd        *cmd;
 568 
 569         max_cmd = instance->max_fw_cmds;
 570         con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
 571             "max_cmd %x", max_cmd));
 572 
 573 
 574         sz = sizeof (struct mrsas_cmd *) * max_cmd;
 575 
 576         /*
 577          * instance->cmd_list is an array of struct mrsas_cmd pointers.
 578          * Allocate the dynamic array first and then allocate individual
 579          * commands.
 580          */
 581         instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
 582 
 583         /* create a frame pool and assign one frame to each cmd */
 584         for (count = 0; count < max_cmd; count++) {
 585                 instance->cmd_list[count] =
 586                     kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
 587         }
 588 
 589         /* add all the commands to command pool */
 590 
 591         INIT_LIST_HEAD(&instance->cmd_pool_list);
 592         INIT_LIST_HEAD(&instance->cmd_pend_list);
 593         INIT_LIST_HEAD(&instance->cmd_app_pool_list);
 594 
 595         reserve_cmd = MRSAS_APP_RESERVED_CMDS;
 596 
 597         /* cmd index 0 reservered for IOC INIT */
 598         for (i = 1; i < reserve_cmd; i++) {
 599                 cmd             = instance->cmd_list[i];
 600                 cmd->index   = i;
 601                 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
 602         }
 603 
 604 
 605         for (i = reserve_cmd; i < max_cmd; i++) {
 606                 cmd             = instance->cmd_list[i];
 607                 cmd->index   = i;
 608                 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
 609         }
 610 
 611         return (DDI_SUCCESS);
 612 
 613 mrsas_undo_cmds:
 614         if (count > 0) {
 615                 /* free each cmd */
 616                 for (i = 0; i < count; i++) {
 617                         if (instance->cmd_list[i] != NULL) {
 618                                 kmem_free(instance->cmd_list[i],
 619                                     sizeof (struct mrsas_cmd));
 620                         }
 621                         instance->cmd_list[i] = NULL;
 622                 }
 623         }
 624 
 625 mrsas_undo_cmd_list:
 626         if (instance->cmd_list != NULL)
 627                 kmem_free(instance->cmd_list, sz);
 628         instance->cmd_list = NULL;
 629 
 630         return (DDI_FAILURE);
 631 }
 632 
 633 
 634 /*
 635  * free_space_for_mpi2
 636  */
 637 void
 638 free_space_for_mpi2(struct mrsas_instance *instance)
 639 {
 640         /* already freed */
 641         if (instance->cmd_list == NULL) {
 642                 return;
 643         }
 644 
 645         /* First free the additional DMA buffer */
 646         mrsas_tbolt_free_additional_dma_buffer(instance);
 647 
 648         /* Free the request/reply descriptor pool */
 649         free_req_rep_desc_pool(instance);
 650 
 651         /*  Free the MPI message pool */
 652         destroy_mpi2_frame_pool(instance);
 653 
 654         /* Free the MFI frame pool */
 655         destroy_mfi_frame_pool(instance);
 656 
 657         /* Free all the commands in the cmd_list */
 658         /* Free the cmd_list buffer itself */
 659         mrsas_free_cmd_pool(instance);
 660 }
 661 
 662 
 663 /*
 664  * ThunderBolt(TB) memory allocations for commands/messages/frames.
 665  */
 666 int
 667 alloc_space_for_mpi2(struct mrsas_instance *instance)
 668 {
 669         /* Allocate command pool (memory for cmd_list & individual commands) */
 670         if (mrsas_alloc_cmd_pool_tbolt(instance)) {
 671                 dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
 672                 return (DDI_FAILURE);
 673         }
 674 
 675         /* Initialize single reply size and Message size */
 676         instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
 677         instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
 678 
 679         instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
 680             (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
 681             sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
 682         instance->max_sge_in_chain = (MR_COMMAND_SIZE -
 683             MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
 684 
 685         /* Reduce SG count by 1 to take care of group cmds feature in FW */
 686         instance->max_num_sge = (instance->max_sge_in_main_msg +
 687             instance->max_sge_in_chain - 2);
 688         instance->chain_offset_mpt_msg =
 689             offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
 690         instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
 691             sizeof (MPI2_SGE_IO_UNION)) / 16;
 692         instance->reply_read_index = 0;
 693 
 694 
 695         /* Allocate Request and Reply descriptors Array */
 696         /* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
 697         if (alloc_req_rep_desc(instance)) {
 698                 dev_err(instance->dip, CE_WARN,
 699                     "Error, allocating memory for descripter-pool");
 700                 goto mpi2_undo_cmd_pool;
 701         }
 702         con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
 703             instance->request_message_pool_phy));
 704 
 705 
 706         /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
 707         if (create_mfi_frame_pool(instance)) {
 708                 dev_err(instance->dip, CE_WARN,
 709                     "Error, allocating memory for MFI frame-pool");
 710                 goto mpi2_undo_descripter_pool;
 711         }
 712 
 713 
 714         /* Allocate MPI2 Message pool */
 715         /*
 716          * Make sure the buffer is alligned to 256 for raid message packet
 717          * create a io request pool and assign one frame to each cmd
 718          */
 719 
 720         if (create_mpi2_frame_pool(instance)) {
 721                 dev_err(instance->dip, CE_WARN,
 722                     "Error, allocating memory for MPI2 Message-pool");
 723                 goto mpi2_undo_mfi_frame_pool;
 724         }
 725 
 726 #ifdef DEBUG
 727         con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
 728             instance->max_sge_in_main_msg));
 729         con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
 730             instance->max_sge_in_chain));
 731         con_log(CL_ANN1, (CE_CONT,
 732             "[max_sge]0x%x", instance->max_num_sge));
 733         con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
 734             instance->chain_offset_mpt_msg));
 735         con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
 736             instance->chain_offset_io_req));
 737 #endif
 738 
 739 
 740         /* Allocate additional dma buffer */
 741         if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
 742                 dev_err(instance->dip, CE_WARN,
 743                     "Error, allocating tbolt additional DMA buffer");
 744                 goto mpi2_undo_message_pool;
 745         }
 746 
 747         return (DDI_SUCCESS);
 748 
 749 mpi2_undo_message_pool:
 750         destroy_mpi2_frame_pool(instance);
 751 
 752 mpi2_undo_mfi_frame_pool:
 753         destroy_mfi_frame_pool(instance);
 754 
 755 mpi2_undo_descripter_pool:
 756         free_req_rep_desc_pool(instance);
 757 
 758 mpi2_undo_cmd_pool:
 759         mrsas_free_cmd_pool(instance);
 760 
 761         return (DDI_FAILURE);
 762 }
 763 
 764 
 765 /*
 766  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 767  */
 768 int
 769 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
 770 {
 771 
 772         /*
 773          * Reduce the max supported cmds by 1. This is to ensure that the
 774          * reply_q_sz (1 more than the max cmd that driver may send)
 775          * does not exceed max cmds that the FW can support
 776          */
 777 
 778         if (instance->max_fw_cmds > 1008) {
 779                 instance->max_fw_cmds = 1008;
 780                 instance->max_fw_cmds = instance->max_fw_cmds-1;
 781         }
 782 
 783         con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
 784             "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
 785 
 786 
 787         /* create a pool of commands */
 788         if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
 789                 dev_err(instance->dip, CE_WARN,
 790                     "alloc_space_for_mpi2() failed.");
 791 
 792                 return (DDI_FAILURE);
 793         }
 794 
 795         /* Send ioc init message */
 796         /* NOTE: the issue_init call does FMA checking already. */
 797         if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
 798                 dev_err(instance->dip, CE_WARN,
 799                     "mrsas_issue_init_mpi2() failed.");
 800 
 801                 goto fail_init_fusion;
 802         }
 803 
 804         instance->unroll.alloc_space_mpi2 = 1;
 805 
 806         con_log(CL_ANN, (CE_NOTE,
 807             "mrsas_init_adapter_tbolt: SUCCESSFUL"));
 808 
 809         return (DDI_SUCCESS);
 810 
 811 fail_init_fusion:
 812         free_space_for_mpi2(instance);
 813 
 814         return (DDI_FAILURE);
 815 }
 816 
 817 
 818 
 819 /*
 820  * init_mpi2
 821  */
 822 int
 823 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
 824 {
 825         dma_obj_t init2_dma_obj;
 826         int ret_val = DDI_SUCCESS;
 827 
 828         /* allocate DMA buffer for IOC INIT message */
 829         init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
 830         init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
 831         init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 832         init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 833         init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
 834         init2_dma_obj.dma_attr.dma_attr_align = 256;
 835 
 836         if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
 837             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 838                 dev_err(instance->dip, CE_WARN, "mr_sas_issue_init_mpi2 "
 839                     "could not allocate data transfer buffer.");
 840                 return (DDI_FAILURE);
 841         }
 842         (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
 843 
 844         con_log(CL_ANN1, (CE_NOTE,
 845             "mrsas_issue_init_mpi2 _phys adr: %x",
 846             init2_dma_obj.dma_cookie[0].dmac_address));
 847 
 848 
 849         /* Initialize and send ioc init message */
 850         ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
 851         if (ret_val == DDI_FAILURE) {
 852                 con_log(CL_ANN1, (CE_WARN,
 853                     "mrsas_issue_init_mpi2: Failed"));
 854                 goto fail_init_mpi2;
 855         }
 856 
 857         /* free IOC init DMA buffer */
 858         if (mrsas_free_dma_obj(instance, init2_dma_obj)
 859             != DDI_SUCCESS) {
 860                 con_log(CL_ANN1, (CE_WARN,
 861                     "mrsas_issue_init_mpi2: Free Failed"));
 862                 return (DDI_FAILURE);
 863         }
 864 
 865         /* Get/Check and sync ld_map info */
 866         instance->map_id = 0;
 867         if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
 868                 (void) mrsas_tbolt_sync_map_info(instance);
 869 
 870 
 871         /* No mrsas_cmd to send, so send NULL. */
 872         if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
 873                 goto fail_init_mpi2;
 874 
 875         con_log(CL_ANN, (CE_NOTE,
 876             "mrsas_issue_init_mpi2: SUCCESSFUL"));
 877 
 878         return (DDI_SUCCESS);
 879 
 880 fail_init_mpi2:
 881         (void) mrsas_free_dma_obj(instance, init2_dma_obj);
 882 
 883         return (DDI_FAILURE);
 884 }
 885 
/*
 * mrsas_tbolt_ioc_init - send the MPI2 IOC INIT request to the firmware.
 *
 * Populates the Mpi2IOCInitRequest_t in the caller-supplied DMA buffer
 * (mpi2_dma_obj), wraps its physical address in an MFI init frame
 * (struct mrsas_init_frame2) carried by the reserved command
 * cmd_list[0], and issues that frame through the inbound queue port
 * registers in poll mode with interrupts disabled.  The driver version
 * string is also passed to the firmware via a separately allocated DMA
 * buffer (instance->drv_ver_dma_obj).
 *
 * Returns DDI_SUCCESS when the firmware reports status 0 for the init
 * frame; DDI_FAILURE otherwise (in which case the driver version buffer
 * is freed here; on success it is retained and instance->unroll.verBuff
 * is set so it is released at detach time).
 */
static int
mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
	int				numbytes;
	uint16_t			flags;
	struct mrsas_init_frame2	*mfiFrameInit2;
	struct mrsas_header		*frame_hdr;
	Mpi2IOCInitRequest_t		*init;
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_drv_ver		drv_ver_info;
	MRSAS_REQUEST_DESCRIPTOR_UNION	req_desc;
	uint32_t			timeout;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (*mfiFrameInit2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (struct mrsas_init_frame2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
	    (int)sizeof (Mpi2IOCInitRequest_t)));
#endif

	/* Build the IOC INIT request in the DMA buffer, little-endian. */
	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
	numbytes = sizeof (*init);
	bzero(init, numbytes);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
	    MPI2_FUNCTION_IOC_INIT);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
	    MPI2_WHOINIT_HOST_DRIVER);

	/* set MsgVersion and HeaderVersion host driver was built with */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
	    MPI2_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
	    MPI2_HEADER_VERSION);

	/* Frame size is expressed to the firmware in 32-bit words. */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
	    instance->raid_io_msg_size / 4);

	/* The reply free queue is unused; depth 0, address 0 below. */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
	    0);

	ddi_put16(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueDepth,
	    instance->reply_q_depth);
	/*
	 * These addresses are set using the DMA cookie addresses from when the
	 * memory was allocated.  Sense buffer hi address should be 0.
	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
	 */

	ddi_put32(mpi2_dma_obj->acc_handle,
	    &init->SenseBufferAddressHigh, 0);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
	    instance->io_request_frames_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueAddress,
	    instance->reply_frame_pool_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyFreeQueueAddress, 0);

	/* cmd_list[0] is reserved for internal frames such as this one. */
	cmd = instance->cmd_list[0];
	if (cmd == NULL) {
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	cmd->pkt = NULL;
	cmd->drv_pkt_time = 0;

	/* The MFI init frame lives in the command's scsi_io_request slot. */
	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));

	frame_hdr = &cmd->frame->hdr;

	/*
	 * Prime the status byte with the poll-mode sentinel; the poll loop
	 * below waits for the firmware to overwrite it.
	 */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);

	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);

	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN, (CE_CONT,
	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));

	/* Init the MFI Header */
	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);

	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status,
	    MFI_STAT_INVALID_STATUS);

	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));

	/* Point the MFI frame at the IOC INIT request built above. */
	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
	    mpi2_dma_obj->dma_cookie[0].dmac_address);

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->data_xfer_len,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
	    (int)init->ReplyDescriptorPostQueueAddress));

	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "fusion init: Could not allocate driver version buffer.");
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);

	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
	    mfiFrameInit2->queue_info_new_phys_addr_lo,
	    (int)sizeof (Mpi2IOCInitRequest_t)));

	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));

	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
	    cmd->scsi_io_request_phys_addr,
	    (int)sizeof (struct mrsas_init_frame2)));

	/* disable interrupts before sending INIT2 frame */
	instance->func_ptr->disable_intr(instance);

	/* Describe the MFI frame to the firmware as an MFA descriptor. */
	req_desc.Words = cmd->scsi_io_request_phys_addr;
	req_desc.MFAIo.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = &req_desc;

	/* issue the init frame */

	/* Low/high queue port writes must not be interleaved; serialize. */
	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);

	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
	    frame_hdr->cmd_status));

	/*
	 * Poll (in clock ticks, one delay(1) tick per iteration) until the
	 * firmware overwrites the POLL_MODE sentinel or the timeout lapses.
	 * NOTE(review): this loop reads cmd_status through
	 * cmd->frame_dma_obj.acc_handle while the final check below uses
	 * instance->mpi2_frame_pool_dma_obj.acc_handle for the same field;
	 * mfiFrameInit2 points into cmd->scsi_io_request, so presumably one
	 * of the two handles is the right one -- confirm which mapping owns
	 * that memory.
	 */
	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
	do {
		if (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
			break;
		delay(1);
		timeout--;
	} while (timeout > 0);

	/* Status 0 from the firmware means the IOC INIT was accepted. */
	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status) == 0) {
		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
	} else {
		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
		mrsas_dump_reply_desc(instance);
		goto fail_ioc_init;
	}

	mrsas_dump_reply_desc(instance);

	/* Keep the version buffer; detach unrolls it via this flag. */
	instance->unroll.verBuff = 1;

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));

	return (DDI_SUCCESS);


fail_ioc_init:

	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

	return (DDI_FAILURE);
}
1097 
1098 int
1099 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1100 {
1101         int i;
1102         uint32_t wait_time = dump_io_wait_time;
1103         for (i = 0; i < wait_time; i++) {
1104                 /*
1105                  * Check For Outstanding poll Commands
1106                  * except ldsync command and aen command
1107                  */
1108                 if (instance->fw_outstanding <= 2) {
1109                         break;
1110                 }
1111                 drv_usecwait(MILLISEC);
1112                 /* complete commands from reply queue */
1113                 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1114         }
1115         if (instance->fw_outstanding > 2) {
1116                 return (1);
1117         }
1118         return (0);
1119 }
1120 /*
1121  * scsi_pkt handling
1122  *
1123  * Visible to the external world via the transport structure.
1124  */
1125 
1126 int
1127 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1128 {
1129         struct mrsas_instance   *instance = ADDR2MR(ap);
1130         struct scsa_cmd         *acmd = PKT2CMD(pkt);
1131         struct mrsas_cmd        *cmd = NULL;
1132         uchar_t                 cmd_done = 0;
1133 
1134         con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1135         if (instance->deadadapter == 1) {
1136                 dev_err(instance->dip, CE_WARN,
1137                     "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1138                     "for IO, as the HBA doesnt take any more IOs");
1139                 if (pkt) {
1140                         pkt->pkt_reason              = CMD_DEV_GONE;
1141                         pkt->pkt_statistics  = STAT_DISCON;
1142                 }
1143                 return (TRAN_FATAL_ERROR);
1144         }
1145         if (instance->adapterresetinprogress) {
1146                 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1147                     "returning mfi_pkt and setting TRAN_BUSY\n"));
1148                 return (TRAN_BUSY);
1149         }
1150         (void) mrsas_tbolt_prepare_pkt(acmd);
1151 
1152         cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1153 
1154         /*
1155          * Check if the command is already completed by the mrsas_build_cmd()
1156          * routine. In which case the busy_flag would be clear and scb will be
1157          * NULL and appropriate reason provided in pkt_reason field
1158          */
1159         if (cmd_done) {
1160                 pkt->pkt_reason = CMD_CMPLT;
1161                 pkt->pkt_scbp[0] = STATUS_GOOD;
1162                 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1163                     | STATE_SENT_CMD;
1164                 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1165                         (*pkt->pkt_comp)(pkt);
1166                 }
1167 
1168                 return (TRAN_ACCEPT);
1169         }
1170 
1171         if (cmd == NULL) {
1172                 return (TRAN_BUSY);
1173         }
1174 
1175 
1176         if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1177                 if (instance->fw_outstanding > instance->max_fw_cmds) {
1178                         dev_err(instance->dip, CE_WARN,
1179                             "Command Queue Full... Returning BUSY");
1180                         DTRACE_PROBE2(tbolt_start_tran_err,
1181                             uint16_t, instance->fw_outstanding,
1182                             uint16_t, instance->max_fw_cmds);
1183                         return_raid_msg_pkt(instance, cmd);
1184                         return (TRAN_BUSY);
1185                 }
1186 
1187                 /* Synchronize the Cmd frame for the controller */
1188                 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1189                     DDI_DMA_SYNC_FORDEV);
1190 
1191                 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1192                     "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1193                     cmd->index, cmd->SMID));
1194 
1195                 instance->func_ptr->issue_cmd(cmd, instance);
1196         } else {
1197                 instance->func_ptr->issue_cmd(cmd, instance);
1198                 (void) wait_for_outstanding_poll_io(instance);
1199                 (void) mrsas_common_check(instance, cmd);
1200                 DTRACE_PROBE2(tbolt_start_nointr_done,
1201                     uint8_t, cmd->frame->hdr.cmd,
1202                     uint8_t, cmd->frame->hdr.cmd_status);
1203         }
1204 
1205         return (TRAN_ACCEPT);
1206 }
1207 
1208 /*
1209  * prepare the pkt:
1210  * the pkt may have been resubmitted or just reused so
1211  * initialize some fields and do some checks.
1212  */
1213 static int
1214 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1215 {
1216         struct scsi_pkt *pkt = CMD2PKT(acmd);
1217 
1218 
1219         /*
1220          * Reinitialize some fields that need it; the packet may
1221          * have been resubmitted
1222          */
1223         pkt->pkt_reason = CMD_CMPLT;
1224         pkt->pkt_state = 0;
1225         pkt->pkt_statistics = 0;
1226         pkt->pkt_resid = 0;
1227 
1228         /*
1229          * zero status byte.
1230          */
1231         *(pkt->pkt_scbp) = 0;
1232 
1233         return (0);
1234 }
1235 
1236 
1237 int
1238 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1239     struct scsa_cmd *acmd,
1240     struct mrsas_cmd *cmd,
1241     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1242     uint32_t *datalen)
1243 {
1244         uint32_t                MaxSGEs;
1245         int                     sg_to_process;
1246         uint32_t                i, j;
1247         uint32_t                numElements, endElement;
1248         Mpi25IeeeSgeChain64_t   *ieeeChainElement = NULL;
1249         Mpi25IeeeSgeChain64_t   *scsi_raid_io_sgl_ieee = NULL;
1250         ddi_acc_handle_t acc_handle =
1251             instance->mpi2_frame_pool_dma_obj.acc_handle;
1252 
1253         con_log(CL_ANN1, (CE_NOTE,
1254             "chkpnt: Building Chained SGL :%d", __LINE__));
1255 
1256         /* Calulate SGE size in number of Words(32bit) */
1257         /* Clear the datalen before updating it. */
1258         *datalen = 0;
1259 
1260         MaxSGEs = instance->max_sge_in_main_msg;
1261 
1262         ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1263             MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1264 
1265         /* set data transfer flag. */
1266         if (acmd->cmd_flags & CFLAG_DMASEND) {
1267                 ddi_put32(acc_handle, &scsi_raid_io->Control,
1268                     MPI2_SCSIIO_CONTROL_WRITE);
1269         } else {
1270                 ddi_put32(acc_handle, &scsi_raid_io->Control,
1271                     MPI2_SCSIIO_CONTROL_READ);
1272         }
1273 
1274 
1275         numElements = acmd->cmd_cookiecnt;
1276 
1277         con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1278 
1279         if (numElements > instance->max_num_sge) {
1280                 con_log(CL_ANN, (CE_NOTE,
1281                     "[Max SGE Count Exceeded]:%x", numElements));
1282                 return (numElements);
1283         }
1284 
1285         ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1286             (uint8_t)numElements);
1287 
1288         /* set end element in main message frame */
1289         endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1290 
1291         /* prepare the scatter-gather list for the firmware */
1292         scsi_raid_io_sgl_ieee =
1293             (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1294 
1295         if (instance->gen3) {
1296                 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1297                 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1298 
1299                 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1300         }
1301 
1302         for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1303                 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1304                     acmd->cmd_dmacookies[i].dmac_laddress);
1305 
1306                 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1307                     acmd->cmd_dmacookies[i].dmac_size);
1308 
1309                 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1310 
1311                 if (instance->gen3) {
1312                         if (i == (numElements - 1)) {
1313                                 ddi_put8(acc_handle,
1314                                     &scsi_raid_io_sgl_ieee->Flags,
1315                                     IEEE_SGE_FLAGS_END_OF_LIST);
1316                         }
1317                 }
1318 
1319                 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1320 
1321 #ifdef DEBUG
1322                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1323                     scsi_raid_io_sgl_ieee->Address));
1324                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1325                     scsi_raid_io_sgl_ieee->Length));
1326                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1327                     scsi_raid_io_sgl_ieee->Flags));
1328 #endif
1329 
1330         }
1331 
1332         ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1333 
1334         /* check if chained SGL required */
1335         if (i < numElements) {
1336 
1337                 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1338 
1339                 if (instance->gen3) {
1340                         uint16_t ioFlags =
1341                             ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1342 
1343                         if ((ioFlags &
1344                             MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1345                             MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1346                                 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1347                                     (U8)instance->chain_offset_io_req);
1348                         } else {
1349                                 ddi_put8(acc_handle,
1350                                     &scsi_raid_io->ChainOffset, 0);
1351                         }
1352                 } else {
1353                         ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1354                             (U8)instance->chain_offset_io_req);
1355                 }
1356 
1357                 /* prepare physical chain element */
1358                 ieeeChainElement = scsi_raid_io_sgl_ieee;
1359 
1360                 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1361 
1362                 if (instance->gen3) {
1363                         ddi_put8(acc_handle, &ieeeChainElement->Flags,
1364                             IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1365                 } else {
1366                         ddi_put8(acc_handle, &ieeeChainElement->Flags,
1367                             (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1368                             MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1369                 }
1370 
1371                 ddi_put32(acc_handle, &ieeeChainElement->Length,
1372                     (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1373 
1374                 ddi_put64(acc_handle, &ieeeChainElement->Address,
1375                     (U64)cmd->sgl_phys_addr);
1376 
1377                 sg_to_process = numElements - i;
1378 
1379                 con_log(CL_ANN1, (CE_NOTE,
1380                     "[Additional SGE Count]:%x", endElement));
1381 
1382                 /* point to the chained SGL buffer */
1383                 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1384 
1385                 /* build rest of the SGL in chained buffer */
1386                 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1387                         con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1388 
1389                         ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1390                             acmd->cmd_dmacookies[i].dmac_laddress);
1391 
1392                         ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1393                             acmd->cmd_dmacookies[i].dmac_size);
1394 
1395                         ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1396 
1397                         if (instance->gen3) {
1398                                 if (i == (numElements - 1)) {
1399                                         ddi_put8(acc_handle,
1400                                             &scsi_raid_io_sgl_ieee->Flags,
1401                                             IEEE_SGE_FLAGS_END_OF_LIST);
1402                                 }
1403                         }
1404 
1405                         *datalen += acmd->cmd_dmacookies[i].dmac_size;
1406 
1407 #if DEBUG
1408                         con_log(CL_DLEVEL1, (CE_NOTE,
1409                             "[SGL Address]: %" PRIx64,
1410                             scsi_raid_io_sgl_ieee->Address));
1411                         con_log(CL_DLEVEL1, (CE_NOTE,
1412                             "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1413                         con_log(CL_DLEVEL1, (CE_NOTE,
1414                             "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1415 #endif
1416 
1417                         i++;
1418                 }
1419         }
1420 
1421         return (0);
1422 } /*end of BuildScatterGather */
1423 
1424 
1425 /*
1426  * build_cmd
1427  */
1428 static struct mrsas_cmd *
1429 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1430     struct scsi_pkt *pkt, uchar_t *cmd_done)
1431 {
1432         uint8_t         fp_possible = 0;
1433         uint32_t        index;
1434         uint32_t        lba_count = 0;
1435         uint32_t        start_lba_hi = 0;
1436         uint32_t        start_lba_lo = 0;
1437         ddi_acc_handle_t acc_handle =
1438             instance->mpi2_frame_pool_dma_obj.acc_handle;
1439         struct mrsas_cmd                *cmd = NULL;
1440         struct scsa_cmd                 *acmd = PKT2CMD(pkt);
1441         MRSAS_REQUEST_DESCRIPTOR_UNION  *ReqDescUnion;
1442         Mpi2RaidSCSIIORequest_t         *scsi_raid_io;
1443         uint32_t                        datalen;
1444         struct IO_REQUEST_INFO io_info;
1445         MR_FW_RAID_MAP_ALL *local_map_ptr;
1446         uint16_t pd_cmd_cdblen;
1447 
1448         con_log(CL_DLEVEL1, (CE_NOTE,
1449             "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1450 
1451         /* find out if this is logical or physical drive command.  */
1452         acmd->islogical = MRDRV_IS_LOGICAL(ap);
1453         acmd->device_id = MAP_DEVICE_ID(instance, ap);
1454 
1455         *cmd_done = 0;
1456 
1457         /* get the command packet */
1458         if (!(cmd = get_raid_msg_pkt(instance))) {
1459                 DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
1460                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
1461                 return (NULL);
1462         }
1463 
1464         index = cmd->index;
1465         ReqDescUnion =  mr_sas_get_request_descriptor(instance, index);
1466         ReqDescUnion->Words = 0;
1467         ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1468         ReqDescUnion->SCSIIO.RequestFlags =
1469             (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1470             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1471 
1472 
1473         cmd->request_desc = ReqDescUnion;
1474         cmd->pkt = pkt;
1475         cmd->cmd = acmd;
1476 
1477         DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
1478             ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
1479             uint16_t, acmd->device_id);
1480 
1481         /* lets get the command directions */
1482         if (acmd->cmd_flags & CFLAG_DMASEND) {
1483                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1484                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
1485                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
1486                             DDI_DMA_SYNC_FORDEV);
1487                 }
1488         } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1489                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1490                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
1491                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
1492                             DDI_DMA_SYNC_FORCPU);
1493                 }
1494         } else {
1495                 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1496         }
1497 
1498 
1499         /* get SCSI_IO raid message frame pointer */
1500         scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1501 
1502         /* zero out SCSI_IO raid message frame */
1503         bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1504 
1505         /* Set the ldTargetId set by BuildRaidContext() */
1506         ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1507             acmd->device_id);
1508 
1509         /*  Copy CDB to scsi_io_request message frame */
1510         ddi_rep_put8(acc_handle,
1511             (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1512             acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1513 
1514         /*
1515          * Just the CDB length, rest of the Flags are zero
1516          * This will be modified later.
1517          */
1518         ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1519 
1520         pd_cmd_cdblen = acmd->cmd_cdblen;
1521 
1522         if (acmd->islogical) {
1523 
1524                 switch (pkt->pkt_cdbp[0]) {
1525                 case SCMD_READ:
1526                 case SCMD_WRITE:
1527                 case SCMD_READ_G1:
1528                 case SCMD_WRITE_G1:
1529                 case SCMD_READ_G4:
1530                 case SCMD_WRITE_G4:
1531                 case SCMD_READ_G5:
1532                 case SCMD_WRITE_G5:
1533 
1534                         /* Initialize sense Information */
1535                         if (cmd->sense1 == NULL) {
1536                                 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1537                                     "Sense buffer ptr NULL "));
1538                         }
1539                         bzero(cmd->sense1, SENSE_LENGTH);
1540                         con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1541                             "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1542 
1543                         if (acmd->cmd_cdblen == CDB_GROUP0) {
1544                                 /* 6-byte cdb */
1545                                 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1546                                 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1547                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1548                                     ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1549                                     << 16));
1550                         } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1551                                 /* 10-byte cdb */
1552                                 lba_count =
1553                                     (((uint16_t)(pkt->pkt_cdbp[8])) |
1554                                     ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1555 
1556                                 start_lba_lo =
1557                                     (((uint32_t)(pkt->pkt_cdbp[5])) |
1558                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1559                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1560                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1561 
1562                         } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1563                                 /* 12-byte cdb */
1564                                 lba_count = (
1565                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
1566                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1567                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1568                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1569 
1570                                 start_lba_lo =
1571                                     (((uint32_t)(pkt->pkt_cdbp[5])) |
1572                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1573                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1574                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1575 
1576                         } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1577                                 /* 16-byte cdb */
1578                                 lba_count = (
1579                                     ((uint32_t)(pkt->pkt_cdbp[13])) |
1580                                     ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1581                                     ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1582                                     ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1583 
1584                                 start_lba_lo = (
1585                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
1586                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1587                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1588                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1589 
1590                                 start_lba_hi = (
1591                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
1592                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1593                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1594                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1595                         }
1596 
1597                         if (instance->tbolt &&
1598                             ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1599                                 dev_err(instance->dip, CE_WARN,
1600                                     "IO SECTOR COUNT exceeds "
1601                                     "controller limit 0x%x sectors",
1602                                     lba_count);
1603                         }
1604 
1605                         bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1606                         io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1607                             start_lba_lo;
1608                         io_info.numBlocks = lba_count;
1609                         io_info.ldTgtId = acmd->device_id;
1610 
1611                         if (acmd->cmd_flags & CFLAG_DMASEND)
1612                                 io_info.isRead = 0;
1613                         else
1614                                 io_info.isRead = 1;
1615 
1616 
1617                         /* Acquire SYNC MAP UPDATE lock */
1618                         mutex_enter(&instance->sync_map_mtx);
1619 
1620                         local_map_ptr =
1621                             instance->ld_map[(instance->map_id & 1)];
1622 
1623                         if ((MR_TargetIdToLdGet(
1624                             acmd->device_id, local_map_ptr) >=
1625                             MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1626                                 dev_err(instance->dip, CE_NOTE,
1627                                     "Fast Path NOT Possible, "
1628                                     "targetId >= MAX_LOGICAL_DRIVES || "
1629                                     "!instance->fast_path_io");
1630                                 fp_possible = 0;
1631                                 /* Set Regionlock flags to BYPASS */
1632                                 /* io_request->RaidContext.regLockFlags  = 0; */
1633                                 ddi_put8(acc_handle,
1634                                     &scsi_raid_io->RaidContext.regLockFlags, 0);
1635                         } else {
1636                                 if (MR_BuildRaidContext(instance, &io_info,
1637                                     &scsi_raid_io->RaidContext, local_map_ptr))
1638                                         fp_possible = io_info.fpOkForIo;
1639                         }
1640 
1641                         if (!enable_fp)
1642                                 fp_possible = 0;
1643 
1644                         con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
1645                             "instance->fast_path_io %d fp_possible %d",
1646                             enable_fp, instance->fast_path_io, fp_possible));
1647 
1648                 if (fp_possible) {
1649 
1650                         /* Check for DIF enabled LD */
1651                         if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1652                                 /* Prepare 32 Byte CDB for DIF capable Disk */
1653                                 mrsas_tbolt_prepare_cdb(instance,
1654                                     scsi_raid_io->CDB.CDB32,
1655                                     &io_info, scsi_raid_io, start_lba_lo);
1656                         } else {
1657                                 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1658                                     sizeof (scsi_raid_io->CDB.CDB32),
1659                                     (uint8_t *)&pd_cmd_cdblen,
1660                                     io_info.pdBlock, io_info.numBlocks);
1661                                 ddi_put16(acc_handle,
1662                                     &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1663                         }
1664 
1665                         ddi_put8(acc_handle, &scsi_raid_io->Function,
1666                             MPI2_FUNCTION_SCSI_IO_REQUEST);
1667 
1668                         ReqDescUnion->SCSIIO.RequestFlags =
1669                             (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1670                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1671 
1672                         if (instance->gen3) {
1673                                 uint8_t regLockFlags = ddi_get8(acc_handle,
1674                                     &scsi_raid_io->RaidContext.regLockFlags);
1675                                 uint16_t IoFlags = ddi_get16(acc_handle,
1676                                     &scsi_raid_io->IoFlags);
1677 
1678                                 if (regLockFlags == REGION_TYPE_UNUSED)
1679                                         ReqDescUnion->SCSIIO.RequestFlags =
1680                                             (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1681                                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1682 
1683                                 IoFlags |=
1684                                     MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1685                                 regLockFlags |=
1686                                     (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1687                                     MR_RL_FLAGS_SEQ_NUM_ENABLE);
1688 
1689                                 ddi_put8(acc_handle,
1690                                     &scsi_raid_io->ChainOffset, 0);
1691                                 ddi_put8(acc_handle,
1692                                     &scsi_raid_io->RaidContext.nsegType,
1693                                     ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1694                                     MPI2_TYPE_CUDA));
1695                                 ddi_put8(acc_handle,
1696                                     &scsi_raid_io->RaidContext.regLockFlags,
1697                                     regLockFlags);
1698                                 ddi_put16(acc_handle,
1699                                     &scsi_raid_io->IoFlags, IoFlags);
1700                         }
1701 
1702                         if ((instance->load_balance_info[
1703                             acmd->device_id].loadBalanceFlag) &&
1704                             (io_info.isRead)) {
1705                                 io_info.devHandle =
1706                                     get_updated_dev_handle(&instance->
1707                                     load_balance_info[acmd->device_id],
1708                                     &io_info);
1709                                 cmd->load_balance_flag |=
1710                                     MEGASAS_LOAD_BALANCE_FLAG;
1711                         } else {
1712                                 cmd->load_balance_flag &=
1713                                     ~MEGASAS_LOAD_BALANCE_FLAG;
1714                         }
1715 
1716                         ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1717                         ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1718                             io_info.devHandle);
1719 
1720                 } else { /* FP Not Possible */
1721 
1722                         ddi_put8(acc_handle, &scsi_raid_io->Function,
1723                             MPI2_FUNCTION_LD_IO_REQUEST);
1724 
1725                         ddi_put16(acc_handle,
1726                             &scsi_raid_io->DevHandle, acmd->device_id);
1727 
1728                         ReqDescUnion->SCSIIO.RequestFlags =
1729                             (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1730                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1731 
1732                         ddi_put16(acc_handle,
1733                             &scsi_raid_io->RaidContext.timeoutValue,
1734                             local_map_ptr->raidMap.fpPdIoTimeoutSec);
1735 
1736                         if (instance->gen3) {
1737                                 uint8_t regLockFlags = ddi_get8(acc_handle,
1738                                     &scsi_raid_io->RaidContext.regLockFlags);
1739 
1740                                 if (regLockFlags == REGION_TYPE_UNUSED) {
1741                                         ReqDescUnion->SCSIIO.RequestFlags =
1742                                             (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1743                                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1744                                 }
1745 
1746                                 regLockFlags |=
1747                                     (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1748                                     MR_RL_FLAGS_SEQ_NUM_ENABLE);
1749 
1750                                 ddi_put8(acc_handle,
1751                                     &scsi_raid_io->RaidContext.nsegType,
1752                                     ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1753                                     MPI2_TYPE_CUDA));
1754                                 ddi_put8(acc_handle,
1755                                     &scsi_raid_io->RaidContext.regLockFlags,
1756                                     regLockFlags);
1757                         }
1758                 } /* Not FP */
1759 
1760                 /* Release SYNC MAP UPDATE lock */
1761                 mutex_exit(&instance->sync_map_mtx);
1762 
1763                 break;
1764 
1765                 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1766                         return_raid_msg_pkt(instance, cmd);
1767                         *cmd_done = 1;
1768                         return (NULL);
1769                 }
1770 
1771                 case SCMD_MODE_SENSE:
1772                 case SCMD_MODE_SENSE_G1: {
1773                         union scsi_cdb  *cdbp;
1774                         uint16_t        page_code;
1775 
1776                         cdbp = (void *)pkt->pkt_cdbp;
1777                         page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1778                         switch (page_code) {
1779                         case 0x3:
1780                         case 0x4:
1781                                 (void) mrsas_mode_sense_build(pkt);
1782                                 return_raid_msg_pkt(instance, cmd);
1783                                 *cmd_done = 1;
1784                                 return (NULL);
1785                         }
1786                         return (cmd);
1787                 }
1788 
1789                 default:
1790                         /* Pass-through command to logical drive */
1791                         ddi_put8(acc_handle, &scsi_raid_io->Function,
1792                             MPI2_FUNCTION_LD_IO_REQUEST);
1793                         ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1794                         ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1795                             acmd->device_id);
1796                         ReqDescUnion->SCSIIO.RequestFlags =
1797                             (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1798                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1799                         break;
1800                 }
1801         } else { /* Physical */
1802                 /* Pass-through command to physical drive */
1803 
1804                 /* Acquire SYNC MAP UPDATE lock */
1805                 mutex_enter(&instance->sync_map_mtx);
1806 
1807                 local_map_ptr = instance->ld_map[instance->map_id & 1];
1808 
1809                 ddi_put8(acc_handle, &scsi_raid_io->Function,
1810                     MPI2_FUNCTION_SCSI_IO_REQUEST);
1811 
1812                 ReqDescUnion->SCSIIO.RequestFlags =
1813                     (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1814                     MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1815 
1816                 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1817                     local_map_ptr->raidMap.
1818                     devHndlInfo[acmd->device_id].curDevHdl);
1819 
1820                 /* Set regLockFlasgs to REGION_TYPE_BYPASS */
1821                 ddi_put8(acc_handle,
1822                     &scsi_raid_io->RaidContext.regLockFlags, 0);
1823                 ddi_put64(acc_handle,
1824                     &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1825                 ddi_put32(acc_handle,
1826                     &scsi_raid_io->RaidContext.regLockLength, 0);
1827                 ddi_put8(acc_handle,
1828                     &scsi_raid_io->RaidContext.RAIDFlags,
1829                     MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1830                     MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1831                 ddi_put16(acc_handle,
1832                     &scsi_raid_io->RaidContext.timeoutValue,
1833                     local_map_ptr->raidMap.fpPdIoTimeoutSec);
1834                 ddi_put16(acc_handle,
1835                     &scsi_raid_io->RaidContext.ldTargetId,
1836                     acmd->device_id);
1837                 ddi_put8(acc_handle,
1838                     &scsi_raid_io->LUN[1], acmd->lun);
1839 
1840                 if (instance->fast_path_io && instance->gen3) {
1841                         uint16_t IoFlags = ddi_get16(acc_handle,
1842                             &scsi_raid_io->IoFlags);
1843                         IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1844                         ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1845                 }
1846                 ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1847                     local_map_ptr->raidMap.
1848                     devHndlInfo[acmd->device_id].curDevHdl);
1849 
1850                 /* Release SYNC MAP UPDATE lock */
1851                 mutex_exit(&instance->sync_map_mtx);
1852         }
1853 
1854         /* Set sense buffer physical address/length in scsi_io_request. */
1855         ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1856             cmd->sense_phys_addr1);
1857         ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1858 
1859         /* Construct SGL */
1860         ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1861             offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1862 
1863         (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1864             scsi_raid_io, &datalen);
1865 
1866         ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1867 
1868         con_log(CL_ANN, (CE_CONT,
1869             "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1870             pkt->pkt_cdbp[0], acmd->device_id));
1871         con_log(CL_DLEVEL1, (CE_CONT,
1872             "data length = %x\n",
1873             scsi_raid_io->DataLength));
1874         con_log(CL_DLEVEL1, (CE_CONT,
1875             "cdb length = %x\n",
1876             acmd->cmd_cdblen));
1877 
1878         return (cmd);
1879 }
1880 
1881 uint32_t
1882 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1883 {
1884         return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1885 }
1886 
1887 void
1888 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1889 {
1890         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1891         atomic_inc_16(&instance->fw_outstanding);
1892 
1893         struct scsi_pkt *pkt;
1894 
1895         con_log(CL_ANN1,
1896             (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1897 
1898         con_log(CL_DLEVEL1, (CE_CONT,
1899             " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1900         con_log(CL_DLEVEL1, (CE_CONT,
1901             " [req desc low part] %x \n",
1902             (uint_t)(req_desc->Words & 0xffffffffff)));
1903         con_log(CL_DLEVEL1, (CE_CONT,
1904             " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1905         pkt = cmd->pkt;
1906 
1907         if (pkt) {
1908                 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1909                     "ISSUED CMD TO FW : called : cmd:"
1910                     ": %p instance : %p pkt : %p pkt_time : %x\n",
1911                     gethrtime(), (void *)cmd, (void *)instance,
1912                     (void *)pkt, cmd->drv_pkt_time));
1913                 if (instance->adapterresetinprogress) {
1914                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1915                         con_log(CL_ANN, (CE_NOTE,
1916                             "TBOLT Reset the scsi_pkt timer"));
1917                 } else {
1918                         push_pending_mfi_pkt(instance, cmd);
1919                 }
1920 
1921         } else {
1922                 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1923                     "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1924                     "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1925         }
1926 
1927         /* Issue the command to the FW */
1928         mutex_enter(&instance->reg_write_mtx);
1929         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1930         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1931         mutex_exit(&instance->reg_write_mtx);
1932 }
1933 
1934 /*
1935  * issue_cmd_in_sync_mode
1936  */
1937 int
1938 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1939     struct mrsas_cmd *cmd)
1940 {
1941         int             i;
1942         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1943         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1944 
1945         struct mrsas_header     *hdr;
1946         hdr = (struct mrsas_header *)&cmd->frame->hdr;
1947 
1948         con_log(CL_ANN,
1949             (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1950             cmd->SMID));
1951 
1952 
1953         if (instance->adapterresetinprogress) {
1954                 cmd->drv_pkt_time = ddi_get16
1955                     (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1956                 if (cmd->drv_pkt_time < debug_timeout_g)
1957                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1958                 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1959                     "RESET-IN-PROGRESS, issue cmd & return."));
1960 
1961                 mutex_enter(&instance->reg_write_mtx);
1962                 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1963                 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1964                 mutex_exit(&instance->reg_write_mtx);
1965 
1966                 return (DDI_SUCCESS);
1967         } else {
1968                 con_log(CL_ANN1, (CE_NOTE,
1969                     "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1970                 push_pending_mfi_pkt(instance, cmd);
1971         }
1972 
1973         con_log(CL_DLEVEL2, (CE_NOTE,
1974             "HighQport offset :%p",
1975             (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1976         con_log(CL_DLEVEL2, (CE_NOTE,
1977             "LowQport offset :%p",
1978             (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1979 
1980         cmd->sync_cmd = MRSAS_TRUE;
1981         cmd->cmd_status =  ENODATA;
1982 
1983 
1984         mutex_enter(&instance->reg_write_mtx);
1985         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1986         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1987         mutex_exit(&instance->reg_write_mtx);
1988 
1989         con_log(CL_ANN1, (CE_NOTE,
1990             " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1991         con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1992             (uint_t)(req_desc->Words & 0xffffffff)));
1993 
1994         mutex_enter(&instance->int_cmd_mtx);
1995         for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
1996                 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
1997         }
1998         mutex_exit(&instance->int_cmd_mtx);
1999 
2000 
2001         if (i < (msecs -1)) {
2002                 return (DDI_SUCCESS);
2003         } else {
2004                 return (DDI_FAILURE);
2005         }
2006 }
2007 
2008 /*
2009  * issue_cmd_in_poll_mode
2010  */
2011 int
2012 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2013     struct mrsas_cmd *cmd)
2014 {
2015         int             i;
2016         uint16_t        flags;
2017         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2018         struct mrsas_header *frame_hdr;
2019 
2020         con_log(CL_ANN,
2021             (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2022             cmd->SMID));
2023 
2024         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2025 
2026         frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2027         ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2028             MFI_CMD_STATUS_POLL_MODE);
2029         flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2030         flags   |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2031         ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2032 
2033         con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2034             (uint_t)(req_desc->Words & 0xffffffff)));
2035         con_log(CL_ANN1, (CE_NOTE,
2036             " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2037 
2038         /* issue the frame using inbound queue port */
2039         mutex_enter(&instance->reg_write_mtx);
2040         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2041         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2042         mutex_exit(&instance->reg_write_mtx);
2043 
2044         for (i = 0; i < msecs && (
2045             ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2046             == MFI_CMD_STATUS_POLL_MODE); i++) {
2047                 /* wait for cmd_status to change from 0xFF */
2048                 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2049         }
2050 
2051         DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2052 
2053         if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2054             &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2055                 con_log(CL_ANN1, (CE_NOTE,
2056                     " cmd failed %" PRIx64, (req_desc->Words)));
2057                 return (DDI_FAILURE);
2058         }
2059 
2060         return (DDI_SUCCESS);
2061 }
2062 
2063 void
2064 tbolt_enable_intr(struct mrsas_instance *instance)
2065 {
2066         /* TODO: For Thunderbolt/Invader also clear intr on enable */
2067         /* writel(~0, &regs->outbound_intr_status); */
2068         /* readl(&regs->outbound_intr_status); */
2069 
2070         WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2071 
2072         /* dummy read to force PCI flush */
2073         (void) RD_OB_INTR_MASK(instance);
2074 
2075 }
2076 
2077 void
2078 tbolt_disable_intr(struct mrsas_instance *instance)
2079 {
2080         uint32_t mask = 0xFFFFFFFF;
2081 
2082         WR_OB_INTR_MASK(mask, instance);
2083 
2084         /* Dummy readl to force pci flush */
2085 
2086         (void) RD_OB_INTR_MASK(instance);
2087 }
2088 
2089 
2090 int
2091 tbolt_intr_ack(struct mrsas_instance *instance)
2092 {
2093         uint32_t        status;
2094 
2095         /* check if it is our interrupt */
2096         status = RD_OB_INTR_STATUS(instance);
2097         con_log(CL_ANN1, (CE_NOTE,
2098             "chkpnt: Entered tbolt_intr_ack status = %d", status));
2099 
2100         if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2101                 return (DDI_INTR_UNCLAIMED);
2102         }
2103 
2104         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2105                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2106                 return (DDI_INTR_UNCLAIMED);
2107         }
2108 
2109         if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2110                 /* clear the interrupt by writing back the same value */
2111                 WR_OB_INTR_STATUS(status, instance);
2112                 /* dummy READ */
2113                 (void) RD_OB_INTR_STATUS(instance);
2114         }
2115         return (DDI_INTR_CLAIMED);
2116 }
2117 
2118 /*
2119  * get_raid_msg_pkt : Get a command from the free pool
2120  * After successful allocation, the caller of this routine
2121  * must clear the frame buffer (memset to zero) before
2122  * using the packet further.
2123  *
2124  * ***** Note *****
2125  * After clearing the frame buffer the context id of the
2126  * frame buffer SHOULD be restored back.
2127  */
2128 
2129 struct mrsas_cmd *
2130 get_raid_msg_pkt(struct mrsas_instance *instance)
2131 {
2132         mlist_t                 *head = &instance->cmd_pool_list;
2133         struct mrsas_cmd        *cmd = NULL;
2134 
2135         mutex_enter(&instance->cmd_pool_mtx);
2136         ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2137 
2138 
2139         if (!mlist_empty(head)) {
2140                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2141                 mlist_del_init(head->next);
2142         }
2143         if (cmd != NULL) {
2144                 cmd->pkt = NULL;
2145                 cmd->retry_count_for_ocr = 0;
2146                 cmd->drv_pkt_time = 0;
2147         }
2148         mutex_exit(&instance->cmd_pool_mtx);
2149 
2150         if (cmd != NULL)
2151                 bzero(cmd->scsi_io_request,
2152                     sizeof (Mpi2RaidSCSIIORequest_t));
2153         return (cmd);
2154 }
2155 
2156 struct mrsas_cmd *
2157 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2158 {
2159         mlist_t                 *head = &instance->cmd_app_pool_list;
2160         struct mrsas_cmd        *cmd = NULL;
2161 
2162         mutex_enter(&instance->cmd_app_pool_mtx);
2163         ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2164 
2165         if (!mlist_empty(head)) {
2166                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2167                 mlist_del_init(head->next);
2168         }
2169         if (cmd != NULL) {
2170                 cmd->retry_count_for_ocr = 0;
2171                 cmd->drv_pkt_time = 0;
2172                 cmd->pkt = NULL;
2173                 cmd->request_desc = NULL;
2174 
2175         }
2176 
2177         mutex_exit(&instance->cmd_app_pool_mtx);
2178 
2179         if (cmd != NULL) {
2180                 bzero(cmd->scsi_io_request,
2181                     sizeof (Mpi2RaidSCSIIORequest_t));
2182         }
2183 
2184         return (cmd);
2185 }
2186 
2187 /*
2188  * return_raid_msg_pkt : Return a cmd to free command pool
2189  */
2190 void
2191 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2192 {
2193         mutex_enter(&instance->cmd_pool_mtx);
2194         ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2195 
2196 
2197         mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2198 
2199         mutex_exit(&instance->cmd_pool_mtx);
2200 }
2201 
2202 void
2203 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2204 {
2205         mutex_enter(&instance->cmd_app_pool_mtx);
2206         ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2207 
2208         mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2209 
2210         mutex_exit(&instance->cmd_app_pool_mtx);
2211 }
2212 
2213 
/*
 * mr_sas_tbolt_build_mfi_cmd - wrap a legacy MFI frame so it can be
 * sent through the Thunderbolt MPI2 message path: build a SCSI IO
 * pass-through request whose single IEEE chain SGE points at the MFI
 * frame's physical address.  No-op on non-Thunderbolt adapters or when
 * no request descriptor can be obtained for the command's index.
 */
void
mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
	uint32_t			index;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	if (!instance->tbolt) {
		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
		return;
	}

	index = cmd->index;

	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);

	if (!ReqDescUnion) {
		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
		return;
	}

	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));

	/* Descriptor: plain SCSI IO request carrying this command's SMID. */
	ReqDescUnion->Words = 0;

	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	ReqDescUnion->SCSIIO.SMID = cmd->SMID;

	cmd->request_desc = ReqDescUnion;

	/* get raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	if (instance->gen3) {
		/*
		 * Gen3: clear the Flags byte of the last possible SGE in
		 * the main message frame.
		 */
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
		    &scsi_raid_io->SGL.IeeeChain;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	ddi_put8(acc_handle, &scsi_raid_io->Function,
	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);

	/* SGL and chain offsets are in units of 4 and 16 bytes. */
	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);

	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
	    cmd->sense_phys_addr1);


	/* Single chain SGE pointing at the MFI frame itself. */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
	    (U64)cmd->frame_phys_addr);

	ddi_put8(acc_handle,
	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
	/* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);

	con_log(CL_ANN1, (CE_NOTE,
	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
	    scsi_raid_io_sgl_ieee->Address));
	con_log(CL_ANN1, (CE_NOTE,
	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
	    scsi_raid_io_sgl_ieee->Flags));
}
2294 
2295 
/*
 * tbolt_complete_cmd -	complete a single Thunderbolt/Invader command.
 * @instance:		Adapter soft state.
 * @cmd:		Command whose reply descriptor was just consumed.
 *
 * Reads the completion status out of the command's RAID SCSI I/O request
 * frame and dispatches on the frame's Function field:
 *
 *  - MPI2_FUNCTION_SCSI_IO_REQUEST (fast-path I/O): undoes the RAID-1
 *    load-balance accounting, then falls through to the regular path.
 *  - MPI2_FUNCTION_LD_IO_REQUEST (regular I/O): syncs DMA, translates the
 *    MFI status into scsi_pkt fields (pkt_reason/pkt_scbp/sense data),
 *    runs FMA checks, invokes the HBA completion callback and frees the
 *    command back to the RAID message pool.
 *  - MPI2_FUNCTION_PASSTHRU_IO_REQUEST (MFI pass-through): handles RAID
 *    map-sync replies, AEN replies and synchronous internal commands.
 */
void
tbolt_complete_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	uint8_t				status;
	uint8_t				extStatus;
	uint8_t				function;
	uint8_t				arm;
	struct scsa_cmd			*acmd;
	struct scsi_pkt			*pkt;
	struct scsi_arq_status		*arqstat;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	LD_LOAD_BALANCE_INFO		*lbinfo;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* Completion status as written by firmware into the RAID context. */
	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);

	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));

	if (status != MFI_STAT_OK) {
		con_log(CL_ANN, (CE_WARN,
		    "IO Cmd Failed SMID %x", cmd->SMID));
	} else {
		con_log(CL_ANN, (CE_NOTE,
		    "IO Cmd Success  SMID %x", cmd->SMID));
	}

	/* regular commands */

	/* Dispatch on the MPI2 function the request frame was built with. */
	function = ddi_get8(acc_handle, &scsi_raid_io->Function);
	DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
	    uint8_t, status, uint8_t, extStatus);

	switch (function) {

	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
		acmd =	(struct scsa_cmd *)cmd->cmd;
		lbinfo = &instance->load_balance_info[acmd->device_id];

		/*
		 * If this I/O was load-balanced across RAID-1 arms, drop
		 * the pending count on whichever arm serviced it.  Arm 0
		 * is identified by matching the request's DevHandle.
		 */
		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
			arm = lbinfo->raid1DevHandle[0] ==
			    scsi_raid_io->DevHandle ? 0 : 1;

			lbinfo->scsi_pending_cmds[arm]--;
			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
		}
		con_log(CL_DLEVEL3, (CE_NOTE,
		    "FastPath IO Completion Success "));
		/* FALLTHRU */

	case MPI2_FUNCTION_LD_IO_REQUEST :   { /* Regular Path IO. */
		acmd =	(struct scsa_cmd *)cmd->cmd;
		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);

		/* Make DMA'd data visible to the CPU before completing. */
		if (acmd->cmd_flags & CFLAG_DMAVALID) {
			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
				(void) ddi_dma_sync(acmd->cmd_dmahandle,
				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
				    DDI_DMA_SYNC_FORCPU);
			}
		}

		/* Assume full success; the status switch below adjusts. */
		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
		    ((acmd->islogical) ? "LD" : "PD"),
		    acmd->cmd_dmacount, cmd->SMID, status));

		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
			struct scsi_inquiry	*inq;

			if (acmd->cmd_dmacount != 0) {
				bp_mapin(acmd->cmd_buf);
				inq = (struct scsi_inquiry *)
				    acmd->cmd_buf->b_un.b_addr;

				/* don't expose physical drives to OS */
				if (acmd->islogical &&
				    (status == MFI_STAT_OK)) {
					display_scsi_inquiry((caddr_t)inq);
				} else if ((status == MFI_STAT_OK) &&
				    (inq->inq_dtype == DTYPE_DIRECT ||
				    inq->inq_dtype == DTYPE_ESI)) {
					display_scsi_inquiry((caddr_t)inq);
				} else {
					/* for physical disk */
					status = MFI_STAT_DEVICE_NOT_FOUND;
				}
			}
		}

		/* Map the MFI completion status onto the scsi_pkt. */
		switch (status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_CC_IN_PROGRESS:
		case MFI_STAT_LD_RECON_IN_PROGRESS:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			pkt->pkt_reason	= CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_IO_FAILED:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: scsi_io failed");
			pkt->pkt_reason	= CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			/*
			 * CHECK CONDITION: build an auto-request-sense
			 * response from the sense bytes in cmd->sense1.
			 */
			con_log(CL_ANN, (CE_WARN,
			    "tbolt_complete_cmd: scsi_done with error"));

			pkt->pkt_reason	= CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
				con_log(CL_ANN,
				    (CE_WARN, "TEST_UNIT_READY fail"));
			} else {
				pkt->pkt_state |= STATE_ARQ_DONE;
				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |=
				    STATE_GOT_BUS | STATE_GOT_TARGET
				    | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;
				con_log(CL_ANN1,
				    (CE_NOTE, "Copying Sense data %x",
				    cmd->SMID));

				ddi_rep_get8(acc_handle,
				    (uint8_t *)&(arqstat->sts_sensedata),
				    cmd->sense1,
				    sizeof (struct scsi_extended_sense),
				    DDI_DEV_AUTOINCR);

			}
			break;
		case MFI_STAT_LD_OFFLINE:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: ld offline "
			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
			    /* UNDO: */
			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),

			    ddi_get16(acc_handle,
			    &scsi_raid_io->RaidContext.ldTargetId),

			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));

			pkt->pkt_reason	= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
			break;
		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: device not found error"));
			pkt->pkt_reason	= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
			break;

		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
			/* Synthesize ILLEGAL REQUEST sense data locally. */
			pkt->pkt_state |= STATE_ARQ_DONE;
			pkt->pkt_reason	= CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			arqstat = (void *)(pkt->pkt_scbp);
			arqstat->sts_rqpkt_reason = CMD_CMPLT;
			arqstat->sts_rqpkt_resid = 0;
			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA;
			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;

			arqstat->sts_sensedata.es_valid = 1;
			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;

			/*
			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
			 * ASC: 0x21h; ASCQ: 0x00h;
			 */
			arqstat->sts_sensedata.es_add_code = 0x21;
			arqstat->sts_sensedata.es_qual_code = 0x00;
			break;
		case MFI_STAT_INVALID_CMD:
		case MFI_STAT_INVALID_DCMD:
		case MFI_STAT_INVALID_PARAMETER:
		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
		default:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: Unknown status!");
			pkt->pkt_reason	= CMD_TRAN_ERR;

			break;
		}

		atomic_add_16(&instance->fw_outstanding, (-1));

		(void) mrsas_common_check(instance, cmd);
		/* FMA: a failed DMA handle downgrades the completion. */
		if (acmd->cmd_dmahandle) {
			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
			    DDI_SUCCESS) {
				ddi_fm_service_impact(instance->dip,
				    DDI_SERVICE_UNAFFECTED);
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics = 0;
			}
		}

		/* Call the callback routine */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
			(*pkt->pkt_comp)(pkt);

		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));

		/* Clear the frame's status fields before it is reused. */
		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);

		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);

		return_raid_msg_pkt(instance, cmd);
		break;
	}
	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:  /* MFA command. */

		/*
		 * RAID map sync completion (MR_DCMD_LD_MAP_GET_INFO with
		 * mbox.b[1] == 1): flip to the new map, re-evaluate
		 * fast-path eligibility and re-arm the map sync command.
		 */
		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
		    cmd->frame->dcmd.mbox.b[1] == 1) {

			mutex_enter(&instance->sync_map_mtx);

			con_log(CL_ANN, (CE_NOTE,
			    "LDMAP sync command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if (cmd->frame->hdr.cmd_status != 0) {
				dev_err(instance->dip, CE_WARN,
				    "map sync failed, status = 0x%x.",
				    cmd->frame->hdr.cmd_status);
			} else {
				/* Success: the other map buffer is current. */
				instance->map_id++;
				con_log(CL_ANN1, (CE_NOTE,
				    "map sync received, switched map_id to %"
				    PRIu64, instance->map_id));
			}

			if (MR_ValidateMapInfo(
			    instance->ld_map[instance->map_id & 1],
			    instance->load_balance_info)) {
				instance->fast_path_io = 1;
			} else {
				instance->fast_path_io = 0;
			}

			con_log(CL_ANN, (CE_NOTE,
			    "instance->fast_path_io %d",
			    instance->fast_path_io));

			instance->unroll.syncCmd = 0;

			/* Keep a map sync command outstanding at all times. */
			if (instance->map_update_cmd == cmd) {
				return_raid_msg_pkt(instance, cmd);
				atomic_add_16(&instance->fw_outstanding, (-1));
				(void) mrsas_tbolt_sync_map_info(instance);
			}

			con_log(CL_ANN1, (CE_NOTE,
			    "LDMAP sync completed, ldcount=%d",
			    instance->ld_map[instance->map_id & 1]
			    ->raidMap.ldCount));
			mutex_exit(&instance->sync_map_mtx);
			break;
		}

		/* Asynchronous event notification (AEN) completion. */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
			con_log(CL_ANN1, (CE_CONT,
			    "AEN command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if ((instance->aen_cmd == cmd) &&
			    (instance->aen_cmd->abort_aen)) {
				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
				    "aborted_aen returned"));
			} else {
				atomic_add_16(&instance->fw_outstanding, (-1));
				service_mfi_aen(instance, cmd);
			}
		}

		/* Internal blocking command: wake the waiting issuer. */
		if (cmd->sync_cmd == MRSAS_TRUE) {
			con_log(CL_ANN1, (CE_CONT,
			    "Sync-mode Command Response SMID RECEIVED 0x%X",
			    cmd->SMID));

			tbolt_complete_cmd_in_sync_mode(instance, cmd);
		} else {
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
			    cmd->SMID));
		}
		break;
	default:
		/* Unrecognized function: report to FMA, nothing to free. */
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

		/* free message */
		con_log(CL_ANN,
		    (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
		break;
	}
}
2614 
/*
 * mr_sas_tbolt_process_outstanding_cmd - drain the reply descriptor ring.
 * @instance:	Adapter soft state.
 *
 * Walks the reply descriptor ring starting at reply_read_index and
 * completes each posted command via tbolt_complete_cmd().  A descriptor
 * whose value is all 1s (type MPI2_RPY_DESCRIPT_FLAGS_UNUSED) marks the
 * end of the posted replies.  Consumed descriptors are reset to all 1s
 * and the updated read index is written back to the firmware's
 * reply-post register.
 *
 * Returns DDI_INTR_UNCLAIMED if the first descriptor is unused (nothing
 * for us), otherwise DDI_INTR_CLAIMED.
 */
uint_t
mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
{
	uint8_t				replyType;
	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
	Mpi2ReplyDescriptorsUnion_t	*desc;
	uint16_t			smid;
	union desc_value		d_val;
	struct mrsas_cmd		*cmd;

	struct mrsas_header	*hdr;
	struct scsi_pkt		*pkt;

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	/* Start at the descriptor we stopped at last time. */
	desc = instance->reply_frame_pool;
	desc += instance->reply_read_index;

	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
	replyType = replyDesc->ReplyFlags &
	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return (DDI_INTR_UNCLAIMED);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		con_log(CL_ANN1,
		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
		    "FMA check, returning DDI_INTR_UNCLAIMED"));
		return (DDI_INTR_CLAIMED);
	}

	con_log(CL_ANN1, (CE_NOTE, "Reply Desc  = %p  Words = %" PRIx64,
	    (void *)desc, desc->Words));

	d_val.word = desc->Words;


	/* Read Reply descriptor */
	while ((d_val.u1.low != 0xffffffff) &&
	    (d_val.u1.high != 0xffffffff)) {

		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		smid = replyDesc->SMID;

		/* SMIDs are 1-based; reject anything outside the pool. */
		if (!smid || smid > instance->max_fw_cmds + 1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "Reply Desc at Break  = %p  Words = %" PRIx64,
			    (void *)desc, desc->Words));
			break;
		}

		cmd	= instance->cmd_list[smid - 1];
		if (!cmd) {
			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
			    "outstanding_cmd: Invalid command "
			    " or Poll commad Received in completion path"));
		} else {
			/* Unlink the command from the pending list. */
			mutex_enter(&instance->cmd_pend_mtx);
			if (cmd->sync_cmd == MRSAS_TRUE) {
				hdr = (struct mrsas_header *)&cmd->frame->hdr;
				if (hdr) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    " mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			} else {
				pkt = cmd->pkt;
				if (pkt) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    "mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			}

			mutex_exit(&instance->cmd_pend_mtx);

			tbolt_complete_cmd(instance, cmd);
		}
		/* set it back to all 1s. */
		desc->Words = -1LL;

		instance->reply_read_index++;

		if (instance->reply_read_index >= (instance->reply_q_depth)) {
			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
			instance->reply_read_index = 0;
		}

		/* Get the next reply descriptor */
		if (!instance->reply_read_index)
			desc = instance->reply_frame_pool;
		else
			desc++;

		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		con_log(CL_ANN1, (CE_NOTE,
		    "Next Reply Desc  = %p Words = %" PRIx64,
		    (void *)desc, desc->Words));

		replyType = replyDesc->ReplyFlags &
		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

	} /* End of while loop. */

	/* update replyIndex to FW */
	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);


	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);
	return (DDI_INTR_CLAIMED);
}
2748 
2749 
2750 
2751 
2752 /*
2753  * complete_cmd_in_sync_mode -  Completes an internal command
2754  * @instance:                   Adapter soft state
2755  * @cmd:                        Command to be completed
2756  *
2757  * The issue_cmd_in_sync_mode() function waits for a command to complete
2758  * after it issues a command. This function wakes up that waiting routine by
2759  * calling wake_up() on the wait queue.
2760  */
2761 void
2762 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2763     struct mrsas_cmd *cmd)
2764 {
2765 
2766         cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2767             &cmd->frame->io.cmd_status);
2768 
2769         cmd->sync_cmd = MRSAS_FALSE;
2770 
2771         mutex_enter(&instance->int_cmd_mtx);
2772         if (cmd->cmd_status == ENODATA) {
2773                 cmd->cmd_status = 0;
2774         }
2775         cv_broadcast(&instance->int_cmd_cv);
2776         mutex_exit(&instance->int_cmd_mtx);
2777 
2778 }
2779 
2780 /*
2781  * mrsas_tbolt_get_ld_map_info -        Returns  ld_map structure
2782  * instance:                            Adapter soft state
2783  *
2784  * Issues an internal command (DCMD) to get the FW's controller PD
2785  * list structure.  This information is mainly used to find out SYSTEM
2786  * supported by the FW.
2787  */
2788 int
2789 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2790 {
2791         int ret = 0;
2792         struct mrsas_cmd        *cmd = NULL;
2793         struct mrsas_dcmd_frame *dcmd;
2794         MR_FW_RAID_MAP_ALL *ci;
2795         uint32_t ci_h = 0;
2796         U32 size_map_info;
2797 
2798         cmd = get_raid_msg_pkt(instance);
2799 
2800         if (cmd == NULL) {
2801                 dev_err(instance->dip, CE_WARN,
2802                     "Failed to get a cmd from free-pool in get_ld_map_info()");
2803                 return (DDI_FAILURE);
2804         }
2805 
2806         dcmd = &cmd->frame->dcmd;
2807 
2808         size_map_info = sizeof (MR_FW_RAID_MAP) +
2809             (sizeof (MR_LD_SPAN_MAP) *
2810             (MAX_LOGICAL_DRIVES - 1));
2811 
2812         con_log(CL_ANN, (CE_NOTE,
2813             "size_map_info : 0x%x", size_map_info));
2814 
2815         ci = instance->ld_map[instance->map_id & 1];
2816         ci_h = instance->ld_map_phy[instance->map_id & 1];
2817 
2818         if (!ci) {
2819                 dev_err(instance->dip, CE_WARN,
2820                     "Failed to alloc mem for ld_map_info");
2821                 return_raid_msg_pkt(instance, cmd);
2822                 return (-1);
2823         }
2824 
2825         bzero(ci, sizeof (*ci));
2826         bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2827 
2828         dcmd->cmd = MFI_CMD_OP_DCMD;
2829         dcmd->cmd_status = 0xFF;
2830         dcmd->sge_count = 1;
2831         dcmd->flags = MFI_FRAME_DIR_READ;
2832         dcmd->timeout = 0;
2833         dcmd->pad_0 = 0;
2834         dcmd->data_xfer_len = size_map_info;
2835         dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2836         dcmd->sgl.sge32[0].phys_addr = ci_h;
2837         dcmd->sgl.sge32[0].length = size_map_info;
2838 
2839 
2840         mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2841 
2842         if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2843                 ret = 0;
2844                 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2845         } else {
2846                 dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2847                 ret = -1;
2848         }
2849 
2850         return_raid_msg_pkt(instance, cmd);
2851 
2852         return (ret);
2853 }
2854 
2855 void
2856 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2857 {
2858         uint32_t i;
2859         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2860         union desc_value d_val;
2861 
2862         reply_desc = instance->reply_frame_pool;
2863 
2864         for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2865                 d_val.word = reply_desc->Words;
2866                 con_log(CL_DLEVEL3, (CE_NOTE,
2867                     "i=%d, %x:%x",
2868                     i, d_val.u1.high, d_val.u1.low));
2869         }
2870 }
2871 
2872 /*
2873  * mrsas_tbolt_command_create - Create command for fast path.
2874  * @io_info:    MegaRAID IO request packet pointer.
2875  * @ref_tag:    Reference tag for RD/WRPROTECT
2876  *
2877  * Create the command for fast path.
2878  */
2879 void
2880 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2881     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2882     U32 ref_tag)
2883 {
2884         uint16_t                EEDPFlags;
2885         uint32_t                Control;
2886         ddi_acc_handle_t acc_handle =
2887             instance->mpi2_frame_pool_dma_obj.acc_handle;
2888 
2889         /* Prepare 32-byte CDB if DIF is supported on this device */
2890         con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2891 
2892         bzero(cdb, 32);
2893 
2894         cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2895 
2896 
2897         cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2898 
2899         if (io_info->isRead)
2900                 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2901         else
2902                 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2903 
2904         /* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2905         cdb[10] = MRSAS_RD_WR_PROTECT;
2906 
2907         /* LOGICAL BLOCK ADDRESS */
2908         cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2909         cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2910         cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2911         cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2912         cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2913         cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2914         cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2915         cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2916 
2917         /* Logical block reference tag */
2918         ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2919             BE_32(ref_tag));
2920 
2921         ddi_put16(acc_handle,
2922             &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2923 
2924         ddi_put32(acc_handle, &scsi_io_request->DataLength,
2925             ((io_info->numBlocks)*512));
2926         /* Specify 32-byte cdb */
2927         ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2928 
2929         /* Transfer length */
2930         cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2931         cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2932         cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2933         cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2934 
2935         /* set SCSI IO EEDPFlags */
2936         EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2937         Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2938 
2939         /* set SCSI IO EEDPFlags bits */
2940         if (io_info->isRead) {
2941                 /*
2942                  * For READ commands, the EEDPFlags shall be set to specify to
2943                  * Increment the Primary Reference Tag, to Check the Reference
2944                  * Tag, and to Check and Remove the Protection Information
2945                  * fields.
2946                  */
2947                 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG        |
2948                     MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG  |
2949                     MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP       |
2950                     MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG  |
2951                     MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2952         } else {
2953                 /*
2954                  * For WRITE commands, the EEDPFlags shall be set to specify to
2955                  * Increment the Primary Reference Tag, and to Insert
2956                  * Protection Information fields.
2957                  */
2958                 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG        |
2959                     MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2960         }
2961         Control |= (0x4 << 26);
2962 
2963         ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2964         ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2965         ddi_put32(acc_handle,
2966             &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2967 }
2968 
2969 
2970 /*
2971  * mrsas_tbolt_set_pd_lba -     Sets PD LBA
2972  * @cdb:                CDB
2973  * @cdb_size:           CDB size
2974  * @cdb_len_ptr:        cdb length
2975  * @start_blk:          Start block of IO
2976  * @num_blocks:         Number of blocks
2977  *
2978  * Used to set the PD LBA in CDB for FP IOs
2979  */
static void
mrsas_tbolt_set_pd_lba(U8 *cdb, size_t cdb_size, uint8_t *cdb_len_ptr,
    U64 start_blk, U32 num_blocks)
{
	U8 cdb_len = *cdb_len_ptr;
	/* Fields salvaged from the original CDB before it is rebuilt. */
	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;

	/* Some drives don't support 16/12 byte CDB's, convert to 10 */
	if (((cdb_len == 12) || (cdb_len == 16)) &&
	    (start_blk <= 0xffffffff)) {
		/*
		 * LBA fits in 32 bits, so a 10-byte CDB can address it.
		 * Save the flag, group-number and control bytes from their
		 * positions in the original 12/16-byte CDB; the READ/WRITE
		 * opcode distinction is preserved across the conversion.
		 */
		if (cdb_len == 16) {
			con_log(CL_ANN,
			    (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
			flagvals = cdb[1];
			groupnum = cdb[14];
			control = cdb[15];
		} else {
			con_log(CL_ANN,
			    (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
			flagvals = cdb[1];
			groupnum = cdb[10];
			control = cdb[11];
		}

		bzero(cdb, cdb_size);

		cdb[0] = opcode;
		cdb[1] = flagvals;
		cdb[6] = groupnum;
		cdb[9] = control;
		/* Set transfer length */
		/*
		 * NOTE(review): only 16 bits of num_blocks fit in a 10-byte
		 * CDB; callers presumably cap the transfer length - confirm.
		 */
		cdb[8] = (U8)(num_blocks & 0xff);
		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
		cdb_len = 10;
	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
		/* Convert to 16 byte CDB for large LBA's */
		con_log(CL_ANN,
		    (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
		/*
		 * The LBA exceeds 32 bits, so only a 16-byte CDB can carry
		 * it.  Salvage the same bookkeeping bytes from whichever
		 * smaller CDB we were handed (a 6-byte CDB has no flag or
		 * group-number byte, hence only control is taken).
		 */
		switch (cdb_len) {
		case 6:
			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
			control = cdb[5];
			break;
		case 10:
			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
			flagvals = cdb[1];
			groupnum = cdb[6];
			control = cdb[9];
			break;
		case 12:
			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
			flagvals = cdb[1];
			groupnum = cdb[10];
			control = cdb[11];
			break;
		}

		bzero(cdb, cdb_size);

		cdb[0] = opcode;
		cdb[1] = flagvals;
		cdb[14] = groupnum;
		cdb[15] = control;

		/* Transfer length */
		cdb[13] = (U8)(num_blocks & 0xff);
		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
		cdb[10] = (U8)((num_blocks >> 24) & 0xff);

		/* Specify 16-byte cdb */
		cdb_len = 16;
	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
		/*
		 * convert to 10 byte CDB: a 6-byte CDB can only address
		 * a 21-bit LBA.
		 */
		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
		control = cdb[5];

		bzero(cdb, cdb_size);
		cdb[0] = opcode;
		cdb[9] = control;

		/* Set transfer length */
		cdb[8] = (U8)(num_blocks & 0xff);
		cdb[7] = (U8)((num_blocks >> 8) & 0xff);

		/* Specify 10-byte cdb */
		cdb_len = 10;
	}


	/* Fall through Normal case, just load LBA here */
	/*
	 * By this point cdb_len matches one of the layouts below; store
	 * start_blk big-endian into the LBA field of the (possibly
	 * rewritten) CDB.
	 */
	switch (cdb_len) {
	case 6:
	{
		/* Upper 3 bits of byte 1 are not LBA bits; keep them. */
		U8 val = cdb[1] & 0xE0;
		cdb[3] = (U8)(start_blk & 0xff);
		cdb[2] = (U8)((start_blk >> 8) & 0xff);
		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
		break;
	}
	case 10:
		cdb[5] = (U8)(start_blk & 0xff);
		cdb[4] = (U8)((start_blk >> 8) & 0xff);
		cdb[3] = (U8)((start_blk >> 16) & 0xff);
		cdb[2] = (U8)((start_blk >> 24) & 0xff);
		break;
	case 12:
		cdb[5]    = (U8)(start_blk & 0xff);
		cdb[4]    = (U8)((start_blk >> 8) & 0xff);
		cdb[3]    = (U8)((start_blk >> 16) & 0xff);
		cdb[2]    = (U8)((start_blk >> 24) & 0xff);
		break;

	case 16:
		cdb[9]  = (U8)(start_blk & 0xff);
		cdb[8]  = (U8)((start_blk >> 8) & 0xff);
		cdb[7]  = (U8)((start_blk >> 16) & 0xff);
		cdb[6]  = (U8)((start_blk >> 24) & 0xff);
		cdb[5]  = (U8)((start_blk >> 32) & 0xff);
		cdb[4]  = (U8)((start_blk >> 40) & 0xff);
		cdb[3]  = (U8)((start_blk >> 48) & 0xff);
		cdb[2]  = (U8)((start_blk >> 56) & 0xff);
		break;
	}

	/* Report the (possibly changed) CDB length back to the caller. */
	*cdb_len_ptr = cdb_len;
}
3109 
3110 
3111 static int
3112 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3113 {
3114         MR_FW_RAID_MAP_ALL *ld_map;
3115 
3116         if (!mrsas_tbolt_get_ld_map_info(instance)) {
3117 
3118                 ld_map = instance->ld_map[instance->map_id & 1];
3119 
3120                 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3121                     ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3122 
3123                 if (MR_ValidateMapInfo(
3124                     instance->ld_map[instance->map_id & 1],
3125                     instance->load_balance_info)) {
3126                         con_log(CL_ANN,
3127                             (CE_CONT, "MR_ValidateMapInfo success"));
3128 
3129                         instance->fast_path_io = 1;
3130                         con_log(CL_ANN,
3131                             (CE_NOTE, "instance->fast_path_io %d",
3132                             instance->fast_path_io));
3133 
3134                         return (DDI_SUCCESS);
3135                 }
3136 
3137         }
3138 
3139         instance->fast_path_io = 0;
3140         dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3141         con_log(CL_ANN, (CE_NOTE,
3142             "instance->fast_path_io %d", instance->fast_path_io));
3143 
3144         return (DDI_FAILURE);
3145 }
3146 
3147 /*
3148  * Marks HBA as bad. This will be called either when an
3149  * IO packet times out even after 3 FW resets
3150  * or FW is found to be fault even after 3 continuous resets.
3151  */
3152 
3153 void
3154 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3155 {
3156         dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3157 
3158         if (instance->deadadapter == 1)
3159                 return;
3160 
3161         con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3162             "Writing to doorbell with MFI_STOP_ADP "));
3163         mutex_enter(&instance->ocr_flags_mtx);
3164         instance->deadadapter = 1;
3165         mutex_exit(&instance->ocr_flags_mtx);
3166         instance->func_ptr->disable_intr(instance);
3167         WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3168         /* Flush */
3169         (void) RD_RESERVED0_REGISTER(instance);
3170 
3171         (void) mrsas_print_pending_cmds(instance);
3172         (void) mrsas_complete_pending_cmds(instance);
3173 }
3174 
3175 void
3176 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3177 {
3178         int i;
3179         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3180         instance->reply_read_index = 0;
3181 
3182         /* initializing reply address to 0xFFFFFFFF */
3183         reply_desc = instance->reply_frame_pool;
3184 
3185         for (i = 0; i < instance->reply_q_depth; i++) {
3186                 reply_desc->Words = (uint64_t)~0;
3187                 reply_desc++;
3188         }
3189 }
3190 
/*
 * mrsas_tbolt_reset_ppc - perform online controller reset (OCR) on a
 * Thunderbolt-class adapter.
 *
 * Disables interrupts, issues the diagnostic reset sequence through the
 * write-sequence/host-diag registers, waits for firmware to come back to
 * ready state, re-issues MPI2 INIT, and replays pending commands.
 * Retries the whole sequence (via retry_reset) if firmware stays below
 * MFI_STATE_FW_INIT or faults fewer than MAX_FW_RESET_COUNT times;
 * kills the adapter once that count is exceeded.
 *
 * Returns DDI_SUCCESS on a completed reset, DDI_FAILURE otherwise.
 */
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status = 0x00;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;
	uint32_t abs_state;
	uint32_t i;

	/* Never reset an adapter that has already been marked dead. */
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "no more resets as HBA has been marked dead");
		return (DDI_FAILURE);
	}

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);

	instance->func_ptr->disable_intr(instance);

	/* Add delay in order to complete the ioctl & io cmds in-flight */
	for (i = 0; i < 3000; i++)
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */

	instance->reply_read_index = 0;

retry_reset:
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Resetting TBOLT"));

	/* Flush */
	WR_TBOLT_IB_WRITE_SEQ(0x0, instance);
	/*
	 * Write magic number: the F-4-b-2-7-d sequence unlocks the
	 * diagnostic register on the 2208 chip.
	 */
	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
	WR_TBOLT_IB_WRITE_SEQ(0x4, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
	WR_TBOLT_IB_WRITE_SEQ(0x2, instance);
	WR_TBOLT_IB_WRITE_SEQ(0x7, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: magic number written "
	    "to write sequence register"));

	/* Wait for the diag write enable (DRWE) bit to be set */
	retry = 0;
	status = RD_TBOLT_HOST_DIAG(instance);
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_TBOLT_HOST_DIAG(instance);
		if (retry++ >= 100) {
			/*
			 * NOTE(review): this failure return leaves
			 * adapterresetinprogress set and interrupts
			 * disabled - confirm callers handle that.
			 */
			dev_err(instance->dip, CE_WARN,
			    "%s(): timeout waiting for DRWE.", __func__);
			return (DDI_FAILURE);
		}
	}

	/* Send reset command */
	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));

	/* Wait for reset bit to clear */
	retry = 0;
	status = RD_TBOLT_HOST_DIAG(instance);
	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_TBOLT_HOST_DIAG(instance);
		if (retry++ == 100) {
			/* Dont call kill adapter here */
			/* RESET BIT ADAPTER is cleared by firmare */
			/* mrsas_tbolt_kill_adapter(instance); */
			dev_err(instance->dip, CE_WARN,
			    "%s(): RESET FAILED; return failure!!!", __func__);
			return (DDI_FAILURE);
		}
	}

	con_log(CL_ANN,
	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));

	/*
	 * Poll (up to ~100s) for firmware to advance past FW_INIT; if it
	 * never does, restart the whole reset sequence.
	 */
	abs_state = instance->func_ptr->read_fw_status_reg(instance);
	retry = 0;
	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
		delay(100 * drv_usectohz(MILLISEC));
		abs_state = instance->func_ptr->read_fw_status_reg(instance);
	}
	if (abs_state <= MFI_STATE_FW_INIT) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
		    "state = 0x%x, RETRY RESET.", abs_state);
		goto retry_reset;
	}

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    mrsas_debug_tbolt_fw_faults_after_ocr == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;

		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		/* Debug knob: pretend the firmware faulted. */
		if (mrsas_debug_tbolt_fw_faults_after_ocr == 1)
			fw_state = MFI_STATE_FAULT;

		con_log(CL_ANN,
		    (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				dev_err(instance->dip, CE_WARN,
				    "mrsas_tbolt_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				dev_err(instance->dip, CE_WARN, "%s:"
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    __func__, MAX_FW_RESET_COUNT);

				mrsas_tbolt_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}

	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	/* Discard any stale reply descriptors from before the reset. */
	mrsas_reset_reply_desc(instance);

	abs_state = mrsas_issue_init_mpi2(instance);
	if (abs_state == (uint32_t)DDI_FAILURE) {
		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "INIT failed Retrying Reset");
		goto retry_reset;
	}

	(void) mrsas_print_pending_cmds(instance);

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	/* Replay every command that was outstanding across the reset. */
	(void) mrsas_issue_pending_cmds(instance);

	/* Re-arm the async event notification command. */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);

	dev_err(instance->dip, CE_NOTE, "TBOLT adapter reset successfully");

	return (DDI_SUCCESS);
}
3358 
3359 /*
3360  * mrsas_sync_map_info -        Returns FW's ld_map structure
3361  * @instance:                           Adapter soft state
3362  *
3363  * Issues an internal command (DCMD) to get the FW's controller PD
3364  * list structure.  This information is mainly used to find out SYSTEM
3365  * supported by the FW.
3366  */
3367 
static int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
	int			ret = 0, i;
	struct mrsas_cmd	*cmd = NULL;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	LD_TARGET_SYNC *ci = NULL;
	MR_FW_RAID_MAP_ALL *map;
	MR_LD_RAID  *raid;
	LD_TARGET_SYNC *ld_sync;
	uint32_t ci_h = 0;
	uint32_t size_map_info;

	cmd = get_raid_msg_pkt(instance);

	if (cmd == NULL) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to get a cmd from free-pool in "
		    "mrsas_tbolt_sync_map_info().");
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);
	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));


	/* The currently active map is selected by the low bit of map_id. */
	map = instance->ld_map[instance->map_id & 1];

	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;

	con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
	    size_sync_info, num_lds));

	/*
	 * Reuse the inactive (previous-generation) map buffer as the DMA
	 * payload for the sync data; it is zeroed and repopulated below.
	 */
	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];

	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];

	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);

	ld_sync = (LD_TARGET_SYNC *)ci;

	/* One LD_TARGET_SYNC record (target id + seqNum) per LD. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);

		con_log(CL_ANN1,
		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
		    i, raid->seqNum, raid->flags.ldSyncRequired));

		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);

		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
		    i, ld_sync->ldTargetId));

		ld_sync->seqNum = raid->seqNum;
	}


	size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	/*
	 * MR_DCMD_LD_MAP_GET_INFO with FRAME_DIR_WRITE pushes our sync
	 * records to firmware; mbox.b[1] = 1 pends the command so the FW
	 * completes it only when the map changes.
	 */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	ASSERT(num_lds <= 255);
	dcmd->mbox.b[0] = (U8)num_lds;
	dcmd->mbox.b[1] = 1; /* Pend */
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;


	/* Remember the in-flight sync command so it can be aborted later. */
	instance->map_update_cmd = cmd;
	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	/* Fire and forget: completion is handled asynchronously. */
	instance->func_ptr->issue_cmd(cmd, instance);

	instance->unroll.syncCmd = 1;
	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));

	/* ret is still 0 (success) here; the DCMD completes later. */
	return (ret);
}
3462 
3463 /*
3464  * abort_syncmap_cmd
3465  */
3466 int
3467 abort_syncmap_cmd(struct mrsas_instance *instance,
3468     struct mrsas_cmd *cmd_to_abort)
3469 {
3470         int     ret = 0;
3471 
3472         struct mrsas_cmd                *cmd;
3473         struct mrsas_abort_frame        *abort_fr;
3474 
3475         con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3476 
3477         cmd = get_raid_msg_mfi_pkt(instance);
3478 
3479         if (!cmd) {
3480                 dev_err(instance->dip, CE_WARN,
3481                     "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3482                 return (DDI_FAILURE);
3483         }
3484         /* Clear the frame buffer and assign back the context id */
3485         bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3486         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3487             cmd->index);
3488 
3489         abort_fr = &cmd->frame->abort;
3490 
3491         /* prepare and issue the abort frame */
3492         ddi_put8(cmd->frame_dma_obj.acc_handle,
3493             &abort_fr->cmd, MFI_CMD_OP_ABORT);
3494         ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3495             MFI_CMD_STATUS_SYNC_MODE);
3496         ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3497         ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3498             cmd_to_abort->index);
3499         ddi_put32(cmd->frame_dma_obj.acc_handle,
3500             &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3501         ddi_put32(cmd->frame_dma_obj.acc_handle,
3502             &abort_fr->abort_mfi_phys_addr_hi, 0);
3503 
3504         cmd->frame_count = 1;
3505 
3506         mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3507 
3508         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3509                 con_log(CL_ANN1, (CE_WARN,
3510                     "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3511                 ret = -1;
3512         } else {
3513                 ret = 0;
3514         }
3515 
3516         return_raid_msg_mfi_pkt(instance, cmd);
3517 
3518         atomic_add_16(&instance->fw_outstanding, (-1));
3519 
3520         return (ret);
3521 }
3522 
3523 /*
3524  * Even though these functions were originally intended for 2208 only, it
3525  * turns out they're useful for "Skinny" support as well.  In a perfect world,
3526  * these two functions would be either in mr_sas.c, or in their own new source
3527  * file.  Since this driver needs some cleanup anyway, keep this portion in
3528  * mind as well.
3529  */
3530 
/*
 * mrsas_tbolt_config_pd - configure (attach) a physical drive node.
 * @instance:	Adapter soft state
 * @tgt:	Target id of the physical drive
 * @lun:	LUN of the physical drive
 * @ldip:	Optional out-parameter for the resulting dev_info node
 *
 * If a child node already exists it is returned (or torn down when the
 * driver's PD list no longer marks the target valid).  Otherwise the
 * drive is queried via mrsas_tbolt_get_pd_info(); only direct-access
 * disks in PD_SYSTEM state and enclosure devices are probed and
 * configured.  Returns NDI_SUCCESS/NDI_FAILURE.
 */
int
mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval, dtype;
	struct mrsas_tbolt_pd_info *pds = NULL;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
	    tgt, lun));

	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		/*
		 * The node exists but the target is no longer valid in the
		 * driver's PD list: schedule an unconfig to remove it.
		 */
		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 1,
			    MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas:DELETING STALE ENTRY  rval = %d "
			    "tgt id = %d", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	/* Ask firmware about this target before creating a node for it. */
	pds = (struct mrsas_tbolt_pd_info *)
	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
	mrsas_tbolt_get_pd_info(instance, pds, tgt);
	dtype = pds->scsiDevType;

	/* Check for disk/enclosure */
	if (dtype == DTYPE_DIRECT || dtype == DTYPE_ESI) {
		/* Expose disks only when firmware reports them as SYSTEM. */
		if ((dtype == DTYPE_DIRECT) &&
		    (LE_16(pds->fwState) != PD_SYSTEM)) {
			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
			return (NDI_FAILURE);
		}
		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
		sd->sd_address.a_hba_tran = instance->tran;
		sd->sd_address.a_target = (uint16_t)tgt;
		sd->sd_address.a_lun = (uint8_t)lun;

		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
			rval = mrsas_config_scsi_device(instance, sd, ldip);
			dev_err(instance->dip, CE_CONT,
			    "?Phys. device found: tgt %d dtype %d: %s\n",
			    tgt, dtype, sd->sd_inq->inq_vid);
		} else {
			rval = NDI_FAILURE;
			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
			    tgt, dtype, sd->sd_inq->inq_vid));
		}

		/* sd_unprobe is blank now. Free buffer manually */
		if (sd->sd_inq) {
			kmem_free(sd->sd_inq, SUN_INQSIZE);
			sd->sd_inq = (struct scsi_inquiry *)NULL;
		}
		kmem_free(sd, sizeof (struct scsi_device));
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "?Device not supported: tgt %d lun %d dtype %d",
		    tgt, lun, dtype));
		rval = NDI_FAILURE;
	}

	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
	    rval));
	return (rval);
}
3605 
3606 static void
3607 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3608     struct mrsas_tbolt_pd_info *pds, int tgt)
3609 {
3610         struct mrsas_cmd        *cmd;
3611         struct mrsas_dcmd_frame *dcmd;
3612         dma_obj_t               dcmd_dma_obj;
3613 
3614         ASSERT(instance->tbolt || instance->skinny);
3615 
3616         if (instance->tbolt)
3617                 cmd = get_raid_msg_pkt(instance);
3618         else
3619                 cmd = mrsas_get_mfi_pkt(instance);
3620 
3621         if (!cmd) {
3622                 con_log(CL_ANN1,
3623                     (CE_WARN, "Failed to get a cmd for get pd info"));
3624                 return;
3625         }
3626 
3627         /* Clear the frame buffer and assign back the context id */
3628         bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3629         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3630             cmd->index);
3631 
3632 
3633         dcmd = &cmd->frame->dcmd;
3634         dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3635         dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3636         dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3637         dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3638         dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3639         dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3640 
3641         (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3642             DDI_STRUCTURE_LE_ACC);
3643         bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3644         bzero(dcmd->mbox.b, 12);
3645         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3646         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3647         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3648         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3649             MFI_FRAME_DIR_READ);
3650         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3651         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3652             sizeof (struct mrsas_tbolt_pd_info));
3653         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3654             MR_DCMD_PD_GET_INFO);
3655         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3656         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3657             sizeof (struct mrsas_tbolt_pd_info));
3658         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3659             dcmd_dma_obj.dma_cookie[0].dmac_address);
3660 
3661         cmd->sync_cmd = MRSAS_TRUE;
3662         cmd->frame_count = 1;
3663 
3664         if (instance->tbolt)
3665                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3666 
3667         instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3668 
3669         ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3670             (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3671             DDI_DEV_AUTOINCR);
3672         (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3673 
3674         if (instance->tbolt)
3675                 return_raid_msg_pkt(instance, cmd);
3676         else
3677                 mrsas_return_mfi_pkt(instance, cmd);
3678 }