1 /*
   2  * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
   3  * i.e. Thunderbolt and Invader
   4  *
   5  * Solaris MegaRAID device driver for SAS2.0 controllers
   6  * Copyright (c) 2008-2012, LSI Logic Corporation.
   7  * All rights reserved.
   8  *
   9  * Version:
  10  * Author:
  11  *              Swaminathan K S
  12  *              Arun Chandrashekhar
  13  *              Manju R
  14  *              Rasheed
  15  *              Shakeel Bukhari
  16  */
  17 
  18 
  19 #include <stddef.h>
  20 #include <sys/types.h>
  21 #include <sys/file.h>
  22 #include <sys/atomic.h>
  23 #include <sys/scsi/scsi.h>
  24 #include <sys/byteorder.h>
  25 #include "ld_pd_map.h"
  26 #include "mr_sas.h"
  27 #include "fusion.h"
  28 
  29 
  30 // Pre-TB command size and TB command size.
  31 #define MR_COMMAND_SIZE (64*20) // 1280 bytes
  32 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
  33 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
  34 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
  35 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *in_info);
  36 extern ddi_dma_attr_t mrsas_generic_dma_attr;
  37 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
  38 extern struct ddi_device_acc_attr endian_attr;
  39 extern int      debug_level_g;
  40 extern unsigned int     enable_fp;
  41 volatile int dump_io_wait_time = 90;
  42 extern void
  43 io_timeout_checker(void *arg);
  44 extern int
  45 mfi_state_transition_to_ready(struct mrsas_instance *instance);
  46 extern volatile int  debug_timeout_g;
  47 extern int      mrsas_issue_pending_cmds(struct mrsas_instance *);
  48 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
  49 extern void     push_pending_mfi_pkt(struct mrsas_instance *,
  50                         struct mrsas_cmd *);
  51 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
  52             MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
  53 
  54 static volatile int  debug_tbolt_fw_faults_after_ocr_g  = 0;
  55 
  56 /*
  57  * destroy_mfi_mpi_frame_pool
  58  */
  59 void
  60 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
  61 {
  62         int     i;
  63 
  64         struct mrsas_cmd        *cmd;
  65 
  66         /* return all mfi frames to pool */
  67         for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
  68                 cmd = instance->cmd_list[i];
  69                 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
  70                         (void) mrsas_free_dma_obj(instance,
  71                             cmd->frame_dma_obj);
  72                 cmd->frame_dma_obj_status  = DMA_OBJ_FREED;
  73         }
  74 }
  75 
  76 /*
  77  * destroy_mpi2_frame_pool
  78  */
  79 void
  80 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
  81 {
  82 
  83         if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
  84                 (void) mrsas_free_dma_obj(instance,
  85                     instance->mpi2_frame_pool_dma_obj);
  86                 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
  87         }
  88 }
  89 
  90 
  91 /*
  92  * mrsas_tbolt_free_additional_dma_buffer
  93  */
  94 void
  95 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
  96 {
  97         int i;
  98          if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
  99                 (void) mrsas_free_dma_obj(instance,
 100                     instance->mfi_internal_dma_obj);
 101                 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
 102         }
 103         if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
 104                 (void) mrsas_free_dma_obj(instance,
 105                     instance->mfi_evt_detail_obj);
 106                 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
 107         }
 108 
 109         for (i = 0; i < 2; i++) {
 110                 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
 111                         (void) mrsas_free_dma_obj(instance,
 112                         instance->ld_map_obj[i]);
 113                         instance->ld_map_obj[i].status = DMA_OBJ_FREED;
 114                 }
 115         }
 116 }
 117 
 118 
 119 /*
 120  * free_req_desc_pool
 121  */
 122 void
 123 free_req_rep_desc_pool(struct mrsas_instance *instance)
 124 {
 125         if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 126                 (void) mrsas_free_dma_obj(instance,
 127                     instance->request_desc_dma_obj);
 128                 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
 129         }
 130 
 131         if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 132                 (void) mrsas_free_dma_obj(instance,
 133                     instance->reply_desc_dma_obj);
 134                 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
 135         }
 136 
 137 
 138 }
 139 
 140 
 141 /*
 142  * ThunderBolt(TB) Request Message Frame Pool
 143  */
 144 int
 145 create_mpi2_frame_pool(struct mrsas_instance *instance)
 146 {
 147         int             i = 0;
 148         int             cookie_cnt;
 149         uint16_t        max_cmd;
 150         uint32_t        sgl_sz;
 151         uint32_t        raid_msg_size;
 152         uint32_t        total_size;
 153         uint32_t        offset;
 154         uint32_t        io_req_base_phys;
 155         uint8_t         *io_req_base;
 156         struct mrsas_cmd        *cmd;
 157 
 158         max_cmd = instance->max_fw_cmds;
 159 
 160         sgl_sz          = 1024;
 161         raid_msg_size   = MRSAS_THUNDERBOLT_MSG_SIZE;
 162 
 163         // Allocating additional 256 bytes to accomodate SMID 0.
 164         total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
 165             (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
 166 
 167         con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
 168             "max_cmd %x ", max_cmd));
 169 
 170         con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
 171             "request message frame pool size %x", total_size));
 172 
 173         /*
 174          * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
 175          * and then split the memory to 1024 commands. Each command should be
 176          * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
 177          * within it. Further refer the "alloc_req_rep_desc" function where
 178          * we allocate request/reply descriptors queues for a clue.
 179          */
 180 
 181         instance->mpi2_frame_pool_dma_obj.size = total_size;
 182         instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
 183         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
 184             0xFFFFFFFFU;
 185         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
 186             0xFFFFFFFFU;
 187         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
 188         instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
 189 
 190         if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
 191             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 192                 cmn_err(CE_WARN,
 193                     "mr_sas: could not alloc mpi2 frame pool");
 194                 return (DDI_FAILURE);
 195         }
 196 
 197         bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
 198         instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
 199 
 200         instance->io_request_frames =
 201             (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
 202         instance->io_request_frames_phy =
 203             (uint32_t)
 204             instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
 205 
 206         con_log(CL_DLEVEL3, (CE_NOTE,
 207             "io_request_frames 0x%x",
 208             instance->io_request_frames));
 209 
 210         con_log(CL_DLEVEL3, (CE_NOTE,
 211             "io_request_frames_phy 0x%x",
 212             instance->io_request_frames_phy));
 213 
 214         io_req_base = (uint8_t *)instance->io_request_frames +
 215             MRSAS_THUNDERBOLT_MSG_SIZE;
 216         io_req_base_phys = instance->io_request_frames_phy +
 217             MRSAS_THUNDERBOLT_MSG_SIZE;
 218 
 219         con_log(CL_DLEVEL3, (CE_NOTE,
 220             "io req_base_phys 0x%x", io_req_base_phys));
 221 
 222         for (i = 0; i < max_cmd; i++) {
 223                 cmd = instance->cmd_list[i];
 224 
 225                 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
 226 
 227                 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
 228                     ((uint8_t *)io_req_base + offset);
 229                 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
 230 
 231                 cmd->sgl = (Mpi2SGEIOUnion_t *)
 232                     ((uint8_t *)io_req_base +
 233                     (max_cmd * raid_msg_size) +  i * sgl_sz);
 234 
 235                 cmd->sgl_phys_addr =
 236                     (io_req_base_phys +
 237                     (max_cmd * raid_msg_size) + i * sgl_sz);
 238 
 239                 cmd->sense1 = (uint8_t *)
 240                     ((uint8_t *)io_req_base +
 241                     (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
 242                     (i * SENSE_LENGTH));
 243 
 244                 cmd->sense_phys_addr1 =
 245                     (io_req_base_phys +
 246                     (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
 247                     (i * SENSE_LENGTH));
 248 
 249 
 250                 cmd->SMID = i+1;
 251 
 252                 con_log(CL_DLEVEL3, (CE_NOTE,
 253                     "Frame Pool Addr [%x]0x%x",
 254                     cmd->index, cmd->scsi_io_request));
 255 
 256                 con_log(CL_DLEVEL3, (CE_NOTE,
 257                     "Frame Pool Phys Addr [%x]0x%x",
 258                     cmd->index, cmd->scsi_io_request_phys_addr));
 259 
 260                 con_log(CL_DLEVEL3, (CE_NOTE,
 261                     "Sense Addr [%x]0x%x",
 262                     cmd->index, cmd->sense1));
 263 
 264                 con_log(CL_DLEVEL3, (CE_NOTE,
 265                     "Sense Addr Phys [%x]0x%x",
 266                     cmd->index, cmd->sense_phys_addr1));
 267 
 268 
 269                 con_log(CL_DLEVEL3, (CE_NOTE,
 270                     "Sgl bufffers [%x]0x%x",
 271                     cmd->index, cmd->sgl));
 272 
 273                 con_log(CL_DLEVEL3, (CE_NOTE,
 274                     "Sgl bufffers phys  [%x]0x%x",
 275                     cmd->index, cmd->sgl_phys_addr));
 276         }
 277 
 278         return (DDI_SUCCESS);
 279 
 280 }
 281 
 282 
 283 /*
 284  * alloc_additional_dma_buffer for AEN
 285  */
 286 int
 287 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
 288 {
 289         uint32_t        internal_buf_size = PAGESIZE*2; 
 290         int i;
 291 
 292         /* Initialize buffer status as free */
 293         instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
 294         instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
 295         instance->ld_map_obj[0].status = DMA_OBJ_FREED;
 296         instance->ld_map_obj[1].status = DMA_OBJ_FREED;
 297 
 298 
 299         instance->mfi_internal_dma_obj.size = internal_buf_size;
 300         instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
 301         instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 302         instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 303         instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
 304 
 305         if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
 306                         (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 307                 cmn_err(CE_WARN,
 308                         "mr_sas: could not alloc reply queue");
 309                 return (DDI_FAILURE);
 310         }
 311 
 312         bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
 313 
 314         instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;  
 315         instance->internal_buf = (caddr_t)(((unsigned long)
 316         instance->mfi_internal_dma_obj.buffer));
 317         instance->internal_buf_dmac_add =
 318         instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address; 
 319         instance->internal_buf_size = internal_buf_size;
 320          
 321         /* allocate evt_detail */
 322         instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
 323         instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
 324         instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 325         instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 326         instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
 327         instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
 328 
 329         if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
 330             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 331                 cmn_err(CE_WARN,
 332                     "mrsas_tbolt_alloc_additional_dma_buffer: "
 333                     "could not allocate data transfer buffer.");
 334                 goto fail_tbolt_additional_buff;
 335         }
 336 
 337         bzero(instance->mfi_evt_detail_obj.buffer,
 338             sizeof (struct mrsas_evt_detail));
 339 
 340         instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
 341 
 342         instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
 343             (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
 344 
 345         for (i = 0; i < 2; i++) {
 346                 /* allocate the data transfer buffer */
 347                 instance->ld_map_obj[i].size = instance->size_map_info;
 348                 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
 349                 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 350                 instance->ld_map_obj[i].dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 351                 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
 352                 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
 353 
 354                 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
 355                         (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 356                         cmn_err(CE_WARN,
 357                         "could not allocate data transfer buffer.");
 358                         goto fail_tbolt_additional_buff;
 359                 }
 360 
 361                 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
 362 
 363                 (void) memset(instance->ld_map_obj[i].buffer, 0,
 364                 instance->size_map_info);
 365 
 366                 instance->ld_map[i] = (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
 367                 instance->ld_map_phy[i] =
 368                         (uint32_t)instance->ld_map_obj[i].dma_cookie[0].dmac_address;
 369 
 370                 con_log(CL_DLEVEL3, (CE_NOTE,
 371                         "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
 372 
 373                 con_log(CL_DLEVEL3, (CE_NOTE,
 374                         "size_map_info 0x%x", instance->size_map_info));
 375 
 376         }
 377 
 378         return (DDI_SUCCESS);
 379 
 380 fail_tbolt_additional_buff:
 381         mrsas_tbolt_free_additional_dma_buffer(instance);
 382 
 383         return (DDI_FAILURE);
 384 }
 385 
 386 MRSAS_REQUEST_DESCRIPTOR_UNION *
 387 mr_sas_get_request_descriptor(struct mrsas_instance *instance,
 388     uint16_t index, struct mrsas_cmd *cmd)
 389 {
 390         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 391 
 392         if (index > instance->max_fw_cmds) {
 393                 con_log(CL_ANN1, (CE_NOTE,
 394                     "Invalid SMID 0x%x request for descriptor", index));
 395                 con_log(CL_ANN1, (CE_NOTE,
 396                     "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
 397                 return (NULL);
 398         }
 399 
 400         req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
 401             ((char *)instance->request_message_pool +
 402             (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
 403 
 404         con_log(CL_ANN1, (CE_NOTE,
 405             "request descriptor : 0x%08lx\n", (unsigned long)req_desc));
 406 
 407         con_log(CL_ANN1, (CE_NOTE,
 408             "request descriptor base phy : 0x%08lx\n",
 409             (unsigned long)instance->request_message_pool_phy));
 410 
 411         return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
 412 }
 413 
 414 
 415 /*
 416  * Allocate Request and Reply  Queue Descriptors.
 417  */
 418 int
 419 alloc_req_rep_desc(struct mrsas_instance *instance)
 420 {
 421         uint32_t        request_q_sz, reply_q_sz;
 422         int             i, max_request_q_sz, max_reply_q_sz;
 423         uint64_t        request_desc;
 424         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
 425         uint64_t        *reply_ptr;
 426 
 427         /*
 428          * ThunderBolt(TB) There's no longer producer consumer mechanism.
 429          * Once we have an interrupt we are supposed to scan through the list of
 430          * reply descriptors and process them accordingly. We would be needing
 431          * to allocate memory for 1024 reply descriptors
 432          */
 433 
 434         /* Allocate Reply Descriptors */
 435         con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
 436             sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
 437 
 438         // reply queue size should be multiple of 16
 439         max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
 440 
 441         reply_q_sz = 8 * max_reply_q_sz;
 442 
 443 
 444         con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
 445             sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
 446 
 447         instance->reply_desc_dma_obj.size = reply_q_sz;
 448         instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
 449         instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 450         instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 451         instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
 452         instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
 453 
 454         if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
 455                         (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 456                 cmn_err(CE_WARN,
 457                     "mr_sas: could not alloc reply queue");
 458                 return (DDI_FAILURE);
 459         }
 460 
 461         bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
 462         instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
 463 
 464         // virtual address of  reply queue
 465         instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
 466             instance->reply_desc_dma_obj.buffer);
 467 
 468         instance->reply_q_depth = max_reply_q_sz;
 469 
 470         con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
 471             instance->reply_q_depth));
 472 
 473         con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%x",
 474             instance->reply_frame_pool));
 475 
 476         /* initializing reply address to 0xFFFFFFFF */
 477         reply_desc = instance->reply_frame_pool;
 478 
 479         for (i = 0; i < instance->reply_q_depth; i++) {
 480                 reply_desc->Words = (uint64_t)~0;
 481                 reply_desc++;
 482         }
 483 
 484 
 485         instance->reply_frame_pool_phy =
 486             (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
 487 
 488         con_log(CL_ANN1, (CE_NOTE,
 489             "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
 490 
 491 
 492         instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
 493             reply_q_sz);
 494 
 495         con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
 496             instance->reply_pool_limit_phy));
 497 
 498 
 499         con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
 500             sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
 501 
 502         /* Allocate Request Descriptors */
 503         con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
 504             sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
 505 
 506         request_q_sz = 8 *
 507             (instance->max_fw_cmds);
 508 
 509         instance->request_desc_dma_obj.size = request_q_sz;
 510         instance->request_desc_dma_obj.dma_attr      = mrsas_generic_dma_attr;
 511         instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 512         instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 513         instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen      = 1;
 514         instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
 515 
 516         if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
 517             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 518                 cmn_err(CE_WARN,
 519                     "mr_sas: could not alloc request queue desc");
 520                 goto fail_undo_reply_queue;
 521         }
 522 
 523         bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
 524         instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
 525 
 526         /* virtual address of  request queue desc */
 527         instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
 528             (instance->request_desc_dma_obj.buffer);
 529 
 530         instance->request_message_pool_phy =
 531             (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
 532 
 533         max_request_q_sz = instance->max_fw_cmds;
 534 
 535         return (DDI_SUCCESS);
 536 
 537 fail_undo_reply_queue:
 538         if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 539                 (void) mrsas_free_dma_obj(instance,
 540                     instance->reply_desc_dma_obj);
 541                 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
 542         }
 543 
 544         return (DDI_FAILURE);
 545 }
 546 
 547 /*
 548  * mrsas_alloc_cmd_pool_tbolt
 549  * TODO: merge tbolt-specific codee into mrsas_alloc_cmd_pool() to have single routine
 550  */
 551 int
 552 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
 553 {
 554         int             i;
 555         int             count;
 556         uint32_t        max_cmd;
 557         uint32_t        reserve_cmd;
 558         size_t          sz;
 559 
 560         struct mrsas_cmd        *cmd;
 561 
 562         max_cmd = instance->max_fw_cmds;
 563         con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
 564             "max_cmd %x", max_cmd));
 565 
 566 
 567         sz = sizeof (struct mrsas_cmd *) * max_cmd;
 568 
 569         /*
 570          * instance->cmd_list is an array of struct mrsas_cmd pointers.
 571          * Allocate the dynamic array first and then allocate individual
 572          * commands.
 573          */
 574         instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
 575         if (instance->cmd_list == NULL) {
 576                 con_log(CL_NONE, (CE_WARN,
 577                     "Failed to allocate memory for cmd_list"));
 578                 return (DDI_FAILURE);
 579         }
 580 
 581         /* create a frame pool and assign one frame to each cmd */
 582         for (count = 0; count < max_cmd; count++) {
 583                 instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
 584                     KM_SLEEP);
 585                 if (instance->cmd_list[count] == NULL) {
 586                         con_log(CL_NONE, (CE_WARN,
 587                                 "Failed to allocate memory for mrsas_cmd"));
 588                         goto mrsas_undo_cmds;
 589                 }
 590         }
 591 
 592         /* add all the commands to command pool */
 593 
 594         INIT_LIST_HEAD(&instance->cmd_pool_list);
 595         INIT_LIST_HEAD(&instance->cmd_pend_list);
 596         INIT_LIST_HEAD(&instance->cmd_app_pool_list);
 597 
 598         reserve_cmd     = MRSAS_APP_RESERVED_CMDS;
 599 
 600         for (i = 1; i < reserve_cmd; i++) {    //cmd index 0 reservered for IOC INIT
 601                 cmd             = instance->cmd_list[i];
 602                 cmd->index   = i;
 603                 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
 604         }
 605         
 606 
 607         for (i = reserve_cmd; i < max_cmd; i++) {
 608                 cmd             = instance->cmd_list[i];
 609                 cmd->index   = i;
 610                 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
 611         }
 612 
 613         return (DDI_SUCCESS);
 614 
 615 mrsas_undo_cmds:
 616         if (count > 0) {
 617                 /* free each cmd */
 618                 for (i = 0; i < count; i++) {
 619                         if (instance->cmd_list[i] != NULL)
 620                                 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
 621                         instance->cmd_list[i] = NULL;
 622                 }
 623         }
 624 
 625 mrsas_undo_cmd_list:
 626         if (instance->cmd_list != NULL)
 627                 kmem_free(instance->cmd_list,sz);
 628         instance->cmd_list = NULL;
 629 
 630         return (DDI_FAILURE);
 631 }
 632 
 633 
 634 /*
 635  * free_space_for_mpi2
 636  */
 637 void
 638 free_space_for_mpi2(struct mrsas_instance *instance)
 639 {
 640         /* already freed */
 641         if (instance->cmd_list == NULL) {
 642                 return;
 643         }
 644 
 645         /* First free the additional DMA buffer */
 646         mrsas_tbolt_free_additional_dma_buffer(instance);
 647 
 648         /* Free the request/reply descriptor pool */
 649         free_req_rep_desc_pool(instance);
 650 
 651         /*  Free the MPI message pool */
 652         destroy_mpi2_frame_pool(instance);
 653 
 654         /* Free the MFI frame pool */
 655         destroy_mfi_frame_pool(instance);
 656 
 657         /* Free all the commands in the cmd_list */
 658         /* Free the cmd_list buffer itself */
 659         mrsas_free_cmd_pool(instance);
 660 }
 661 
 662 
 663 /*
 664  * ThunderBolt(TB) memory allocations for commands/messages/frames.
 665  */
 666 int
 667 alloc_space_for_mpi2(struct mrsas_instance *instance)
 668 {
 669         /* Allocate command pool ( memory for cmd_list & individual commands )*/
 670         if (mrsas_alloc_cmd_pool_tbolt(instance)) {
 671                 cmn_err(CE_WARN, "Error creating cmd pool");
 672                 return (DDI_FAILURE); 
 673         }
 674 
 675         /* Initialize single reply size and Message size */
 676         instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
 677         instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
 678 
 679         instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
 680             (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
 681             sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
 682         instance->max_sge_in_chain = (MR_COMMAND_SIZE -
 683             MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
 684 
 685         /* Reduce SG count by 1 to take care of group cmds feature in FW */
 686         instance->max_num_sge = (instance->max_sge_in_main_msg +
 687             instance->max_sge_in_chain - 2);
 688         instance->chain_offset_mpt_msg =
 689             offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
 690         instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
 691             sizeof (MPI2_SGE_IO_UNION)) / 16;
 692         instance->reply_read_index = 0;
 693 
 694 
 695         /* Allocate Request and Reply descriptors Array */
 696         /* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
 697         if (alloc_req_rep_desc(instance)) {
 698                 cmn_err(CE_WARN,
 699                         "Error, allocating memory for descripter-pool");
 700                 goto mpi2_undo_cmd_pool;
 701         }
 702         con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
 703             instance->request_message_pool_phy));
 704 
 705 
 706         /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
 707         if (create_mfi_frame_pool(instance)) {
 708                 cmn_err(CE_WARN,
 709                         "Error, allocating memory for MFI frame-pool");
 710                 goto mpi2_undo_descripter_pool;
 711         }
 712 
 713 
 714         /* Allocate MPI2 Message pool */
 715         /*
 716          * Make sure the buffer is alligned to 256 for raid message packet
 717          * create a io request pool and assign one frame to each cmd
 718          */
 719 
 720         if (create_mpi2_frame_pool(instance)) {
 721                 cmn_err(CE_WARN,
 722                         "Error, allocating memory for MPI2 Message-pool");
 723                 goto mpi2_undo_mfi_frame_pool;
 724         }
 725 
 726 #ifdef DEBUG
 727         con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
 728             instance->max_sge_in_main_msg));
 729         con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
 730             instance->max_sge_in_chain));
 731         con_log(CL_ANN1, (CE_CONT,
 732             "[max_sge]0x%x", instance->max_num_sge));
 733         con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
 734             instance->chain_offset_mpt_msg));
 735         con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
 736             instance->chain_offset_io_req));
 737 #endif
 738 
 739 
 740         /* Allocate additional dma buffer */
 741         if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
 742                 cmn_err(CE_WARN,
 743                         "Error, allocating tbolt additional DMA buffer");
 744                 goto mpi2_undo_message_pool;
 745         }
 746 
 747         return (DDI_SUCCESS);
 748 
 749 mpi2_undo_message_pool:
 750         destroy_mpi2_frame_pool(instance);
 751 
 752 mpi2_undo_mfi_frame_pool:
 753         destroy_mfi_frame_pool(instance);
 754 
 755 mpi2_undo_descripter_pool:
 756         free_req_rep_desc_pool(instance);
 757 
 758 mpi2_undo_cmd_pool:
 759         mrsas_free_cmd_pool(instance);
 760 
 761         return (DDI_FAILURE);
 762 }
 763 
 764 
 765 /*
 766  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 767  */
 768 int
 769 mrsas_init_adapter_tbolt (struct mrsas_instance *instance)
 770 {
 771 
 772         /*
 773          * Reduce the max supported cmds by 1. This is to ensure that the
 774          * reply_q_sz (1 more than the max cmd that driver may send)
 775          * does not exceed max cmds that the FW can support
 776          */
 777 
 778         if (instance->max_fw_cmds > 1008) {
 779                 instance->max_fw_cmds = 1008;
 780                 instance->max_fw_cmds = instance->max_fw_cmds-1;
 781         }
 782 
 783         con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
 784                     " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
 785 
 786 
 787         /* create a pool of commands */
 788         if ( alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
 789                 cmn_err(CE_WARN,
 790                     " alloc_space_for_mpi2() failed.");
 791 
 792                 return (DDI_FAILURE);
 793         }
 794 
 795         /* Send ioc init message */
 796         if ( mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
 797                 cmn_err(CE_WARN,
 798                     " mrsas_issue_init_mpi2() failed.");
 799 
 800                 goto fail_init_fusion;
 801         }
 802 
 803         instance->unroll.alloc_space_mpi2 = 1;
 804 
 805         con_log(CL_ANN, (CE_NOTE,
 806                     "mrsas_init_adapter_tbolt: SUCCESSFULL\n"));
 807 
 808         return (DDI_SUCCESS);
 809 
 810 fail_init_fusion:
 811 
 812 fail_undo_alloc_mpi2:
 813         free_space_for_mpi2(instance);
 814 
 815         return (DDI_FAILURE);
 816 }
 817 
 818 
 819 
 820 /*
 821  * init_mpi2
 822  */
 823 int
 824 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
 825 {
 826         dma_obj_t init2_dma_obj;
 827         int ret_val = DDI_SUCCESS;
 828 
 829         /* allocate DMA buffer for IOC INIT message */
 830         init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
 831         init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
 832         init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 833         init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 834         init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
 835         init2_dma_obj.dma_attr.dma_attr_align = 256;
 836 
 837         if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
 838             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 839                 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
 840                     "could not allocate data transfer buffer.");
 841                 return (DDI_FAILURE);
 842         }
 843         (void) memset(init2_dma_obj.buffer, 2,
 844             sizeof (Mpi2IOCInitRequest_t));
 845 
 846         con_log(CL_ANN1, (CE_NOTE,
 847             "mrsas_issue_init_mpi2 _phys adr: %x \n",
 848             init2_dma_obj.dma_cookie[0].dmac_address));
 849 
 850 
 851         /* Initialize and send ioc init message */
 852         ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj,
 853             init2_dma_obj.acc_handle);
 854         if (ret_val == DDI_FAILURE) {
 855                 con_log(CL_ANN1, (CE_WARN,
 856                     "mrsas_issue_init_mpi2: Failed\n"));
 857                 goto fail_init_mpi2;
 858         }
 859 
 860         /* free IOC init DMA buffer */
 861         if (mrsas_free_dma_obj(instance, init2_dma_obj)
 862             != DDI_SUCCESS) {
 863                 con_log(CL_ANN1, (CE_WARN,
 864                     "mrsas_issue_init_mpi2: Free Failed\n"));
 865                 return (DDI_FAILURE);
 866         }
 867 
 868 
 869         /* Get/Check and sync ld_map info */
 870         instance->map_id = 0;
 871         if( mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS ) 
 872                 mrsas_tbolt_sync_map_info(instance);
 873         
 874         con_log(CL_ANN, (CE_NOTE,
 875                     "mrsas_issue_init_mpi2: SUCCESSFULL\n"));
 876 
 877         return (DDI_SUCCESS);
 878 
 879 fail_init_mpi2:
 880         mrsas_free_dma_obj(instance, init2_dma_obj);
 881 
 882         return (DDI_FAILURE);
 883 }
 884 
 885 int
 886 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj,
 887     ddi_acc_handle_t accessp)
 888 {
 889         int                             numbytes, i;
 890         int ret                         = DDI_SUCCESS;
 891         uint16_t                        flags;
 892         int                             status;
 893         timespec_t                      time;
 894         uint64_t                        mSec;
 895         uint32_t msecs                  = MFI_POLL_TIMEOUT_SECS * MILLISEC;
 896         struct mrsas_init_frame2        *mfiFrameInit2;
 897         struct mrsas_header             *frame_hdr;
 898         Mpi2IOCInitRequest_t            *init;
 899         struct mrsas_cmd                *cmd = NULL;
 900         struct mrsas_drv_ver            drv_ver_info;
 901         MRSAS_REQUEST_DESCRIPTOR_UNION  *req_desc;
 902 
 903 
 904         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 905 
 906 
 907 #ifdef DEBUG
 908         con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
 909             sizeof (*mfiFrameInit2)));
 910         con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", sizeof (*init)));
 911         con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
 912             sizeof (struct mrsas_init_frame2)));
 913         con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
 914             sizeof (Mpi2IOCInitRequest_t)));
 915 #endif
 916 
 917         init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
 918         numbytes = sizeof (*init);
 919         bzero(init, numbytes);
 920 
 921         ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
 922             MPI2_FUNCTION_IOC_INIT);
 923 
 924         ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
 925             MPI2_WHOINIT_HOST_DRIVER);
 926 
 927         /* set MsgVersion and HeaderVersion host driver was built with */
 928         ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
 929             MPI2_VERSION);
 930 
 931         ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
 932             MPI2_HEADER_VERSION);
 933 
 934         ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
 935             instance->raid_io_msg_size / 4);
 936 
 937         ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
 938             0);
 939 
 940         ddi_put16(mpi2_dma_obj->acc_handle,
 941             &init->ReplyDescriptorPostQueueDepth,
 942             instance->reply_q_depth);
 943         /*
 944          * These addresses are set using the DMA cookie addresses from when the
 945          * memory was allocated.  Sense buffer hi address should be 0.
 946          * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
 947          */
 948 
 949         ddi_put32(mpi2_dma_obj->acc_handle,
 950             &init->SenseBufferAddressHigh, 0);
 951 
 952         ddi_put64(mpi2_dma_obj->acc_handle,
 953             (uint64_t *)&init->SystemRequestFrameBaseAddress,
 954             instance->io_request_frames_phy);
 955 
 956         ddi_put64(mpi2_dma_obj->acc_handle,
 957             &init->ReplyDescriptorPostQueueAddress,
 958             instance->reply_frame_pool_phy);
 959 
 960         ddi_put64(mpi2_dma_obj->acc_handle,
 961             &init->ReplyFreeQueueAddress, 0);
 962 
 963         cmd = instance->cmd_list[0];
 964         if (cmd == NULL) {
 965                 return (DDI_FAILURE);
 966         }
 967         cmd->retry_count_for_ocr = 0;
 968         cmd->pkt = NULL;
 969         cmd->drv_pkt_time = 0;
 970 
 971         mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
 972         con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%x", mfiFrameInit2));
 973 
 974         frame_hdr = &cmd->frame->hdr;
 975 
 976         ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
 977             MFI_CMD_STATUS_POLL_MODE);
 978 
 979         flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
 980 
 981         flags   |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
 982 
 983         ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
 984 
 985         con_log(CL_ANN, (CE_CONT,
 986             "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
 987 
 988         // Init the MFI Header
 989         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
 990             &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
 991 
 992         con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
 993 
 994         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
 995             &mfiFrameInit2->cmd_status,
 996             MFI_STAT_INVALID_STATUS);
 997 
 998         con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
 999 
1000         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1001             &mfiFrameInit2->queue_info_new_phys_addr_lo,
1002             mpi2_dma_obj->dma_cookie[0].dmac_address);
1003 
1004         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1005             &mfiFrameInit2->data_xfer_len,
1006             sizeof (Mpi2IOCInitRequest_t));
1007 
1008         con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1009             init->ReplyDescriptorPostQueueAddress));
1010 
1011         /* fill driver version information*/    
1012         fill_up_drv_ver(&drv_ver_info);
1013         
1014         /* allocate the driver version data transfer buffer */
1015         instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
1016         instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1017         instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1018         instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1019         instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1020         instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1021 
1022         if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1023             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1024                 cmn_err(CE_WARN,
1025                     "fusion init: Could not allocate driver version buffer.");
1026                 return (DDI_FAILURE);
1027         }
1028         /* copy driver version to dma  buffer*/
1029         (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
1030         ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1031                 (uint8_t *)drv_ver_info.drv_ver,
1032                 (uint8_t *)instance->drv_ver_dma_obj.buffer, 
1033                 sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);        
1034 
1035         /*send driver version physical address to firmware*/
1036         ddi_put64(cmd->frame_dma_obj.acc_handle,
1037             &mfiFrameInit2->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1038 
1039         con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1040             mfiFrameInit2->queue_info_new_phys_addr_lo,
1041             sizeof (Mpi2IOCInitRequest_t)));
1042 
1043         con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1044 
1045         con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1046             cmd->scsi_io_request_phys_addr, sizeof (struct mrsas_init_frame2)));
1047 
1048         /* disable interrupts before sending INIT2 frame */
1049         instance->func_ptr->disable_intr(instance);
1050 
1051         req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1052             instance->request_message_pool;
1053         req_desc->Words = cmd->scsi_io_request_phys_addr;
1054         req_desc->MFAIo.RequestFlags =
1055             (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1056 
1057         cmd->request_desc = req_desc;
1058 
1059         /* issue the init frame */
1060         instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1061 
1062         con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1063         con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
1064             frame_hdr->cmd_status));
1065 
1066         if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1067             &mfiFrameInit2->cmd_status) == 0) {
1068                 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1069                 ret = DDI_SUCCESS;
1070         } else {
1071                 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1072                 mrsas_dump_reply_desc(instance);
1073                 goto fail_ioc_init;
1074         }
1075 
1076         mrsas_dump_reply_desc(instance);
1077 
1078         instance->unroll.verBuff = 1;
1079 
1080         con_log(CL_ANN, (CE_NOTE,
1081                     "mrsas_tbolt_ioc_init: SUCCESSFULL\n"));
1082 
1083 
1084         return (DDI_SUCCESS);
1085 
1086 
1087 fail_ioc_init:
1088 
1089         mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1090 
1091         return (DDI_FAILURE);
1092 }
1093 
1094 int wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1095 {
1096         int i;
1097         uint32_t wait_time = dump_io_wait_time;
1098         for (i = 0; i < wait_time; i++) {
1099                 /*
1100                  * Check For Outstanding poll Commands
1101                  * except ldsync command and aen command
1102                  */
1103                 if (instance->fw_outstanding <= 2) {
1104                         break;
1105                 }
1106                 drv_usecwait(10*MILLISEC);
1107                 /* complete commands from reply queue */
1108                 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1109         }
1110         if (instance->fw_outstanding > 2) {
1111                 return (1);
1112         }
1113         return (0);
1114 }
1115 /*
1116  * scsi_pkt handling
1117  *
1118  * Visible to the external world via the transport structure.
1119  */
1120 
1121 int
1122 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1123 {
1124         struct mrsas_instance   *instance = ADDR2MR(ap);
1125         struct scsa_cmd         *acmd = PKT2CMD(pkt);
1126         struct mrsas_cmd        *cmd = NULL;
1127         int                     rval, i;
1128         uchar_t                 cmd_done = 0;
1129         Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1130         uint32_t                msecs = 120 * MILLISEC;
1131 
1132         con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1133         if (instance->deadadapter == 1) {
1134                 cmn_err(CE_WARN,
1135                     "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1136                     "for IO, as the HBA doesnt take any more IOs");
1137                 if (pkt) {
1138                         pkt->pkt_reason              = CMD_DEV_GONE;
1139                         pkt->pkt_statistics  = STAT_DISCON;
1140                 }
1141                 return (TRAN_FATAL_ERROR);
1142         }
1143         if (instance->adapterresetinprogress) {
1144                 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1145                     "returning mfi_pkt and setting TRAN_BUSY\n"));
1146                 return (TRAN_BUSY);
1147         }
1148         rval = mrsas_tbolt_prepare_pkt(acmd);
1149 
1150         cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1151 
1152         /*
1153          * Check if the command is already completed by the mrsas_build_cmd()
1154          * routine. In which case the busy_flag would be clear and scb will be
1155          * NULL and appropriate reason provided in pkt_reason field
1156          */
1157         if (cmd_done) {
1158                 pkt->pkt_reason = CMD_CMPLT;
1159                 pkt->pkt_scbp[0] = STATUS_GOOD;
1160                 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1161                     | STATE_SENT_CMD;
1162                 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1163                         (*pkt->pkt_comp)(pkt);
1164                 }
1165 
1166                 return (TRAN_ACCEPT);
1167         }
1168 
1169         if (cmd == NULL) {
1170                 return (TRAN_BUSY);
1171         }
1172 
1173 
1174         if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1175                 if (instance->fw_outstanding > instance->max_fw_cmds) {
1176                         cmn_err(CE_WARN,
1177                             "Command Queue Full... Returning BUSY \n");
1178                         return_raid_msg_pkt(instance, cmd);
1179                         return (TRAN_BUSY);
1180                 }
1181 
1182                 /* Synchronize the Cmd frame for the controller */
1183                 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1184                     DDI_DMA_SYNC_FORDEV);
1185 
1186                 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1187                     "cmd->index:0x%x SMID %0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
1188 
1189                 instance->func_ptr->issue_cmd(cmd, instance);
1190 
1191                 return (TRAN_ACCEPT);
1192 
1193         } else {
1194                 instance->func_ptr->issue_cmd(cmd, instance);     
1195                 (void) wait_for_outstanding_poll_io(instance);
1196                 return (TRAN_ACCEPT);
1197         }
1198 }
1199 
1200 /*
1201  * prepare the pkt:
1202  * the pkt may have been resubmitted or just reused so
1203  * initialize some fields and do some checks.
1204  */
1205 int
1206 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1207 {
1208         struct scsi_pkt *pkt = CMD2PKT(acmd);
1209 
1210 
1211         /*
1212          * Reinitialize some fields that need it; the packet may
1213          * have been resubmitted
1214          */
1215         pkt->pkt_reason = CMD_CMPLT;
1216         pkt->pkt_state = 0;
1217         pkt->pkt_statistics = 0;
1218         pkt->pkt_resid = 0;
1219 
1220         /*
1221          * zero status byte.
1222          */
1223         *(pkt->pkt_scbp) = 0;
1224 
1225         return (0);
1226 }
1227 
1228 
1229 int
1230 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1231     struct scsa_cmd *acmd,
1232     struct mrsas_cmd *cmd,
1233     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1234     uint32_t *datalen)
1235 {
1236         uint32_t                MaxSGEs;
1237         int                     sg_to_process;
1238         uint32_t                i, j, SGEdwords = 0;
1239         uint32_t                numElements, endElement;
1240         Mpi25IeeeSgeChain64_t   *ieeeChainElement = NULL;
1241         Mpi25IeeeSgeChain64_t   *scsi_raid_io_sgl_ieee = NULL;
1242         uint32_t                SGLFlags = 0;
1243 
1244         con_log(CL_ANN1, (CE_NOTE,
1245             "chkpnt: Building Chained SGL :%d", __LINE__));
1246 
1247         /* Calulate SGE size in number of Words(32bit) */
1248         /* Clear the datalen before updating it. */
1249         *datalen = 0;
1250 
1251         SGEdwords = sizeof (Mpi25IeeeSgeChain64_t) / 4;
1252 
1253         MaxSGEs = instance->max_sge_in_main_msg;
1254 
1255         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1256             &scsi_raid_io->SGLFlags,
1257             MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1258 
1259         // set data transfer flag.
1260         if (acmd->cmd_flags & CFLAG_DMASEND) {
1261                 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1262                     &scsi_raid_io->Control,
1263                     MPI2_SCSIIO_CONTROL_WRITE);
1264         } else {
1265                 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1266                     &scsi_raid_io->Control, MPI2_SCSIIO_CONTROL_READ);
1267         }
1268 
1269         
1270         numElements = acmd->cmd_cookiecnt;
1271 
1272         con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1273 
1274         if (numElements > instance->max_num_sge) {
1275                 con_log(CL_ANN, (CE_NOTE,
1276                     "[Max SGE Count Exceeded]:%x", numElements));
1277                 return (numElements);   
1278         }
1279 
1280         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1281             &scsi_raid_io->RaidContext.numSGE, (uint8_t)numElements);
1282 
1283         /* set end element in main message frame */
1284         endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1285 
1286         /* prepare the scatter-gather list for the firmware */
1287         scsi_raid_io_sgl_ieee =
1288             (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1289 
1290         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1291                 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1292                 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1293                 
1294                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1295                     &sgl_ptr_end->Flags, 0);
1296         }
1297 
1298         for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1299                 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1300                     &scsi_raid_io_sgl_ieee->Address,
1301                     acmd->cmd_dmacookies[i].dmac_laddress);
1302 
1303                 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1304                     &scsi_raid_io_sgl_ieee->Length,
1305                     acmd->cmd_dmacookies[i].dmac_size);
1306 
1307                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1308                     &scsi_raid_io_sgl_ieee->Flags, 0);
1309         
1310                 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1311                         if (i == (numElements - 1))
1312                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1313                                         &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1314                 }
1315 
1316                 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1317 
1318 #ifdef DEBUG
1319                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]:%llx",
1320                     scsi_raid_io_sgl_ieee->Address));
1321                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1322                     scsi_raid_io_sgl_ieee->Length));
1323                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1324                     scsi_raid_io_sgl_ieee->Flags));
1325 #endif
1326 
1327         }
1328 
1329         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1330             &scsi_raid_io->ChainOffset, 0);
1331 
1332         /* check if chained SGL required */
1333         if (i < numElements) {
1334 
1335                 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1336         
1337                 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1338                         uint16_t        ioFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1339                                                       &scsi_raid_io->IoFlags);
1340 
1341                         if ((ioFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1342                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1343                                         &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
1344                         else
1345                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1346                                         &scsi_raid_io->ChainOffset, 0);
1347                 }
1348                 else {
1349                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1350                                 &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
1351                 }
1352 
1353                 /* prepare physical chain element */
1354                 ieeeChainElement  = scsi_raid_io_sgl_ieee;
1355 
1356                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1357                     &ieeeChainElement->NextChainOffset, 0);
1358 
1359                 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) 
1360                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1361                                 &ieeeChainElement->Flags, IEEE_SGE_FLAGS_CHAIN_ELEMENT );
1362                 else
1363                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1364                                 &ieeeChainElement->Flags,
1365                                 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1366                    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1367 
1368                 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1369                     &ieeeChainElement->Length,
1370                     (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1371 
1372                 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1373                     &ieeeChainElement->Address,
1374                     (U64)cmd->sgl_phys_addr);
1375 
1376                 sg_to_process = numElements - i;
1377 
1378                 con_log(CL_ANN1, (CE_NOTE,
1379                     "[Additional SGE Count]:%x", endElement));
1380 
1381                 /* point to the chained SGL buffer */
1382                 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1383 
1384                 /* build rest of the SGL in chained buffer */
1385                 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1386                         con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1387 
1388                         ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1389                             &scsi_raid_io_sgl_ieee->Address,
1390                             acmd->cmd_dmacookies[i].dmac_laddress);
1391 
1392                         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1393                             &scsi_raid_io_sgl_ieee->Length,
1394                             acmd->cmd_dmacookies[i].dmac_size);
1395 
1396                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1397                             &scsi_raid_io_sgl_ieee->Flags, 0);
1398 
1399                         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1400                                 if (i == (numElements - 1))
1401                                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1402                                                 &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1403                         }
1404 
1405                         *datalen += acmd->cmd_dmacookies[i].dmac_size;
1406 
1407 #if DEBUG
1408                         con_log(CL_DLEVEL1, (CE_NOTE,
1409                             "[SGL Address]:%llx",
1410                             scsi_raid_io_sgl_ieee->Address));
1411                         con_log(CL_DLEVEL1, (CE_NOTE,
1412                             "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1413                         con_log(CL_DLEVEL1, (CE_NOTE,
1414                             "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1415 #endif
1416 
1417                         i++;
1418                 }
1419         }
1420 
1421         return (0);
1422 } /*end of BuildScatterGather */
1423 
1424 
1425 /*
1426  * build_cmd
1427  */
1428 struct mrsas_cmd *
1429 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1430     struct scsi_pkt *pkt, uchar_t *cmd_done)
1431 {
1432         uint8_t         fp_possible = 0;
1433         uint16_t        flags = 0;
1434         uint32_t        i, index;
1435         uint32_t        context;
1436         uint32_t        sge_bytes;
1437         uint8_t         ChainOffsetValue;
1438         uint32_t        SGLFlags;
1439         uint32_t        lba_count=0;
1440         uint32_t        start_lba_hi=0; 
1441         uint32_t        start_lba_lo=0;
1442         ddi_acc_handle_t acc_handle;
1443         struct mrsas_cmd                *cmd = NULL;
1444         struct scsa_cmd                 *acmd = PKT2CMD(pkt);
1445         MRSAS_REQUEST_DESCRIPTOR_UNION  *ReqDescUnion;
1446         Mpi2RaidSCSIIORequest_t         *scsi_raid_io;
1447         Mpi25IeeeSgeChain64_t           *scsi_raid_io_sgl_ieee;
1448         uint32_t                        datalen;
1449         struct IO_REQUEST_INFO io_info;
1450         MR_FW_RAID_MAP_ALL *local_map_ptr;
1451         MR_LD_RAID *raid;
1452         U32     ld;
1453         uint16_t pd_cmd_cdblen;
1454 
1455         con_log(CL_DLEVEL1, (CE_NOTE,
1456             "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1457 
1458         /* find out if this is logical or physical drive command.  */
1459         acmd->islogical = MRDRV_IS_LOGICAL(ap);
1460         acmd->device_id = MAP_DEVICE_ID(instance, ap);
1461 
1462         *cmd_done = 0;
1463 
1464         /* get the command packet */
1465         if (!(cmd = get_raid_msg_pkt(instance))) {
1466                 return (NULL);
1467         }
1468 
1469         index = cmd->index;
1470         ReqDescUnion =  mr_sas_get_request_descriptor(instance, index, cmd);
1471         ReqDescUnion->Words = 0;
1472         ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1473         ReqDescUnion->SCSIIO.RequestFlags =
1474             (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1475             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1476 
1477 
1478         cmd->request_desc = ReqDescUnion;
1479         cmd->pkt = pkt;
1480         cmd->cmd = acmd;
1481 
1482         /* lets get the command directions */
1483         if (acmd->cmd_flags & CFLAG_DMASEND) {
1484                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1485                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
1486                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
1487                             DDI_DMA_SYNC_FORDEV);
1488                 }
1489         } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1490                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1491                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
1492                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
1493                             DDI_DMA_SYNC_FORCPU);
1494                 }
1495         } else {
1496                 con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
1497         }
1498 
1499 
1500         // get SCSI_IO raid message frame pointer
1501         scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1502 
1503         /* zero out SCSI_IO raid message frame */
1504         memset(scsi_raid_io, 0, sizeof(Mpi2RaidSCSIIORequest_t));
1505 
1506         /*Set the ldTargetId set by BuildRaidContext() */
1507         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1508                     &scsi_raid_io->RaidContext.ldTargetId,
1509                     acmd->device_id);
1510 
1511         /*  Copy CDB to scsi_io_request message frame */
1512         ddi_rep_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1513                     (uint8_t *)pkt->pkt_cdbp,
1514                     (uint8_t *)scsi_raid_io->CDB.CDB32,
1515                     acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1516 
1517         /* 
1518         * Just the CDB length,rest of the Flags are zero
1519         * This will be modified later.
1520         */
1521         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1522                     &scsi_raid_io->IoFlags,
1523                     acmd->cmd_cdblen);     
1524 
1525         pd_cmd_cdblen = acmd->cmd_cdblen;
1526 
1527         switch (pkt->pkt_cdbp[0]) {
1528         case SCMD_READ:
1529         case SCMD_WRITE:
1530         case SCMD_READ_G1:
1531         case SCMD_WRITE_G1:
1532         case SCMD_READ_G4:
1533         case SCMD_WRITE_G4:
1534         case SCMD_READ_G5:
1535         case SCMD_WRITE_G5:
1536 
1537         if (acmd->islogical) {
1538                 /* Initialize sense Information */
1539                 if (cmd->sense1 == NULL) {
1540                         con_log(CL_ANN, (CE_NOTE,
1541                                 "tbolt_build_cmd: Sense buffer ptr NULL \n"));
1542                 }
1543                 bzero(cmd->sense1, SENSE_LENGTH);
1544                 con_log(CL_DLEVEL2, (CE_NOTE,
1545                     "tbolt_build_cmd CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1546 
1547                 if (acmd->cmd_cdblen == CDB_GROUP0) {                        /* 6-byte cdb */
1548                         lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1549                         start_lba_lo =
1550                             ((uint32_t)(pkt->pkt_cdbp[3]) |
1551                             ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1552                             ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) << 16));
1553                 } else if (acmd->cmd_cdblen == CDB_GROUP1) {         /* 10-byte cdb */
1554                         lba_count =
1555                             (((uint16_t)(pkt->pkt_cdbp[8])) |
1556                             ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1557 
1558                         start_lba_lo =
1559                             (((uint32_t)(pkt->pkt_cdbp[5])) |
1560                             ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1561                             ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1562                             ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1563 
1564                 } else if (acmd->cmd_cdblen == CDB_GROUP5) {         /* 12-byte cdb */
1565                         lba_count = (
1566                             ((uint32_t)(pkt->pkt_cdbp[9])) |
1567                             ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1568                             ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1569                             ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1570 
1571                         start_lba_lo =
1572                             (((uint32_t)(pkt->pkt_cdbp[5])) |
1573                             ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1574                             ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1575                             ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1576 
1577                 } else if (acmd->cmd_cdblen == CDB_GROUP4) {         /* 16-byte cdb */
1578                         lba_count = (
1579                             ((uint32_t)(pkt->pkt_cdbp[13])) |
1580                             ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1581                             ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1582                             ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1583 
1584                         start_lba_lo = (
1585                             ((uint32_t)(pkt->pkt_cdbp[9])) |
1586                             ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1587                             ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1588                             ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1589 
1590                         start_lba_hi = (
1591                             ((uint32_t)(pkt->pkt_cdbp[5])) |
1592                             ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1593                             ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1594                             ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1595                 }
1596 
1597                 if (instance->tbolt && 
1598                                 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer) )
1599                         cmn_err(CE_WARN," IO SECTOR COUNT exceeds controller limit 0x%x sectors\n", lba_count);
1600 
1601                 memset(&io_info, 0, sizeof (struct IO_REQUEST_INFO));
1602                 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
1603                 io_info.numBlocks = lba_count;
1604                 io_info.ldTgtId = acmd->device_id;
1605 
1606                 if (acmd->cmd_flags & CFLAG_DMASEND)
1607                         io_info.isRead = 0;
1608                 else
1609                         io_info.isRead = 1;
1610 
1611                 
1612                 /*Aquire SYNC MAP UPDATE lock */
1613                 mutex_enter(&instance->sync_map_mtx);
1614 
1615                 local_map_ptr = instance->ld_map[(instance->map_id & 1)];
1616 
1617                 if  ( (MR_TargetIdToLdGet(acmd->device_id, local_map_ptr) >= MAX_LOGICAL_DRIVES) || !instance->fast_path_io ){
1618                         cmn_err(CE_NOTE,
1619                                 "Fast Path NOT Possible, targetId >= MAX_LOGICAL_DRIVES || !instance->fast_path_io\n");
1620                         fp_possible = 0;
1621                         /* Set Regionlock flags to BYPASS
1622                         io_request->RaidContext.regLockFlags  = 0; */
1623                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1624                                     &scsi_raid_io->RaidContext.regLockFlags, 0);
1625                 } else {
1626                         if (MR_BuildRaidContext(instance, &io_info,
1627                                         &scsi_raid_io->RaidContext, local_map_ptr))
1628                                 fp_possible = io_info.fpOkForIo;
1629                 }
1630 
1631                 if (!enable_fp) {
1632                         fp_possible = 0;  
1633                 }
1634                 con_log(CL_ANN1, (CE_NOTE,
1635                                 "enable_fp %d  instance->fast_path_io %d fp_possible %d \n",
1636                                 enable_fp, instance->fast_path_io, fp_possible));
1637 
1638                 if (fp_possible) {
1639 
1640                         /* Check for DIF enabled LD */  
1641                         if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1642                                 /* Prepare 32 Byte CDB for DIF capable Disk */
1643                                 mrsas_tbolt_prepare_cdb(instance,
1644                                     scsi_raid_io->CDB.CDB32,
1645                                     &io_info,
1646                                     scsi_raid_io,
1647                                     start_lba_lo);
1648                         } else {
1649                                 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1650                                    (uint8_t *)&pd_cmd_cdblen, io_info.pdBlock, io_info.numBlocks, 0);
1651                                 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1652                                                 &scsi_raid_io->IoFlags,
1653                                                 pd_cmd_cdblen); 
1654                         }
1655 
1656                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1657                             &scsi_raid_io->Function,
1658                             MPI2_FUNCTION_SCSI_IO_REQUEST);
1659 
1660                         ReqDescUnion->SCSIIO.RequestFlags =
1661                             (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1662                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663 
1664                         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1665                                 uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1666                                                                 &scsi_raid_io->RaidContext.regLockFlags);
1667                                 uint16_t IoFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1668                                                                 &scsi_raid_io->IoFlags);
1669 
1670                                 if (regLockFlags == REGION_TYPE_UNUSED)
1671                                         ReqDescUnion->SCSIIO.RequestFlags = 
1672                                                 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1673 
1674                                 IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1675                                 regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
1676 
1677                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1678                                                         &scsi_raid_io->ChainOffset, 0);
1679                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1680                                                     &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
1681                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1682                                                     &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
1683                                 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1684                                                     &scsi_raid_io->IoFlags, IoFlags);
1685                         }
1686 
1687                         if ((instance->load_balance_info[acmd->device_id].loadBalanceFlag) && (io_info.isRead)) {
1688                                 io_info.devHandle = get_updated_dev_handle(&instance->load_balance_info[acmd->device_id], &io_info);
1689                                 cmd->load_balance_flag |= MEGASAS_LOAD_BALANCE_FLAG;
1690                         } else
1691                                 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
1692 
1693                         ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle; 
1694                         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1695                             &scsi_raid_io->DevHandle,
1696                             io_info.devHandle);
1697 
1698                 } else {
1699                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1700                             &scsi_raid_io->Function,
1701                             MPI2_FUNCTION_LD_IO_REQUEST);
1702 
1703                         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1704                             &scsi_raid_io->DevHandle, acmd->device_id);
1705 
1706                         ReqDescUnion->SCSIIO.RequestFlags =
1707                             (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1708                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1709 
1710                         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1711                                             &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
1712 
1713                         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1714                                 uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1715                                                                         &scsi_raid_io->RaidContext.regLockFlags);
1716 
1717                                 if (regLockFlags == REGION_TYPE_UNUSED)
1718                                         ReqDescUnion->SCSIIO.RequestFlags = 
1719                                                 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1720 
1721                                 regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
1722 
1723                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1724                                                     &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
1725                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1726                                                     &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
1727                         }
1728         
1729                 } /* Not FP */
1730 
1731                 /*Release SYNC MAP UPDATE lock */
1732                 mutex_exit(&instance->sync_map_mtx);
1733 
1734                 
1735                 /* Set sense buffer physical address/length in scsi_io_request.*/
1736                 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1737                     &scsi_raid_io->SenseBufferLowAddress,
1738                     cmd->sense_phys_addr1);
1739                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1740                     &scsi_raid_io->SenseBufferLength,
1741                     SENSE_LENGTH); 
1742                         
1743                 /* Construct SGL*/                      
1744                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1745                     &scsi_raid_io->SGLOffset0,
1746                     offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1747 
1748                 mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1749                     scsi_raid_io, &datalen);
1750 
1751                 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1752                     &scsi_raid_io->DataLength, datalen);
1753 
1754                 break;
1755 
1756         }
1757         else {
1758 #ifndef PDSUPPORT       /* if PDSUPPORT, skip break and fall through */
1759                 break;
1760 #endif
1761         }
1762         /* fall through For all non-rd/wr cmds */
1763         default:
1764                 switch (pkt->pkt_cdbp[0]) {
1765                 case 0x35: { // SCMD_SYNCHRONIZE_CACHE
1766                         return_raid_msg_pkt(instance, cmd);
1767                         *cmd_done = 1;
1768                         return (NULL);
1769                 }
1770 
1771                 case SCMD_MODE_SENSE:
1772                 case SCMD_MODE_SENSE_G1: {
1773                         union scsi_cdb  *cdbp;
1774                         uint16_t        page_code;
1775 
1776                         cdbp = (void *)pkt->pkt_cdbp;
1777                         page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1778                         switch (page_code) {
1779                         case 0x3:
1780                         case 0x4:
1781                                 (void) mrsas_mode_sense_build(pkt);
1782                                 return_raid_msg_pkt(instance, cmd);
1783                                 *cmd_done = 1;
1784                                 return (NULL);
1785                         }
1786                         break;
1787                 }
1788 
1789                 default: {
1790                         /* Here we need to handle PASSTHRU for
1791                            Logical Devices. Like Inquiry etc.*/
1792 
1793                         if(!(acmd->islogical)) {
1794 
1795                                 /* Aquire SYNC MAP UPDATE lock */
1796                                 mutex_enter(&instance->sync_map_mtx);
1797 
1798                                 local_map_ptr = instance->ld_map[(instance->map_id & 1)];     
1799                         
1800                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1801                                     &scsi_raid_io->Function, MPI2_FUNCTION_SCSI_IO_REQUEST);
1802                         
1803                                 ReqDescUnion->SCSIIO.RequestFlags =
1804                                         (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1805                                                 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1806 
1807                                 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1808                                         &scsi_raid_io->DevHandle,
1809                                                 local_map_ptr->raidMap.devHndlInfo[acmd->device_id].curDevHdl);
1810                 
1811 
1812                                 /*Set regLockFlasgs to REGION_TYPE_BYPASS */
1813                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1814                                         &scsi_raid_io->RaidContext.regLockFlags, 0);
1815                                 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1816                                         &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1817                                 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1818                                         &scsi_raid_io->RaidContext.regLockLength, 0);
1819                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.RAIDFlags,
1820                                         MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1821                                 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1822                                     &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
1823                                 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1824                                         &scsi_raid_io->RaidContext.ldTargetId, acmd->device_id);
1825                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1826                                         &scsi_raid_io->LUN[1], acmd->lun); 
1827 
1828                                 /* Release SYNC MAP UPDATE lock */
1829                                 mutex_exit(&instance->sync_map_mtx);
1830                         
1831                         } else {
1832                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1833                                                 &scsi_raid_io->Function, MPI2_FUNCTION_LD_IO_REQUEST);
1834                                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1835                                                 &scsi_raid_io->LUN[1], acmd->lun);
1836                                 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1837                                                 &scsi_raid_io->DevHandle, acmd->device_id);
1838                                 ReqDescUnion->SCSIIO.RequestFlags = 
1839                                         (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1840                         }
1841 
1842                         /* Set sense buffer physical address/length in scsi_io_request.*/
1843                         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1844                             &scsi_raid_io->SenseBufferLowAddress,
1845                             cmd->sense_phys_addr1);
1846                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1847                             &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1848 
1849                         /* Construct SGL*/                      
1850                         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1851                             &scsi_raid_io->SGLOffset0,
1852                             offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1853 
1854                         mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1855                             scsi_raid_io, &datalen);
1856 
1857                         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1858                             &scsi_raid_io->DataLength, datalen);
1859 
1860 
1861                         con_log(CL_ANN, (CE_CONT,
1862                             "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1863                             pkt->pkt_cdbp[0], acmd->device_id));
1864                         con_log(CL_DLEVEL1, (CE_CONT,
1865                             "data length = %x\n",
1866                             scsi_raid_io->DataLength));
1867                         con_log(CL_DLEVEL1, (CE_CONT,
1868                             "cdb length = %x\n",
1869                             acmd->cmd_cdblen));
1870                         }
1871                         break;
1872                 }
1873 
1874         }
1875 #ifdef lint
1876         context = context;
1877 #endif
1878 
1879         return (cmd);
1880 }
1881 
1882 /*
1883  * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1884  * @ap:
1885  * @pkt:
1886  * @bp:
1887  * @cmdlen:
1888  * @statuslen:
1889  * @tgtlen:
1890  * @flags:
1891  * @callback:
1892  *
1893  * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1894  * structure and DMA resources for a target driver request. The
1895  * tran_init_pkt() entry point is called when the target driver calls the
1896  * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1897  * is a request to perform one or more of three possible services:
1898  *  - allocation and initialization of a scsi_pkt structure
1899  *  - allocation of DMA resources for data transfer
1900  *  - reallocation of DMA resources for the next portion of the data transfer
1901  */
1902 struct scsi_pkt *
1903 mrsas_tbolt_tran_init_pkt(struct scsi_address *ap,
1904         register struct scsi_pkt *pkt,
1905         struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1906         int flags, int (*callback)(), caddr_t arg)
1907 {
1908         struct scsa_cmd *acmd;
1909         struct mrsas_instance   *instance;
1910         struct scsi_pkt *new_pkt;
1911 
1912         instance = ADDR2MR(ap);
1913 
1914         /* step #1 : pkt allocation */
1915         if (pkt == NULL) {
1916                 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1917                     tgtlen, sizeof (struct scsa_cmd), callback, arg);
1918                 if (pkt == NULL) {
1919                         return (NULL);
1920                 }
1921 
1922                 acmd = PKT2CMD(pkt);
1923 
1924                 /*
1925                  * Initialize the new pkt - we redundantly initialize
1926                  * all the fields for illustrative purposes.
1927                  */
1928                 acmd->cmd_pkt                = pkt;
1929                 acmd->cmd_flags              = 0;
1930                 acmd->cmd_scblen     = statuslen;
1931                 acmd->cmd_cdblen     = cmdlen;
1932                 acmd->cmd_dmahandle  = NULL;
1933                 acmd->cmd_ncookies   = 0;
1934                 acmd->cmd_cookie     = 0;
1935                 acmd->cmd_cookiecnt  = 0;
1936                 acmd->cmd_nwin               = 0;
1937 
1938                 pkt->pkt_address     = *ap;
1939                 pkt->pkt_comp                = (void (*)())NULL;
1940                 pkt->pkt_flags               = 0;
1941                 pkt->pkt_time                = 0;
1942                 pkt->pkt_resid               = 0;
1943                 pkt->pkt_state               = 0;
1944                 pkt->pkt_statistics  = 0;
1945                 pkt->pkt_reason              = 0;
1946                 new_pkt                 = pkt;
1947         } else {
1948                 acmd = PKT2CMD(pkt);
1949                 new_pkt = NULL;
1950         }
1951 
1952         /* step #2 : dma allocation/move */
1953         if (bp && bp->b_bcount != 0) {
1954                 if (acmd->cmd_dmahandle == NULL) {
1955                         if (mrsas_dma_alloc(instance, pkt, bp, flags,
1956                             callback) == DDI_FAILURE) {
1957                                 if (new_pkt) {
1958                                         scsi_hba_pkt_free(ap, new_pkt);
1959                                 }
1960                                 return ((struct scsi_pkt *)NULL);
1961                         }
1962                 } else {
1963                         if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1964                                 return ((struct scsi_pkt *)NULL);
1965                         }
1966                 }
1967         }
1968         return (pkt);
1969 }
1970 
1971 
1972 uint32_t
1973 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1974 {
1975         return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1976 }
1977 
1978 void
1979 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1980 {
1981         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1982         atomic_add_16(&instance->fw_outstanding, 1);
1983 
1984         struct scsi_pkt *pkt;
1985 
1986         con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1987 
1988         con_log(CL_DLEVEL1, (CE_CONT,
1989             " [req desc Words] %llx \n", req_desc->Words));
1990         con_log(CL_DLEVEL1, (CE_CONT,
1991             " [req desc low part] %x \n", req_desc->Words));
1992         con_log(CL_DLEVEL1, (CE_CONT,
1993             " [req desc high part] %x \n", (req_desc->Words >> 32)));
1994         pkt = cmd->pkt;
1995         
1996         if (pkt) {
1997                 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1998                     "ISSUED CMD TO FW : called : cmd:"
1999                     ": %p instance : %p pkt : %p pkt_time : %x\n",
2000                     gethrtime(), (void *)cmd, (void *)instance,
2001                     (void *)pkt, cmd->drv_pkt_time));
2002                 if (instance->adapterresetinprogress) {
2003                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2004                         con_log(CL_ANN, (CE_NOTE,
2005                             "TBOLT Reset the scsi_pkt timer"));
2006                 } else {
2007                         push_pending_mfi_pkt(instance, cmd);
2008                 }
2009 
2010         } else {
2011                 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2012                     "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2013                     "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2014         }
2015 
2016         /* Issue the command to the FW */
2017         mutex_enter(&instance->reg_write_mtx);
2018         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2019         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2020         mutex_exit(&instance->reg_write_mtx);
2021 }
2022 
2023 /*
2024  * issue_cmd_in_sync_mode
2025  */
2026 int
2027 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2028     struct mrsas_cmd *cmd)
2029 {
2030         int             i;
2031         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2032         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2033 
2034         struct mrsas_header     *hdr;
2035         hdr = (struct mrsas_header *)&cmd->frame->hdr;
2036 
2037         con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", cmd->SMID));
2038 
2039 
2040         if (instance->adapterresetinprogress) {
2041                 cmd->drv_pkt_time = ddi_get16
2042                     (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2043                 if (cmd->drv_pkt_time < debug_timeout_g)
2044                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2045                 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2046                     "RESET-IN-PROGRESS, issue cmd & return.\n"));
2047 
2048                 mutex_enter(&instance->reg_write_mtx);
2049                 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2050                 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2051                 mutex_exit(&instance->reg_write_mtx);
2052 
2053                 return (DDI_SUCCESS);
2054         } else {
2055                 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
2056                 push_pending_mfi_pkt(instance, cmd);
2057         }
2058 
2059         con_log(CL_DLEVEL2, (CE_NOTE,
2060             "HighQport offset :%lx",
2061             (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2062         con_log(CL_DLEVEL2, (CE_NOTE,
2063             "LowQport offset :%lx",
2064             (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2065 
2066         cmd->sync_cmd = MRSAS_TRUE;
2067         cmd->cmd_status =  ENODATA;
2068 
2069 
2070         mutex_enter(&instance->reg_write_mtx);
2071         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2072         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2073         mutex_exit(&instance->reg_write_mtx);
2074 
2075         con_log(CL_ANN1, (CE_NOTE,
2076             " req desc high part %x \n", (req_desc->Words >> 32)));
2077         con_log(CL_ANN1, (CE_NOTE,
2078             " req desc low part %x \n", req_desc->Words));
2079 
2080         mutex_enter(&instance->int_cmd_mtx);
2081         for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2082                 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2083         }
2084         mutex_exit(&instance->int_cmd_mtx);
2085 
2086 
2087         if (i < (msecs -1)) {
2088                 return (DDI_SUCCESS);
2089         } else {
2090                 return (DDI_FAILURE);
2091         }
2092 }
2093 
2094 /*
2095  * issue_cmd_in_poll_mode
2096  */
2097 int
2098 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2099     struct mrsas_cmd *cmd)
2100 {
2101         int             i;
2102         uint16_t        flags;
2103         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2104         struct mrsas_header *frame_hdr;
2105 
2106         con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));
2107         
2108         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2109 
2110         frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2111         ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2112             MFI_CMD_STATUS_POLL_MODE);
2113         flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2114         flags   |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2115         ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2116 
2117         con_log(CL_ANN1, (CE_NOTE,
2118             " req desc low part %x \n", req_desc->Words));
2119         con_log(CL_ANN1, (CE_NOTE,
2120             " req desc high part %x \n", (req_desc->Words >> 32)));
2121 
2122         /* issue the frame using inbound queue port */
2123         mutex_enter(&instance->reg_write_mtx);
2124         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2125         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2126         mutex_exit(&instance->reg_write_mtx);
2127 
2128         for (i = 0; i < msecs && (
2129             ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2130             == MFI_CMD_STATUS_POLL_MODE); i++) {
2131                 /* wait for cmd_status to change from 0xFF */
2132                 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2133         }
2134 
2135         if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2136             &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2137                 con_log(CL_ANN1, (CE_NOTE,
2138                     " cmd failed %x \n", (req_desc->Words)));
2139                 return (DDI_FAILURE);
2140         }
2141 
2142         return (DDI_SUCCESS);
2143 }
2144 
2145 void
2146 tbolt_enable_intr(struct mrsas_instance *instance)
2147 {
2148         uint32_t        mask;
2149 
2150         /* TODO: For Thunderbolt/Invader also clear intr on enable */
2151         //writel(~0, ®s->outbound_intr_status);
2152         //readl(®s->outbound_intr_status);    
2153 
2154         WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2155 
2156         /* dummy read to force PCI flush */
2157         mask = RD_OB_INTR_MASK(instance);
2158 
2159 }
2160 
2161 void
2162 tbolt_disable_intr(struct mrsas_instance *instance)
2163 {
2164         uint32_t mask = 0xFFFFFFFF;
2165         uint32_t status;
2166 
2167 
2168         WR_OB_INTR_MASK(mask, instance);
2169 
2170         /* Dummy readl to force pci flush */
2171 
2172         status = RD_OB_INTR_MASK(instance);
2173 }
2174 
2175 
2176 int
2177 tbolt_intr_ack(struct mrsas_instance *instance)
2178 {
2179         uint32_t        status;
2180 
2181         /* check if it is our interrupt */
2182         status = RD_OB_INTR_STATUS(instance);
2183         con_log(CL_ANN1, (CE_NOTE,
2184             "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
2185 
2186         if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2187                 return (DDI_INTR_UNCLAIMED);
2188         }
2189 
2190         if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2191                 /* clear the interrupt by writing back the same value */
2192                 WR_OB_INTR_STATUS(status, instance);
2193                 /* dummy READ */
2194                 RD_OB_INTR_STATUS(instance);
2195                 }
2196         return (DDI_INTR_CLAIMED);
2197 }
2198 
2199 /*
2200  * get_raid_msg_pkt : Get a command from the free pool
2201  * After successful allocation, the caller of this routine
2202  * must clear the frame buffer (memset to zero) before
2203  * using the packet further.
2204  *
2205  * ***** Note *****
2206  * After clearing the frame buffer the context id of the
2207  * frame buffer SHOULD be restored back.
2208  */
2209 
2210 struct mrsas_cmd *
2211 get_raid_msg_pkt(struct mrsas_instance *instance)
2212 {
2213         mlist_t                 *head = &instance->cmd_pool_list;
2214         struct mrsas_cmd        *cmd = NULL;
2215 
2216         mutex_enter(&instance->cmd_pool_mtx);
2217         ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2218 
2219 
2220         if (!mlist_empty(head)) {
2221                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2222                 mlist_del_init(head->next);
2223         }
2224         if (cmd != NULL) {
2225                 cmd->pkt = NULL;
2226                 cmd->retry_count_for_ocr = 0;
2227                 cmd->drv_pkt_time = 0;
2228         }
2229         mutex_exit(&instance->cmd_pool_mtx);
2230 
2231         if (cmd != NULL)
2232                 bzero(cmd->scsi_io_request,
2233                     sizeof (Mpi2RaidSCSIIORequest_t));
2234         return (cmd);
2235 }
2236 
2237 struct mrsas_cmd *
2238 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2239 {
2240         mlist_t                 *head = &instance->cmd_app_pool_list;
2241         struct mrsas_cmd        *cmd = NULL;
2242 
2243         mutex_enter(&instance->cmd_app_pool_mtx);
2244         ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2245 
2246         if (!mlist_empty(head)) {
2247                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2248                 mlist_del_init(head->next);
2249         }
2250         if (cmd != NULL) {
2251                 cmd->retry_count_for_ocr = 0;
2252                 cmd->drv_pkt_time = 0;
2253                 cmd->pkt = NULL;
2254                 cmd->request_desc = NULL;
2255 
2256         }
2257 
2258         mutex_exit(&instance->cmd_app_pool_mtx);
2259 
2260         if (cmd != NULL) {
2261                 bzero(cmd->scsi_io_request,
2262                     sizeof (Mpi2RaidSCSIIORequest_t));
2263         }
2264 
2265         return (cmd);
2266 }
2267 
2268 /*
2269  * return_raid_msg_pkt : Return a cmd to free command pool
2270  */
2271 void
2272 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2273 {
2274         mutex_enter(&instance->cmd_pool_mtx);
2275         ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2276 
2277 
2278         mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2279 
2280         mutex_exit(&instance->cmd_pool_mtx);
2281 }
2282 
2283 void
2284 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2285 {
2286         mutex_enter(&instance->cmd_app_pool_mtx);
2287         ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2288 
2289         mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2290 
2291         mutex_exit(&instance->cmd_app_pool_mtx);
2292 }
2293 
2294 
2295 void
2296 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2297     struct mrsas_cmd *cmd)
2298 {
2299         Mpi2RaidSCSIIORequest_t         *scsi_raid_io;
2300         Mpi25IeeeSgeChain64_t           *scsi_raid_io_sgl_ieee;
2301         MRSAS_REQUEST_DESCRIPTOR_UNION  *ReqDescUnion;
2302         uint32_t                        index;
2303 
2304         if (!instance->tbolt) {
2305                 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
2306                 return;
2307         }
2308 
2309         index = cmd->index;
2310 
2311         ReqDescUnion =
2312             mr_sas_get_request_descriptor(instance, index, cmd);
2313 
2314         if (!ReqDescUnion) {
2315                 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]%x"));
2316                 return;
2317         }
2318 
2319         con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2320 
2321         ReqDescUnion->Words = 0;
2322 
2323         ReqDescUnion->SCSIIO.RequestFlags =
2324             (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2325             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2326 
2327         ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2328 
2329         cmd->request_desc = ReqDescUnion;
2330 
2331         // get raid message frame pointer
2332         scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2333 
2334         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2335                 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2336                 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2337                 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2338                     &sgl_ptr_end->Flags, 0);
2339         }
2340 
2341         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2342             &scsi_raid_io->Function,
2343             MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2344 
2345         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2346             &scsi_raid_io->SGLOffset0,
2347             offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2348 
2349         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2350             &scsi_raid_io->ChainOffset,
2351             (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2352 
2353         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2354             &scsi_raid_io->SenseBufferLowAddress,
2355             cmd->sense_phys_addr1);
2356 
2357 
2358         scsi_raid_io_sgl_ieee =
2359             (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2360 
2361         ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
2362             &scsi_raid_io_sgl_ieee->Address,
2363             (U64)cmd->frame_phys_addr);
2364 
2365         ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2366             &scsi_raid_io_sgl_ieee->Flags,
2367             (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2368                    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2369         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2370             &scsi_raid_io_sgl_ieee->Length, 1024); //MEGASAS_MAX_SZ_CHAIN_FRAME
2371 
2372         con_log(CL_ANN1, (CE_NOTE,
2373             "[MFI CMD PHY ADDRESS]:%x",
2374             scsi_raid_io_sgl_ieee->Address));
2375         con_log(CL_ANN1, (CE_NOTE,
2376             "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2377         con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2378             scsi_raid_io_sgl_ieee->Flags));
2379 }
2380 
2381 
/*
 * tbolt_complete_cmd -	complete a single Thunderbolt/Invader command.
 * @instance:	adapter soft state
 * @cmd:	completed command (status already written into the
 *		frame by firmware)
 *
 * Dispatches on the MPI2 Function code of the request frame:
 *
 *   MPI2_FUNCTION_SCSI_IO_REQUEST  - fast-path I/O: release the
 *	load-balance arm reference, then FALL THROUGH to the regular
 *	LD I/O completion.
 *   MPI2_FUNCTION_LD_IO_REQUEST    - regular I/O: translate the MFI
 *	status into pkt_reason/pkt_scbp (building auto-request-sense
 *	where needed), invoke the completion callback, and return the
 *	command to the free pool.
 *   MPI2_FUNCTION_PASSTHRU_IO_REQUEST - internal MFI pass-through:
 *	RAID map sync, AEN, and sync-mode command completions.
 */
void
tbolt_complete_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	uint8_t				status;
	uint8_t				extStatus;
	uint8_t				arm;
	struct scsa_cmd			*acmd;
	struct scsi_pkt			*pkt;
	struct scsi_arq_status		*arqstat;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	LD_LOAD_BALANCE_INFO		*lbinfo;
	int i;	/* NOTE(review): unused */

	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* Completion status written into the frame by the firmware. */
	status = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->RaidContext.status);
	extStatus = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->RaidContext.extStatus);

	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));

	if (status != MFI_STAT_OK) {
		con_log(CL_ANN, (CE_WARN,
		    "IO Cmd Failed SMID %x", cmd->SMID));
	} else {
		con_log(CL_ANN, (CE_NOTE,
		    "IO Cmd Success  SMID %x", cmd->SMID));
	}

	/* regular commands */

	switch (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->Function)) {

	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
		acmd =  (struct scsa_cmd *)cmd->cmd;
		lbinfo = &instance->load_balance_info[acmd->device_id];

		/* Drop the per-arm pending count taken at submit time. */
		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
			arm = lbinfo->raid1DevHandle[0] ==
			    scsi_raid_io->DevHandle ? 0 : 1;

			lbinfo->scsi_pending_cmds[arm]--;
			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
		}
		con_log(CL_DLEVEL3, (CE_NOTE,
		    "FastPath IO Completion Success "));
		/* FALLTHROUGH - fast path completes like a regular LD IO. */

	case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
		acmd =  (struct scsa_cmd *)cmd->cmd;
		pkt =   (struct scsi_pkt *)CMD2PKT(acmd);

		/* Sync DMA data back to the CPU before completing. */
		if (acmd->cmd_flags & CFLAG_DMAVALID) {
			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
				(void) ddi_dma_sync(acmd->cmd_dmahandle,
				    acmd->cmd_dma_offset,
				    acmd->cmd_dma_len,
				    DDI_DMA_SYNC_FORCPU);
			}
		}

		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state = STATE_GOT_BUS
		    | STATE_GOT_TARGET | STATE_SENT_CMD
		    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		con_log(CL_ANN, (CE_CONT,
		    " CDB[0] = %x completed for %s: size %lx SMID %x cmd_status %x",
		    pkt->pkt_cdbp[0],
		    ((acmd->islogical) ? "LD" : "PD"),
		    acmd->cmd_dmacount, cmd->SMID, status));

		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
			struct scsi_inquiry	*inq;

			if (acmd->cmd_dmacount != 0) {
				bp_mapin(acmd->cmd_buf);
				inq = (struct scsi_inquiry *)
				    acmd->cmd_buf->b_un.b_addr;

				/* don't expose physical drives to OS */
				if (acmd->islogical &&
				    (status == MFI_STAT_OK)) {
					display_scsi_inquiry(
					    (caddr_t)inq);
				}
#ifdef PDSUPPORT
				/* With PD support, allow direct-access PDs. */
				else if ((status ==
				    MFI_STAT_OK) && inq->inq_dtype ==
				    DTYPE_DIRECT) {

					display_scsi_inquiry(
					    (caddr_t)inq);
				}
#endif
				else {
					/* for physical disk */
					status =
					    MFI_STAT_DEVICE_NOT_FOUND;
				}
			}
		}

		/* Map the MFI status onto the SCSI packet fields. */
		switch (status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_CC_IN_PROGRESS:
		case MFI_STAT_LD_RECON_IN_PROGRESS:
			/* Background CC/rebuild: the I/O itself succeeded. */
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			pkt->pkt_reason	= CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_IO_FAILED:
			cmn_err(CE_WARN,
			    "tbolt_complete_cmd: scsi_io failed");
			pkt->pkt_reason	= CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_WARN,
			    "tbolt_complete_cmd: scsi_done with error"));

			pkt->pkt_reason	= CMD_CMPLT;
			((struct scsi_status *)
			    pkt->pkt_scbp)->sts_chk = 1;

			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
				con_log(CL_ANN, (CE_WARN,
				    "TEST_UNIT_READY fail"));
			} else {
				/* Build auto-request-sense from the frame. */
				pkt->pkt_state |= STATE_ARQ_DONE;
				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |=
				    STATE_GOT_BUS | STATE_GOT_TARGET
				    | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;
				con_log(CL_ANN1, (CE_NOTE,
				    "Copying Sense data %x",
				    cmd->SMID));

				/* Copy the sense bytes out of DMA memory. */
				ddi_rep_get8(
				    instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    (uint8_t *)
				    &(arqstat->sts_sensedata),
				    cmd->sense1,
				    sizeof (struct scsi_extended_sense),
				    DDI_DEV_AUTOINCR);
			}
			break;
		case MFI_STAT_LD_OFFLINE:
			cmn_err(CE_WARN,
			    "tbolt_complete_cmd: ld offline "
			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x\n",
			    ddi_get8(instance->mpi2_frame_pool_dma_obj.
			    acc_handle, &scsi_raid_io->CDB.CDB32[0]),
			    ddi_get16(instance->mpi2_frame_pool_dma_obj.
			    acc_handle,
			    &scsi_raid_io->RaidContext.ldTargetId),
			    ddi_get16(instance->mpi2_frame_pool_dma_obj.
			    acc_handle, &scsi_raid_io->DevHandle));
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
			break;
		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: device not found error"));
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
			break;

		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
			/* Return ILLEGAL REQUEST sense, LBA out of range. */
			pkt->pkt_state |= STATE_ARQ_DONE;
			pkt->pkt_reason	= CMD_CMPLT;
			((struct scsi_status *)
			    pkt->pkt_scbp)->sts_chk = 1;

			arqstat = (void *)(pkt->pkt_scbp);
			arqstat->sts_rqpkt_reason = CMD_CMPLT;
			arqstat->sts_rqpkt_resid = 0;
			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA;
			*(uint8_t *)&arqstat->sts_rqpkt_status =
			    STATUS_GOOD;

			arqstat->sts_sensedata.es_valid = 1;
			arqstat->sts_sensedata.es_key =
			    KEY_ILLEGAL_REQUEST;
			arqstat->sts_sensedata.es_class =
			    CLASS_EXTENDED_SENSE;

			/*
			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
			 * ASC: 0x21h; ASCQ: 0x00h;
			 */
			arqstat->sts_sensedata.es_add_code = 0x21;
			arqstat->sts_sensedata.es_qual_code = 0x00;
			break;
		case MFI_STAT_INVALID_CMD:
		case MFI_STAT_INVALID_DCMD:
		case MFI_STAT_INVALID_PARAMETER:
		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
		default:
			cmn_err(CE_WARN,
			    "tbolt_complete_cmd: Unknown status!");
			pkt->pkt_reason	= CMD_TRAN_ERR;

			break;
		}

		atomic_add_16(&instance->fw_outstanding, (-1));

		/* Call the callback routine */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
		    pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));

		/* Clear the frame status bytes for the next use. */
		ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &scsi_raid_io->RaidContext.status, 0);

		ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &scsi_raid_io->RaidContext.extStatus, 0);

		return_raid_msg_pkt(instance, cmd);
		break;
	}
	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFA command. */

		/* RAID map sync completion (DCMD with mbox.b[1] == 1). */
		if (cmd->frame->dcmd.opcode
		    == MR_DCMD_LD_MAP_GET_INFO &&
		    cmd->frame->dcmd.mbox.b[1]
		    == 1) {

			mutex_enter(&instance->sync_map_mtx);

			con_log(CL_ANN, (CE_NOTE,
			    "LDMAP sync command  SMID RECEIVED 0x%X",
			    cmd->SMID));
			if (cmd->frame->hdr.cmd_status != 0) {
				cmn_err(CE_WARN,
				    "map sync failed, status = 0x%x.\n",
				    cmd->frame->hdr.cmd_status);
			} else {
				/* New map delivered; flip the active map. */
				instance->map_id++;
				cmn_err(CE_NOTE,
				    "map sync received, switched map_id to %ld \n",
				    instance->map_id);
			}

			/* Fast-path only if the new map validates. */
			if (MR_ValidateMapInfo(
			    instance->ld_map[(instance->map_id & 1)],
			    instance->load_balance_info))
				instance->fast_path_io = 1;
			else
				instance->fast_path_io = 0;

			con_log(CL_ANN, (CE_NOTE,
			    "instance->fast_path_io %d \n",
			    instance->fast_path_io));

			instance->unroll.syncCmd = 0;

			/* Re-arm the map sync command chain. */
			if (instance->map_update_cmd == cmd) {
				return_raid_msg_pkt(instance, cmd);
				atomic_add_16(&instance->fw_outstanding,
				    (-1));
				mrsas_tbolt_sync_map_info(instance);
			}

			cmn_err(CE_NOTE, "LDMAP sync completed.\n");
			mutex_exit(&instance->sync_map_mtx);
			break;
		}

		/* Async Event Notification completion. */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
			con_log(CL_ANN1, (CE_CONT,
			    "AEN command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if ((instance->aen_cmd == cmd) &&
			    (instance->aen_cmd->abort_aen)) {
				con_log(CL_ANN, (CE_WARN,
				    "mrsas_softintr: "
				    "aborted_aen returned"));
			} else {
				atomic_add_16(&instance->fw_outstanding,
				    (-1));
				service_mfi_aen(instance, cmd);
			}
		}

		/* Internal command issued in sync (blocking) mode. */
		if (cmd->sync_cmd == MRSAS_TRUE) {
			con_log(CL_ANN1, (CE_CONT,
			    "Sync-mode Command Response SMID RECEIVED 0x%X",
			    cmd->SMID));

			tbolt_complete_cmd_in_sync_mode(instance, cmd);
		} else {
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
			    cmd->SMID));
		}
		break;
	default:
		/* free message */
		con_log(CL_ANN, (CE_NOTE,
		    "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
		break;
	}
}
2694 
/*
 * mr_sas_tbolt_process_outstanding_cmd -	drain the reply queue.
 * @instance:	adapter soft state
 *
 * Walks the reply descriptor ring starting at reply_read_index,
 * completing the command named by each valid descriptor's SMID, until
 * an unused (all-0xFF) descriptor is reached.  Consumed descriptors
 * are reset to all-0xFF and the new read index is written back to the
 * controller's reply-post register.
 *
 * Returns DDI_INTR_CLAIMED if at least one descriptor was posted at
 * the current index, otherwise DDI_INTR_UNCLAIMED.
 */
uint_t
mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
{
	uint8_t				replyType;
	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
	Mpi2ReplyDescriptorsUnion_t	*desc;
	uint16_t			smid;
	union desc_value		d_val;
	struct mrsas_cmd		*cmd;
	uint32_t			i;	/* NOTE(review): unused */
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;	/* unused */
	uint8_t				status;	/* unused */

	struct mrsas_header	*hdr;
	struct scsi_pkt		*pkt;

	/*
	 * NOTE(review): a FORDEV sync immediately followed by FORCPU
	 * looks redundant before reading the ring -- confirm against
	 * the DMA binding attributes.
	 */
	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	desc = instance->reply_frame_pool;
	desc += instance->reply_read_index;

	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
	replyType = replyDesc->ReplyFlags &
	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Nothing posted at the current index: not our interrupt. */
	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return (DDI_INTR_UNCLAIMED);

	con_log(CL_ANN1, (CE_NOTE, "Reply Desc  = %llx  Words = %llx \n",
	    desc, desc->Words));

	d_val.word = desc->Words;

	/* Read Reply descriptor */
	while ((d_val.u1.low != 0xffffffff) &&
	    (d_val.u1.high != 0xffffffff)) {

		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		smid = replyDesc->SMID;

		/*
		 * SMIDs are 1-based.  NOTE(review): the upper bound
		 * allows max_fw_cmds + 1 -- confirm this matches the
		 * cmd_list allocation size.
		 */
		if (!smid || smid > instance->max_fw_cmds + 1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "Reply Desc at Break  = %llx  Words = %llx \n",
			    desc, desc->Words));
			break;
		}

		cmd = instance->cmd_list[smid - 1];
		if (!cmd) {
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas_tbolt_process_outstanding_cmd: Invalid command "
			    " or Poll commad Received in completion path\n"));
		} else {
			/* Unlink from the pending list before completing. */
			mutex_enter(&instance->cmd_pend_mtx);
			if (cmd->sync_cmd == MRSAS_TRUE) {
				hdr = (struct mrsas_header *)&cmd->frame->hdr;
				if (hdr) {
					con_log(CL_ANN1, (CE_NOTE,
					    "mr_sas_tbolt_process_outstanding_cmd:"
					    " mlist_del_init(&cmd->list).\n"));
					mlist_del_init(&cmd->list);
				}
			} else {
				pkt = cmd->pkt;
				if (pkt) {
					con_log(CL_ANN1, (CE_NOTE,
					    "mr_sas_tbolt_process_outstanding_cmd:"
					    "mlist_del_init(&cmd->list).\n"));
					mlist_del_init(&cmd->list);
				}
			}

			mutex_exit(&instance->cmd_pend_mtx);

			tbolt_complete_cmd(instance, cmd);
		}
		/* Mark the descriptor consumed: set it back to all 0xFF. */
		desc->Words = (uint64_t)~0;

		instance->reply_read_index++;

		if (instance->reply_read_index >= (instance->reply_q_depth)) {
			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
			instance->reply_read_index = 0;
		}

		/* Get the next reply descriptor */
		if (!instance->reply_read_index)
			desc = instance->reply_frame_pool;
		else
			desc++;

		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		con_log(CL_ANN1, (CE_NOTE,
		    "Next Reply Desc  = %llx Words = %llx\n",
		    desc, desc->Words));

		replyType = replyDesc->ReplyFlags &
		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

	} /* End of while loop. */

	/* update replyIndex to FW */
	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);
	return (DDI_INTR_CLAIMED);
}
2822 
2823 
2824 
2825 
2826 /*
2827  * complete_cmd_in_sync_mode -  Completes an internal command
2828  * @instance:                   Adapter soft state
2829  * @cmd:                        Command to be completed
2830  *
2831  * The issue_cmd_in_sync_mode() function waits for a command to complete
2832  * after it issues a command. This function wakes up that waiting routine by
2833  * calling wake_up() on the wait queue.
2834  */
2835 void
2836 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2837     struct mrsas_cmd *cmd)
2838 {
2839 
2840         cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2841             &cmd->frame->io.cmd_status);
2842 
2843         cmd->sync_cmd = MRSAS_FALSE;
2844 
2845         mutex_enter(&instance->int_cmd_mtx);
2846         if (cmd->cmd_status == ENODATA) {
2847                 cmd->cmd_status = 0;
2848         }
2849         cv_broadcast(&instance->int_cmd_cv);
2850         mutex_exit(&instance->int_cmd_mtx);
2851 
2852 }
2853 
2854 /*
2855  * mrsas_tbolt_get_ld_map_info -        Returns  ld_map structure
2856  * instance:                            Adapter soft state
2857  *
2858  * Issues an internal command (DCMD) to get the FW's controller PD
2859  * list structure.  This information is mainly used to find out SYSTEM
2860  * supported by the FW.
2861  */
2862 int
2863 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2864 {
2865         int ret = 0;
2866         struct mrsas_cmd        *cmd = NULL;
2867         struct mrsas_dcmd_frame *dcmd;
2868         MR_FW_RAID_MAP_ALL *ci;
2869         uint32_t ci_h = 0;
2870         U32 size_map_info;
2871 
2872         cmd = get_raid_msg_pkt(instance);
2873 
2874         if (cmd == NULL) {
2875                 cmn_err(CE_WARN,
2876                     "Failed to get a cmd from free-pool in get_ld_map_info()");
2877                 return (DDI_FAILURE);
2878         }
2879 
2880         dcmd = &cmd->frame->dcmd;
2881 
2882         size_map_info = sizeof (MR_FW_RAID_MAP) +
2883             (sizeof (MR_LD_SPAN_MAP) *
2884             (MAX_LOGICAL_DRIVES - 1));
2885 
2886         con_log(CL_ANN, (CE_NOTE,
2887             "size_map_info : 0x%x", size_map_info));
2888 
2889         ci = instance->ld_map[(instance->map_id & 1)];
2890         ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2891 
2892         if (!ci) {
2893                 cmn_err(CE_WARN,
2894                         "Failed to alloc mem for ld_map_info");
2895                 return_raid_msg_pkt(instance, cmd);
2896                 return (-1);
2897         }
2898 
2899         memset(ci, 0, sizeof (*ci));
2900         memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2901 
2902         dcmd->cmd = MFI_CMD_OP_DCMD;
2903         dcmd->cmd_status = 0xFF;
2904         dcmd->sge_count = 1;
2905         dcmd->flags = MFI_FRAME_DIR_READ;
2906         dcmd->timeout = 0;
2907         dcmd->pad_0 = 0;
2908         dcmd->data_xfer_len = size_map_info;
2909         dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2910         dcmd->sgl.sge32[0].phys_addr = ci_h;
2911         dcmd->sgl.sge32[0].length = size_map_info;
2912 
2913 
2914         mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2915 
2916         if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2917                 ret = 0;
2918                 con_log(CL_ANN1, (CE_NOTE,
2919                     "Get LD Map Info success\n"));
2920         } else {
2921                 cmn_err(CE_WARN,
2922                     "Get LD Map Info failed\n");
2923                 ret = -1;
2924         }
2925 
2926         return_raid_msg_pkt(instance, cmd);
2927 
2928         return (ret);
2929 }
2930 
2931 void
2932 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2933 {
2934         uint32_t i;
2935         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2936         union desc_value d_val;
2937 
2938         reply_desc = instance->reply_frame_pool;
2939 
2940         for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2941                 d_val.word = reply_desc->Words;
2942                 con_log(CL_DLEVEL3, (CE_NOTE,
2943                     "i=%d, %x:%x",
2944                     i, d_val.u1.high, d_val.u1.low));
2945         }
2946 }
2947 
2948 /**
2949  * mrsas_tbolt_command_create - Create command for fast path.
2950  * @io_info:    MegaRAID IO request packet pointer.
2951  * @ref_tag:    Reference tag for RD/WRPROTECT
2952  *
2953  * Create the command for fast path.
2954  */
2955 void
2956 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],struct IO_REQUEST_INFO *io_info,Mpi2RaidSCSIIORequest_t *scsi_io_request, U32 ref_tag)
2957 {
2958         uint16_t                EEDPFlags;
2959         uint32_t                Control;
2960         // Prepare 32-byte CDB if DIF is supported on this device
2961         con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
2962 
2963         memset(cdb, 0, 32);     
2964         
2965         cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2966 
2967                 
2968         cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2969 
2970         if (io_info->isRead) {
2971                 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2972         }
2973         else {
2974                 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2975         }
2976 
2977         cdb[10] = MRSAS_RD_WR_PROTECT;  // Verify with in linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL
2978 
2979         /* LOGICAL BLOCK ADDRESS */
2980         cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff); 
2981         cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2982         cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff); 
2983         cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff); 
2984         cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff); 
2985         cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff); 
2986         cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);  
2987         cdb[19] = (U8)((io_info->pdBlock) & 0xff);         
2988 
2989         /* Logical block reference tag */
2990         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2991                             &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2992                                                         BIG_ENDIAN(ref_tag));   
2993         
2994         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
2995             &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask,
2996             0xffff);
2997 
2998         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2999             &scsi_io_request->DataLength,
3000             ((io_info->numBlocks)*512));
3001         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3002                 &scsi_io_request->IoFlags,32);  /* Specify 32-byte cdb */
3003 
3004         /* Transfer length */
3005         cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);       
3006         cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3007         cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3008         cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3009 
3010         /* set SCSI IO EEDPFlags */
3011         EEDPFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3012                                                         &scsi_io_request->EEDPFlags);
3013         Control = ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3014                                                         &scsi_io_request->Control);
3015 
3016         // set SCSI IO EEDPFlags bits
3017         if (io_info->isRead) {
3018                 // For READ commands, the EEDPFlags shall be set to specify to
3019                 // Increment the Primary Reference Tag, to Check the Reference
3020                 // Tag, and to Check and Remove the Protection Information
3021                 // fields.
3022                 EEDPFlags       = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
3023                         MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG      |
3024                         MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP   |
3025                         MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG      |
3026                         MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3027         }
3028         else {
3029                 // For WRITE commands, the EEDPFlags shall be set to specify to
3030                 // Increment the Primary Reference Tag, and to Insert
3031                 // Protection Information fields.
3032                 EEDPFlags       = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
3033                         MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3034         }
3035         Control |= (0x4 << 26);
3036 
3037         ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3038                         &scsi_io_request->EEDPFlags, EEDPFlags);
3039         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3040                         &scsi_io_request->Control, Control);
3041         ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,      
3042             &scsi_io_request->EEDPBlockSize,
3043             MRSAS_EEDPBLOCKSIZE);
3044 }
3045 
3046 
3047 /*
3048  * mrsas_tbolt_set_pd_lba -     Sets PD LBA
3049  * @cdb:                CDB
3050  * @cdb_len:            cdb length
3051  * @start_blk:          Start block of IO
3052  *
3053  * Used to set the PD LBA in CDB for FP IOs
3054  */
3055 void
3056 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk, U32 num_blocks, U8 DifCapable)
3057 {
3058         U8 cdb_len = *cdb_len_ptr;
3059         U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3060 
3061         /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3062         if (((cdb_len == 12) || (cdb_len == 16)) && 
3063                  (start_blk <= 0xffffffff)) {
3064                 if (cdb_len == 16) {
3065                         con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(16) to READ10\n"));
3066                         opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3067                         flagvals = cdb[1];
3068                         groupnum = cdb[14];
3069                         control = cdb[15];
3070                 } else {
3071                         con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(12) to READ10\n"));
3072                         opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3073                         flagvals = cdb[1];
3074                         groupnum = cdb[10];
3075                         control = cdb[11];
3076                 }
3077 
3078                 memset(cdb, 0, sizeof(cdb));
3079 
3080                 cdb[0] = opcode;
3081                 cdb[1] = flagvals;
3082                 cdb[6] = groupnum;
3083                 cdb[9] = control;
3084                 /* Set transfer length */
3085                 cdb[8] = (U8)(num_blocks & 0xff);
3086                 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3087                 cdb_len = 10;
3088         } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3089                         /* Convert to 16 byte CDB for large LBA's */
3090                         con_log(CL_ANN, (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB\n"));
3091                         switch (cdb_len) {
3092                         case 6:
3093                                 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3094                                 control = cdb[5];
3095                                 break;
3096                         case 10:
3097                                 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3098                                 flagvals = cdb[1];
3099                                 groupnum = cdb[6];
3100                                 control = cdb[9];
3101                                 break;
3102                         case 12:
3103                                 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3104                                 flagvals = cdb[1];
3105                                 groupnum = cdb[10];
3106                                 control = cdb[11];
3107                                 break;
3108                         }
3109 
3110                         memset(cdb, 0, sizeof(cdb));
3111 
3112                         cdb[0] = opcode;
3113                         cdb[1] = flagvals;
3114                         cdb[14] = groupnum;
3115                         cdb[15] = control;
3116 
3117                         /* Transfer length */
3118                         cdb[13] = (U8)(num_blocks & 0xff);
3119                         cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3120                         cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3121                         cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3122 
3123                         /* Specify 16-byte cdb */
3124                         cdb_len = 16;
3125         } else  if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3126                                 /* convert to 10 byte CDB */
3127                                 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3128                                 control = cdb[5];
3129                 
3130                                 memset(cdb, 0, sizeof(cdb));
3131                                 cdb[0] = opcode;
3132                                 cdb[9] = control;
3133 
3134                                 /* Set transfer length */
3135                                 cdb[8] = (U8)(num_blocks & 0xff);
3136                                 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3137 
3138                                 /* Specify 10-byte cdb */
3139                                 cdb_len = 10;
3140         }
3141 
3142 
3143         /* Fall through Normal case, just load LBA here */
3144         switch (cdb_len) {
3145                 case 6:
3146                 {
3147                         U8 val = cdb[1] & 0xE0;
3148                         cdb[3] = (U8)(start_blk & 0xff);
3149                         cdb[2] = (U8)((start_blk >> 8) & 0xff);
3150                         cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3151                         break;
3152                 }
3153                 case 10:
3154                         cdb[5] = (U8)(start_blk & 0xff);
3155                         cdb[4] = (U8)((start_blk >> 8) & 0xff);
3156                         cdb[3] = (U8)((start_blk >> 16) & 0xff);
3157                         cdb[2] = (U8)((start_blk >> 24) & 0xff);
3158                         break;
3159                 case 12:
3160                         cdb[5]    = (U8)(start_blk & 0xff);
3161                         cdb[4]    = (U8)((start_blk >> 8) & 0xff);
3162                         cdb[3]    = (U8)((start_blk >> 16) & 0xff);
3163                         cdb[2]    = (U8)((start_blk >> 24) & 0xff);
3164                         break;
3165 
3166                 case 16:
3167                         cdb[9]  = (U8)(start_blk & 0xff);
3168                         cdb[8]  = (U8)((start_blk >> 8) & 0xff);
3169                         cdb[7]  = (U8)((start_blk >> 16) & 0xff);
3170                         cdb[6]  = (U8)((start_blk >> 24) & 0xff);
3171                         cdb[5]  = (U8)((start_blk >> 32) & 0xff);
3172                         cdb[4]  = (U8)((start_blk >> 40) & 0xff);
3173                         cdb[3]  = (U8)((start_blk >> 48) & 0xff);
3174                         cdb[2]  = (U8)((start_blk >> 56) & 0xff);
3175                         break;
3176         }
3177 
3178         *cdb_len_ptr = cdb_len;
3179 }
3180 
3181 
3182 U8
3183 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3184 {
3185         MR_FW_RAID_MAP_ALL *ld_map;
3186 
3187         if (!mrsas_tbolt_get_ld_map_info(instance)) {
3188 
3189                 ld_map = instance->ld_map[(instance->map_id & 1)];
3190 
3191                 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
3192                     ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3193 
3194                 if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info)) {
3195                         con_log(CL_ANN, (CE_CONT,
3196                                 "MR_ValidateMapInfo success"));
3197 
3198                         instance->fast_path_io = 1;  
3199                         con_log(CL_ANN, (CE_NOTE,
3200                                 "instance->fast_path_io %d \n",instance->fast_path_io));
3201 
3202                         return (DDI_SUCCESS);
3203                 }
3204 
3205         }
3206 
3207         instance->fast_path_io = 0;  
3208         cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3209         con_log(CL_ANN, (CE_NOTE,
3210                 "instance->fast_path_io %d \n",instance->fast_path_io));
3211 
3212         
3213         return (DDI_FAILURE);
3214 }
3215 /*
3216  * Marks HBA as bad. This will be called either when an
3217  * IO packet times out even after 3 FW resets
3218  * or FW is found to be fault even after 3 continuous resets.
3219  */
3220 
3221 int
3222 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3223 {
3224         cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
3225 
3226         if (instance->deadadapter == 1)
3227                 return (DDI_FAILURE);
3228 
3229         con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3230                     "Writing to doorbell with MFI_STOP_ADP "));
3231         mutex_enter(&instance->ocr_flags_mtx);
3232         instance->deadadapter = 1;
3233         mutex_exit(&instance->ocr_flags_mtx);
3234         instance->func_ptr->disable_intr(instance);
3235         WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3236         /* Flush */
3237         RD_RESERVED0_REGISTER(instance);        
3238 
3239         (void) mrsas_print_pending_cmds(instance);
3240         mrsas_complete_pending_cmds(instance);
3241         return (DDI_SUCCESS);
3242 }
3243 void  mrsas_reset_reply_desc(struct mrsas_instance *instance)
3244 {
3245         int i;
3246         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;       
3247         instance->reply_read_index= 0;
3248         
3249         /* initializing reply address to 0xFFFFFFFF */
3250         reply_desc = instance->reply_frame_pool;
3251 
3252         for (i = 0; i < instance->reply_q_depth; i++) {
3253                 reply_desc->Words = (uint64_t)~0;
3254                 reply_desc++;
3255         }
3256 }
3257 
/*
 * mrsas_tbolt_reset_ppc - online controller reset (OCR) for Thunderbolt.
 * Resets the IOC through the host diagnostic register, waits for FW to
 * become ready, re-initializes the MPI2 interface, and re-issues all
 * pending commands.  Returns DDI_SUCCESS/DDI_FAILURE; may kill the
 * adapter after repeated FW faults.
 */
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status=0x00;
	uint32_t retry = 0;
	uint32_t seq_num;	/* NOTE(review): unused in this function */
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;
	union mrsas_evt_class_locale	class_locale;	/* NOTE(review): unused */
	uint32_t abs_state;
	uint32_t i;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc entered\n "));

	/* A dead adapter is never reset again. */
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}

	/* Publish that an OCR is in progress so IO paths can defer. */
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
	    "adpterresetinprogress flag set, time %llx", gethrtime()));
	mutex_exit(&instance->ocr_flags_mtx);

	instance->func_ptr->disable_intr(instance);

	/*Add delay inorder to complete the ioctl & io cmds in-flight */
	/* ~3 seconds total: 3000 iterations of a 1ms busy-wait. */
	for (i = 0; i<3000; i++) {
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	instance->reply_read_index= 0;

retry_reset:
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    ":Resetting TBOLT "));

	/*
	 * Unlock the host diagnostic register by writing the magic key
	 * sequence to the inbound write-sequence register.
	 */
	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
	WR_TBOLT_IB_WRITE_SEQ(4, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
	WR_TBOLT_IB_WRITE_SEQ(2, instance);
	WR_TBOLT_IB_WRITE_SEQ(7, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_TBOLT_HOST_DIAG(instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
	    "to write sequence register\n"));

	/* Wait (up to ~10s) for any earlier reset request to clear. */
	while (status & DIAG_TBOLT_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_TBOLT_HOST_DIAG(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN,
			    "mrsas_tbolt_reset_ppc:"
			    "resetadapter bit is set already "
			    "check retry count %d\n", retry);
			return (DDI_FAILURE);
		}
	}

	/* Trigger the adapter reset through the diagnostic register. */
	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));

	/* Poll the reset-status register until FW clears the reset bit. */
	ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
		(uint8_t *)((uintptr_t)(instance)->regmap +
		RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);

	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
		delay(100 * drv_usectohz(MILLISEC));
		ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
			(uint8_t *)((uintptr_t)(instance)->regmap +
			RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);
		if (retry++ == 100) {
			/* Dont call kill adapter here */
			/* RESET BIT ADAPTER is cleared by firmare */
			//mrsas_tbolt_kill_adapter(instance);
			cmn_err(CE_WARN, "mr_sas %d: %s(): RESET FAILED; return failure!!!", instance->instance, __func__);
			return (DDI_FAILURE);
		}
	}

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Wait for FW to leave its earliest boot states before proceeding. */
	abs_state = instance->func_ptr->read_fw_status_reg(instance);
	retry = 0;
	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
		delay(100 * drv_usectohz(MILLISEC));
		abs_state = instance->func_ptr->read_fw_status_reg(instance);
	}
	if (abs_state <= MFI_STATE_FW_INIT) {
		cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
		    "state = 0x%x, RETRY RESET.\n", abs_state);
		goto retry_reset;
	}

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_tbolt_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;

		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		/* Debug knob: pretend the FW faulted to exercise this path. */
		if (debug_tbolt_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;

		con_log(CL_ANN, (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			// increment the count
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
				    "FW is in fault after OCR count %d "
					"Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				cmn_err(CE_WARN, "mrsas %d: %s:"
					"Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
					instance->instance, __func__, MAX_FW_RESET_COUNT);

				mrsas_tbolt_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}

	// reset the counter as FW is up after OCR
	instance->fw_fault_count_after_ocr = 0;

	/* Re-arm the reply queue before re-initializing the IOC. */
	mrsas_reset_reply_desc(instance);


	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_issue_init_mpi2"));
	abs_state = mrsas_issue_init_mpi2(instance);
	if(abs_state == DDI_FAILURE) {
	cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
	    "INIT failed Retrying Reset");
		goto retry_reset;
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "mrsas_issue_init_mpi2 Done"));

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	mrsas_print_pending_cmds(instance);
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	/* Re-queue everything that was outstanding when the OCR started. */
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	"issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling aen registration"));

	/* Re-issue the async event notification (AEN) command. */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);

	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
	return (DDI_SUCCESS);

}
3452 
3453 
3454 /*
3455  * mrsas_sync_map_info -        Returns FW's ld_map structure
3456  * @instance:                           Adapter soft state
3457  *
3458  * Issues an internal command (DCMD) to get the FW's controller PD
3459  * list structure.  This information is mainly used to find out SYSTEM
3460  * supported by the FW.
3461  */
3462 
int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
	int			ret = 0, i;
	struct mrsas_cmd	*cmd = NULL;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	LD_TARGET_SYNC *ci = NULL;
	MR_FW_RAID_MAP_ALL *map;
	MR_LD_RAID  *raid;
	LD_TARGET_SYNC *ld_sync;
	uint32_t ci_h = 0;
	uint32_t size_map_info;

	cmd = get_raid_msg_pkt(instance);

	if (cmd == NULL) {
		cmn_err(CE_WARN,
		    "Failed to get a cmd from free-pool in mrsas_tbolt_sync_map_info(). ");
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
			cmd->index);
	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));


	/* Current (active) map: map_id & 1 selects the active buffer. */
	map = instance->ld_map[instance->map_id & 1];

	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;

	con_log(CL_ANN, (CE_NOTE,
		    "size_sync_info =0x%x ; ld count = 0x%x \n ",
		    size_sync_info, num_lds));

	/*
	 * The inactive map buffer ((map_id - 1) & 1) is reused as the DMA
	 * payload area for the LD_TARGET_SYNC list sent to the firmware.
	 */
	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];

	memset(ci, 0, sizeof(MR_FW_RAID_MAP_ALL));
	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	ld_sync = (LD_TARGET_SYNC *)ci;

	/* Fill one entry per LD: target id and current sequence number. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);

		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
		    i, raid->seqNum, raid->flags.ldSyncRequired));

		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);

		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
		    i, ld_sync->ldTargetId));

		ld_sync->seqNum = raid->seqNum;
	}


	/* data_xfer_len covers the full map size, not just size_sync_info. */
	size_map_info = sizeof(MR_FW_RAID_MAP) +
		(sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	/* Build the DCMD; direction is WRITE (driver -> firmware). */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = 1; /* Pend */
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;


	/*
	 * Fire-and-forget: remember the command so its (asynchronous)
	 * completion can be recognized; this function does not wait.
	 */
	instance->map_update_cmd = cmd;
	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd(cmd, instance);

	/* Flag for detach/unroll: a sync cmd is outstanding. */
	instance->unroll.syncCmd = 1;
	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x",cmd->SMID));

	return (ret);
}
3555 
3556 /*
3557  * abort_syncmap_cmd
3558  */
3559 int
3560 abort_syncmap_cmd(struct mrsas_instance *instance,
3561     struct mrsas_cmd *cmd_to_abort)
3562 {
3563         int     ret = 0;
3564 
3565         struct mrsas_cmd                *cmd;
3566         struct mrsas_abort_frame        *abort_fr;
3567 
3568         con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3569 
3570         cmd = get_raid_msg_mfi_pkt(instance);
3571 
3572         if (!cmd) {
3573                 cmn_err(CE_WARN,
3574                     "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3575                 return (DDI_FAILURE);
3576         }
3577         /* Clear the frame buffer and assign back the context id */
3578         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3579         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3580             cmd->index);
3581 
3582         abort_fr = &cmd->frame->abort;
3583 
3584         /* prepare and issue the abort frame */
3585         ddi_put8(cmd->frame_dma_obj.acc_handle,
3586             &abort_fr->cmd, MFI_CMD_OP_ABORT);
3587         ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3588             MFI_CMD_STATUS_SYNC_MODE);
3589         ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3590         ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3591             cmd_to_abort->index);
3592         ddi_put32(cmd->frame_dma_obj.acc_handle,
3593             &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3594         ddi_put32(cmd->frame_dma_obj.acc_handle,
3595             &abort_fr->abort_mfi_phys_addr_hi, 0);
3596 
3597         cmd->frame_count = 1;
3598 
3599         mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3600 
3601         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3602                 con_log(CL_ANN1, (CE_WARN,
3603                     "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3604                 ret = -1;
3605         } else {
3606                 ret = 0;
3607         }
3608 
3609         return_raid_msg_mfi_pkt(instance, cmd);
3610 
3611         atomic_add_16(&instance->fw_outstanding, (-1));
3612 
3613         return (ret);
3614 }
3615 
3616 
3617 #ifdef PDSUPPORT
3618 int
3619 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3620     uint8_t lun, dev_info_t **ldip)
3621 {
3622         struct scsi_device *sd;
3623         dev_info_t *child;
3624         int rval, dtype;
3625         struct mrsas_tbolt_pd_info *pds = NULL;
3626         uint64_t *wwn;
3627 
3628 
3629         con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3630             tgt, lun));
3631 
3632         if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3633                 if (ldip) {
3634                         *ldip = child;
3635                 }
3636                 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3637                         rval = mrsas_service_evt(instance, tgt, 1,
3638                                 MRSAS_EVT_UNCONFIG_TGT, NULL);
3639                         con_log(CL_ANN1, (CE_WARN,
3640                                 "mr_sas:DELETING STALE ENTRY  rval = %d "
3641                                 "tgt id = %d ", rval, tgt));
3642                         return (NDI_FAILURE);
3643                 }
3644                 return (NDI_SUCCESS);
3645         }
3646 
3647         pds = (struct mrsas_tbolt_pd_info *)
3648             kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3649         mrsas_tbolt_get_pd_info(instance, pds, tgt);
3650         dtype = pds->scsiDevType;
3651 
3652         /* Check for Disk*/
3653         if ((dtype == DTYPE_DIRECT)) {
3654                 if ((dtype == DTYPE_DIRECT) &&
3655                     (LE_16(pds->fwState) != PD_SYSTEM)) {
3656                         return (NDI_FAILURE);
3657                 }
3658                 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3659                 sd->sd_address.a_hba_tran = instance->tran;
3660                 sd->sd_address.a_target = (uint16_t)tgt;
3661                 sd->sd_address.a_lun = (uint8_t)lun;
3662 
3663                 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3664                         rval = mrsas_config_scsi_device(instance, sd, ldip);
3665                         con_log(CL_DLEVEL1, (CE_NOTE,
3666                             "Phys. device found: tgt %d dtype %d: %s",
3667                             tgt, dtype, sd->sd_inq->inq_vid));
3668                 } else {
3669                         rval = NDI_FAILURE;
3670                         con_log(CL_DLEVEL1, (CE_NOTE,
3671                             "Phys. device Not found scsi_hba_probe Failed: tgt %d dtype %d: %s",
3672                             tgt, dtype, sd->sd_inq->inq_vid));
3673                 }
3674 
3675                 /* sd_unprobe is blank now. Free buffer manually */
3676                 if (sd->sd_inq) {
3677                         kmem_free(sd->sd_inq, SUN_INQSIZE);
3678                         sd->sd_inq = (struct scsi_inquiry *)NULL;
3679                 }
3680                 kmem_free(sd, sizeof (struct scsi_device));
3681                 rval = NDI_SUCCESS;
3682         } else {
3683                 con_log(CL_ANN1, (CE_NOTE,
3684                     "Device not supported: tgt %d lun %d dtype %d",
3685                     tgt, lun, dtype));
3686                 rval = NDI_FAILURE;
3687         }
3688 
3689         kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3690         con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3691             rval));
3692         return (rval);
3693 }
3694 static void
3695 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance, struct mrsas_tbolt_pd_info *pds,
3696     int tgt)
3697 {
3698         struct mrsas_cmd        *cmd;
3699         struct mrsas_dcmd_frame *dcmd;
3700         dma_obj_t               dcmd_dma_obj;
3701 
3702         cmd = get_raid_msg_pkt(instance);
3703 
3704         if (!cmd) {
3705             con_log(CL_ANN1, (CE_WARN, "Failed to get a cmd for get pd info"));
3706             return;
3707         }
3708 
3709         /* Clear the frame buffer and assign back the context id */
3710         memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3711         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3712             cmd->index);
3713 
3714 
3715         dcmd = &cmd->frame->dcmd;
3716         dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3717         dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3718         dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3719         dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3720         dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3721         dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3722 
3723         (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3724             DDI_STRUCTURE_LE_ACC);
3725         (void) memset(dcmd_dma_obj.buffer, 0, sizeof (struct mrsas_tbolt_pd_info));
3726         (void) memset(dcmd->mbox.b, 0, 12);
3727         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3728         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3729         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3730         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, MFI_FRAME_DIR_READ);
3731         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3732         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3733             sizeof (struct mrsas_tbolt_pd_info));
3734         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3735             MR_DCMD_PD_GET_INFO);
3736         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3737         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3738             sizeof (struct mrsas_tbolt_pd_info));
3739         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3740             dcmd_dma_obj.dma_cookie[0].dmac_address);
3741 
3742         cmd->sync_cmd = MRSAS_TRUE;
3743         cmd->frame_count = 1;
3744 
3745         if (instance->tbolt) {
3746                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3747         }
3748 
3749         instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3750         
3751         ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3752             (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3753             DDI_DEV_AUTOINCR);
3754         (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3755         return_raid_msg_pkt(instance, cmd);
3756 }
3757 #endif