1 /*
   2  * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
   3  * i.e. Thunderbolt and Invader
   4  *
   5  * Solaris MegaRAID device driver for SAS2.0 controllers
   6  * Copyright (c) 2008-2012, LSI Logic Corporation.
   7  * All rights reserved.
   8  *
   9  * Version:
  10  * Author:
  11  *              Swaminathan K S
  12  *              Arun Chandrashekhar
  13  *              Manju R
  14  *              Rasheed
  15  *              Shakeel Bukhari
  16  */
  17 
  18 
  19 #include <sys/types.h>
  20 #include <sys/file.h>
  21 #include <sys/atomic.h>
  22 #include <sys/scsi/scsi.h>
  23 #include <sys/byteorder.h>
  24 #include "ld_pd_map.h"
  25 #include "mr_sas.h"
  26 #include "fusion.h"
  27 
  28 /*
  29  * FMA header files
  30  */
  31 #include <sys/ddifm.h>
  32 #include <sys/fm/protocol.h>
  33 #include <sys/fm/util.h>
  34 #include <sys/fm/io/ddi.h>
  35 
  36 
  37 /* Pre-TB command size and TB command size. */
  38 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
  39 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
  40 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
  41 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
  42 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
  43 extern ddi_dma_attr_t mrsas_generic_dma_attr;
  44 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
  45 extern struct ddi_device_acc_attr endian_attr;
  46 extern int      debug_level_g;
  47 extern unsigned int     enable_fp;
  48 volatile int dump_io_wait_time = 90;
  49 extern void
  50 io_timeout_checker(void *arg);
  51 extern volatile int  debug_timeout_g;
  52 extern int      mrsas_issue_pending_cmds(struct mrsas_instance *);
  53 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
  54 extern void     push_pending_mfi_pkt(struct mrsas_instance *,
  55                         struct mrsas_cmd *);
  56 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
  57             MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
  58 
  59 /* Local static prototypes. */
  60 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
  61     struct scsi_address *, struct scsi_pkt *, uchar_t *);
  62 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
  63     U64 start_blk, U32 num_blocks);
  64 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
  65 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
  66 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
  67 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
  68 #ifdef PDSUPPORT
  69 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
  70     struct mrsas_tbolt_pd_info *, int);
  71 #endif /* PDSUPPORT */
  72 
  73 static int debug_tbolt_fw_faults_after_ocr_g = 0;
  74 
  75 /*
  76  * destroy_mfi_mpi_frame_pool
  77  */
  78 void
  79 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
  80 {
  81         int     i;
  82 
  83         struct mrsas_cmd        *cmd;
  84 
  85         /* return all mfi frames to pool */
  86         for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
  87                 cmd = instance->cmd_list[i];
  88                 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
  89                         (void) mrsas_free_dma_obj(instance,
  90                             cmd->frame_dma_obj);
  91                 }
  92                 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
  93         }
  94 }
  95 
  96 /*
  97  * destroy_mpi2_frame_pool
  98  */
  99 void
 100 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
 101 {
 102 
 103         if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
 104                 (void) mrsas_free_dma_obj(instance,
 105                     instance->mpi2_frame_pool_dma_obj);
 106                 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
 107         }
 108 }
 109 
 110 
 111 /*
 112  * mrsas_tbolt_free_additional_dma_buffer
 113  */
 114 void
 115 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
 116 {
 117         int i;
 118 
 119         if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
 120                 (void) mrsas_free_dma_obj(instance,
 121                     instance->mfi_internal_dma_obj);
 122                 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
 123         }
 124         if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
 125                 (void) mrsas_free_dma_obj(instance,
 126                     instance->mfi_evt_detail_obj);
 127                 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
 128         }
 129 
 130         for (i = 0; i < 2; i++) {
 131                 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
 132                         (void) mrsas_free_dma_obj(instance,
 133                             instance->ld_map_obj[i]);
 134                         instance->ld_map_obj[i].status = DMA_OBJ_FREED;
 135                 }
 136         }
 137 }
 138 
 139 
 140 /*
 141  * free_req_desc_pool
 142  */
 143 void
 144 free_req_rep_desc_pool(struct mrsas_instance *instance)
 145 {
 146         if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 147                 (void) mrsas_free_dma_obj(instance,
 148                     instance->request_desc_dma_obj);
 149                 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
 150         }
 151 
 152         if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
 153                 (void) mrsas_free_dma_obj(instance,
 154                     instance->reply_desc_dma_obj);
 155                 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
 156         }
 157 
 158 
 159 }
 160 
 161 
/*
 * ThunderBolt(TB) Request Message Frame Pool
 *
 * Allocate one contiguous DMA-able chunk laid out as:
 *   [ spare frame for SMID 0 | max_cmd message frames |
 *     max_cmd SGL chain buffers (1K each) | max_cmd sense buffers ]
 * then carve it up, storing each command's virtual and physical pointers
 * for its message frame, SGL buffer and sense buffer, and assigning SMIDs
 * starting at 1 (SMID 0 is reserved).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the DMA allocation fails.
 */
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
	int		i = 0;
	uint16_t	max_cmd;
	uint32_t	sgl_sz;
	uint32_t	raid_msg_size;
	uint32_t	total_size;
	uint32_t	offset;
	uint32_t	io_req_base_phys;
	uint8_t		*io_req_base;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	sgl_sz		= 1024;
	raid_msg_size	= MRSAS_THUNDERBOLT_MSG_SIZE;

	/*
	 * Allocate one additional message frame's worth of space
	 * (MRSAS_THUNDERBOLT_MSG_SIZE bytes) to accommodate SMID 0.
	 */
	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);

	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
	    "max_cmd %x", max_cmd));

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
	    "request message frame pool size %x", total_size));

	/*
	 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
	 * and then split the memory to 1024 commands. Each command should be
	 * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
	 * within it. Further refer the "alloc_req_rep_desc" function where
	 * we allocate request/reply descriptors queues for a clue.
	 */

	instance->mpi2_frame_pool_dma_obj.size = total_size;
	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
	/* Message frames are 256-byte aligned. */
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc mpi2 frame pool");
		return (DDI_FAILURE);
	}

	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->io_request_frames =
	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
	instance->io_request_frames_phy =
	    (uint32_t)
	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
	    (void *)instance->io_request_frames));

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
	    instance->io_request_frames_phy));

	/* Per-command frames begin after the frame reserved for SMID 0. */
	io_req_base = (uint8_t *)instance->io_request_frames +
	    MRSAS_THUNDERBOLT_MSG_SIZE;
	io_req_base_phys = instance->io_request_frames_phy +
	    MRSAS_THUNDERBOLT_MSG_SIZE;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io req_base_phys 0x%x", io_req_base_phys));

	/* Hand each command its message frame, SGL buffer and sense buffer. */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];

		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;

		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
		    ((uint8_t *)io_req_base + offset);
		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;

		/* SGL chain buffers follow all of the message frames. */
		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sgl_phys_addr = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		/* Sense buffers follow all of the SGL buffers. */
		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->sense_phys_addr1 = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		/* SMID 0 is reserved, so command SMIDs start at 1. */
		cmd->SMID = i + 1;

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
		    cmd->index, (void *)cmd->scsi_io_request));

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request_phys_addr));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
		    cmd->index, (void *)cmd->sense1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
		    cmd->index, cmd->sense_phys_addr1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
		    cmd->index, (void *)cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
	}

	return (DDI_SUCCESS);

}
 288 
 289 
/*
 * mrsas_tbolt_alloc_additional_dma_buffer - allocate the auxiliary DMA
 * buffers used outside the main frame pool: a two-page internal scratch
 * buffer, the AEN event-detail buffer, and two LD map buffers used for
 * RAID map updates.
 *
 * On any failure, everything allocated so far is released via
 * mrsas_tbolt_free_additional_dma_buffer() and DDI_FAILURE is returned.
 */
int
mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	internal_buf_size = PAGESIZE*2;
	int i;

	/* Initialize buffer status as free */
	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
	instance->ld_map_obj[1].status = DMA_OBJ_FREED;


	/* Internal scratch buffer: 32-bit addressable, single cookie. */
	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
	instance->internal_buf =
	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
	instance->internal_buf_size = internal_buf_size;

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto fail_tbolt_additional_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	/* Size of the FW RAID map with its variable-length span array. */
	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	/* Two LD map buffers are allocated (double buffering). */
	for (i = 0; i < 2; i++) {
		/* allocate the data transfer buffer */
		instance->ld_map_obj[i].size = instance->size_map_info;
		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;

		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			cmn_err(CE_WARN,
			    "could not allocate data transfer buffer.");
			goto fail_tbolt_additional_buff;
		}

		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;

		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);

		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
		instance->ld_map_phy[i] = (uint32_t)instance->
		    ld_map_obj[i].dma_cookie[0].dmac_address;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "size_map_info 0x%x", instance->size_map_info));
	}

	return (DDI_SUCCESS);

fail_tbolt_additional_buff:
	/* Releases every buffer this function managed to allocate. */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	return (DDI_FAILURE);
}
 392 
 393 MRSAS_REQUEST_DESCRIPTOR_UNION *
 394 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
 395 {
 396         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 397 
 398         if (index > instance->max_fw_cmds) {
 399                 con_log(CL_ANN1, (CE_NOTE,
 400                     "Invalid SMID 0x%x request for descriptor", index));
 401                 con_log(CL_ANN1, (CE_NOTE,
 402                     "max_fw_cmds : 0x%x", instance->max_fw_cmds));
 403                 return (NULL);
 404         }
 405 
 406         req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
 407             ((char *)instance->request_message_pool +
 408             (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
 409 
 410         con_log(CL_ANN1, (CE_NOTE,
 411             "request descriptor : 0x%08lx", (unsigned long)req_desc));
 412 
 413         con_log(CL_ANN1, (CE_NOTE,
 414             "request descriptor base phy : 0x%08lx",
 415             (unsigned long)instance->request_message_pool_phy));
 416 
 417         return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
 418 }
 419 
 420 
/*
 * Allocate Request and Reply  Queue Descriptors.
 *
 * Allocates the reply descriptor ring first (depth rounded up to a
 * multiple of 16, every entry preset to ~0 so unwritten slots are
 * recognizable), then the request descriptor array (one 8-byte
 * descriptor per FW command).  On request-queue failure the reply
 * queue is unwound.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
	uint32_t	request_q_sz, reply_q_sz;
	int		i, max_reply_q_sz;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	/*
	 * ThunderBolt(TB) There's no longer producer consumer mechanism.
	 * Once we have an interrupt we are supposed to scan through the list of
	 * reply descriptors and process them accordingly. We would be needing
	 * to allocate memory for 1024 reply descriptors
	 */

	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size should be multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;

	/* Each reply descriptor is 8 bytes. */
	reply_q_sz = 8 * max_reply_q_sz;


	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* Reply ring: 32-bit addressable, single cookie, 16-byte aligned. */
	instance->reply_desc_dma_obj.size = reply_q_sz;
	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of  reply queue */
	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
	    instance->reply_desc_dma_obj.buffer);

	instance->reply_q_depth = max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
	    instance->reply_q_depth));

	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
	    (void *)instance->reply_frame_pool));

	/* initializing reply address to 0xFFFFFFFF */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}


	instance->reply_frame_pool_phy =
	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_ANN1, (CE_NOTE,
	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));


	/* First physical address past the end of the reply ring. */
	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
	    reply_q_sz);

	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
	    instance->reply_pool_limit_phy));


	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	/* One 8-byte request descriptor per FW command. */
	request_q_sz = 8 *
	    (instance->max_fw_cmds);

	instance->request_desc_dma_obj.size = request_q_sz;
	instance->request_desc_dma_obj.dma_attr      = mrsas_generic_dma_attr;
	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen      = 1;
	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc request queue desc");
		goto fail_undo_reply_queue;
	}

	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of  request queue desc */
	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    (instance->request_desc_dma_obj.buffer);

	instance->request_message_pool_phy =
	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;

	return (DDI_SUCCESS);

fail_undo_reply_queue:
	/* Request-queue allocation failed; release the reply queue. */
	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
 549 
 550 /*
 551  * mrsas_alloc_cmd_pool_tbolt
 552  *
 553  * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
 554  * routine
 555  */
 556 int
 557 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
 558 {
 559         int             i;
 560         int             count;
 561         uint32_t        max_cmd;
 562         uint32_t        reserve_cmd;
 563         size_t          sz;
 564 
 565         struct mrsas_cmd        *cmd;
 566 
 567         max_cmd = instance->max_fw_cmds;
 568         con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
 569             "max_cmd %x", max_cmd));
 570 
 571 
 572         sz = sizeof (struct mrsas_cmd *) * max_cmd;
 573 
 574         /*
 575          * instance->cmd_list is an array of struct mrsas_cmd pointers.
 576          * Allocate the dynamic array first and then allocate individual
 577          * commands.
 578          */
 579         instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
 580 
 581         /* create a frame pool and assign one frame to each cmd */
 582         for (count = 0; count < max_cmd; count++) {
 583                 instance->cmd_list[count] =
 584                     kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
 585         }
 586 
 587         /* add all the commands to command pool */
 588 
 589         INIT_LIST_HEAD(&instance->cmd_pool_list);
 590         INIT_LIST_HEAD(&instance->cmd_pend_list);
 591         INIT_LIST_HEAD(&instance->cmd_app_pool_list);
 592 
 593         reserve_cmd = MRSAS_APP_RESERVED_CMDS;
 594 
 595         /* cmd index 0 reservered for IOC INIT */
 596         for (i = 1; i < reserve_cmd; i++) {
 597                 cmd             = instance->cmd_list[i];
 598                 cmd->index   = i;
 599                 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
 600         }
 601 
 602 
 603         for (i = reserve_cmd; i < max_cmd; i++) {
 604                 cmd             = instance->cmd_list[i];
 605                 cmd->index   = i;
 606                 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
 607         }
 608 
 609         return (DDI_SUCCESS);
 610 
 611 mrsas_undo_cmds:
 612         if (count > 0) {
 613                 /* free each cmd */
 614                 for (i = 0; i < count; i++) {
 615                         if (instance->cmd_list[i] != NULL) {
 616                                 kmem_free(instance->cmd_list[i],
 617                                     sizeof (struct mrsas_cmd));
 618                         }
 619                         instance->cmd_list[i] = NULL;
 620                 }
 621         }
 622 
 623 mrsas_undo_cmd_list:
 624         if (instance->cmd_list != NULL)
 625                 kmem_free(instance->cmd_list, sz);
 626         instance->cmd_list = NULL;
 627 
 628         return (DDI_FAILURE);
 629 }
 630 
 631 
/*
 * free_space_for_mpi2 - undo everything alloc_space_for_mpi2() set up,
 * in reverse order of allocation.  Idempotent: a NULL cmd_list means the
 * pools were already released (or never allocated), so return at once.
 */
void
free_space_for_mpi2(struct mrsas_instance *instance)
{
	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	/* First free the additional DMA buffer */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	/* Free the request/reply descriptor pool */
	free_req_rep_desc_pool(instance);

	/*  Free the MPI message pool */
	destroy_mpi2_frame_pool(instance);

	/* Free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	/* Free the cmd_list buffer itself */
	mrsas_free_cmd_pool(instance);
}
 659 
 660 
/*
 * ThunderBolt(TB) memory allocations for commands/messages/frames.
 *
 * Allocates, in order: the command pool, the request/reply descriptor
 * queues, the MFI frame pool (for MPI-MFI passthru), the MPI2 message
 * pool, and the additional DMA buffers.  Also derives the SGE limits and
 * chain offsets from the fixed message/command sizes.  On failure each
 * goto label unwinds everything allocated before it; returns DDI_SUCCESS
 * or DDI_FAILURE.
 */
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
	/* Allocate command pool (memory for cmd_list & individual commands) */
	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
		cmn_err(CE_WARN, "Error creating cmd pool");
		return (DDI_FAILURE);
	}

	/* Initialize single reply size and Message size */
	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	/* SGEs that fit inside the main message frame itself. */
	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
	/* Additional SGEs available via the chain buffer. */
	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);

	/* Reduce SG count by 1 to take care of group cmds feature in FW */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
	/* Chain offsets are expressed in 16-byte units. */
	instance->chain_offset_mpt_msg =
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    sizeof (MPI2_SGE_IO_UNION)) / 16;
	instance->reply_read_index = 0;


	/* Allocate Request and Reply descriptors Array */
	/* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
	if (alloc_req_rep_desc(instance)) {
		cmn_err(CE_WARN,
		    "Error, allocating memory for descripter-pool");
		goto mpi2_undo_cmd_pool;
	}
	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
	    instance->request_message_pool_phy));


	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
	if (create_mfi_frame_pool(instance)) {
		cmn_err(CE_WARN,
		    "Error, allocating memory for MFI frame-pool");
		goto mpi2_undo_descripter_pool;
	}


	/* Allocate MPI2 Message pool */
	/*
	 * Make sure the buffer is alligned to 256 for raid message packet
	 * create a io request pool and assign one frame to each cmd
	 */

	if (create_mpi2_frame_pool(instance)) {
		cmn_err(CE_WARN,
		    "Error, allocating memory for MPI2 Message-pool");
		goto mpi2_undo_mfi_frame_pool;
	}

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
	    instance->max_sge_in_main_msg));
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
	    instance->max_sge_in_chain));
	con_log(CL_ANN1, (CE_CONT,
	    "[max_sge]0x%x", instance->max_num_sge));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
	    instance->chain_offset_mpt_msg));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
	    instance->chain_offset_io_req));
#endif


	/* Allocate additional dma buffer */
	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
		cmn_err(CE_WARN,
		    "Error, allocating tbolt additional DMA buffer");
		goto mpi2_undo_message_pool;
	}

	return (DDI_SUCCESS);

	/* Unwind in reverse order of allocation. */
mpi2_undo_message_pool:
	destroy_mpi2_frame_pool(instance);

mpi2_undo_mfi_frame_pool:
	destroy_mfi_frame_pool(instance);

mpi2_undo_descripter_pool:
	free_req_rep_desc_pool(instance);

mpi2_undo_cmd_pool:
	mrsas_free_cmd_pool(instance);

	return (DDI_FAILURE);
}
 761 
 762 
 763 /*
 764  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 765  */
 766 int
 767 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
 768 {
 769 
 770         /*
 771          * Reduce the max supported cmds by 1. This is to ensure that the
 772          * reply_q_sz (1 more than the max cmd that driver may send)
 773          * does not exceed max cmds that the FW can support
 774          */
 775 
 776         if (instance->max_fw_cmds > 1008) {
 777                 instance->max_fw_cmds = 1008;
 778                 instance->max_fw_cmds = instance->max_fw_cmds-1;
 779         }
 780 
 781         con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
 782             " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
 783 
 784 
 785         /* create a pool of commands */
 786         if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
 787                 cmn_err(CE_WARN,
 788                     " alloc_space_for_mpi2() failed.");
 789 
 790                 return (DDI_FAILURE);
 791         }
 792 
 793         /* Send ioc init message */
 794         /* NOTE: the issue_init call does FMA checking already. */
 795         if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
 796                 cmn_err(CE_WARN,
 797                     " mrsas_issue_init_mpi2() failed.");
 798 
 799                 goto fail_init_fusion;
 800         }
 801 
 802         instance->unroll.alloc_space_mpi2 = 1;
 803 
 804         con_log(CL_ANN, (CE_NOTE,
 805             "mrsas_init_adapter_tbolt: SUCCESSFUL"));
 806 
 807         return (DDI_SUCCESS);
 808 
 809 fail_init_fusion:
 810         free_space_for_mpi2(instance);
 811 
 812         return (DDI_FAILURE);
 813 }
 814 
 815 
 816 
 817 /*
 818  * init_mpi2
 819  */
 820 int
 821 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
 822 {
 823         dma_obj_t init2_dma_obj;
 824         int ret_val = DDI_SUCCESS;
 825 
 826         /* allocate DMA buffer for IOC INIT message */
 827         init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
 828         init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
 829         init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 830         init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 831         init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
 832         init2_dma_obj.dma_attr.dma_attr_align = 256;
 833 
 834         if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
 835             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
 836                 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
 837                     "could not allocate data transfer buffer.");
 838                 return (DDI_FAILURE);
 839         }
 840         (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
 841 
 842         con_log(CL_ANN1, (CE_NOTE,
 843             "mrsas_issue_init_mpi2 _phys adr: %x",
 844             init2_dma_obj.dma_cookie[0].dmac_address));
 845 
 846 
 847         /* Initialize and send ioc init message */
 848         ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
 849         if (ret_val == DDI_FAILURE) {
 850                 con_log(CL_ANN1, (CE_WARN,
 851                     "mrsas_issue_init_mpi2: Failed"));
 852                 goto fail_init_mpi2;
 853         }
 854 
 855         /* free IOC init DMA buffer */
 856         if (mrsas_free_dma_obj(instance, init2_dma_obj)
 857             != DDI_SUCCESS) {
 858                 con_log(CL_ANN1, (CE_WARN,
 859                     "mrsas_issue_init_mpi2: Free Failed"));
 860                 return (DDI_FAILURE);
 861         }
 862 
 863         /* Get/Check and sync ld_map info */
 864         instance->map_id = 0;
 865         if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
 866                 (void) mrsas_tbolt_sync_map_info(instance);
 867 
 868 
 869         /* No mrsas_cmd to send, so send NULL. */
 870         if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
 871                 goto fail_init_mpi2;
 872 
 873         con_log(CL_ANN, (CE_NOTE,
 874             "mrsas_issue_init_mpi2: SUCCESSFUL"));
 875 
 876         return (DDI_SUCCESS);
 877 
 878 fail_init_mpi2:
 879         (void) mrsas_free_dma_obj(instance, init2_dma_obj);
 880 
 881         return (DDI_FAILURE);
 882 }
 883 
/*
 * mrsas_tbolt_ioc_init - Build and send the MPI2 IOC INIT request.
 *
 * The IOC INIT request itself lives in mpi2_dma_obj (allocated and
 * freed by the caller).  It is delivered to the firmware wrapped in a
 * legacy MFI init frame (struct mrsas_init_frame2) issued in polled
 * mode through command slot 0.  The driver version string is placed in
 * a separately allocated DMA buffer whose physical address is handed to
 * the firmware inside the MFI frame.
 *
 * Returns DDI_SUCCESS if the firmware reports cmd_status 0 for the
 * INIT2 frame; DDI_FAILURE otherwise.  On failure the driver-version
 * buffer is freed here; on success it is owned by the unroll path
 * (instance->unroll.verBuff).
 */
static int
mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
	int				numbytes;
	uint16_t			flags;
	struct mrsas_init_frame2	*mfiFrameInit2;
	struct mrsas_header		*frame_hdr;
	Mpi2IOCInitRequest_t		*init;
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_drv_ver		drv_ver_info;
	MRSAS_REQUEST_DESCRIPTOR_UNION	*req_desc;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (*mfiFrameInit2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (struct mrsas_init_frame2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
	    (int)sizeof (Mpi2IOCInitRequest_t)));
#endif

	/* Build the MPI2 IOC INIT request in the caller-supplied buffer. */
	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
	numbytes = sizeof (*init);
	bzero(init, numbytes);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
	    MPI2_FUNCTION_IOC_INIT);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
	    MPI2_WHOINIT_HOST_DRIVER);

	/* set MsgVersion and HeaderVersion host driver was built with */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
	    MPI2_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
	    MPI2_HEADER_VERSION);

	/* request frame size is expressed in 32-bit words */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
	    instance->raid_io_msg_size / 4);

	/* the reply free queue is not used by this driver */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
	    0);

	ddi_put16(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueDepth,
	    instance->reply_q_depth);
	/*
	 * These addresses are set using the DMA cookie addresses from when the
	 * memory was allocated.  Sense buffer hi address should be 0.
	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
	 */

	ddi_put32(mpi2_dma_obj->acc_handle,
	    &init->SenseBufferAddressHigh, 0);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
	    instance->io_request_frames_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueAddress,
	    instance->reply_frame_pool_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyFreeQueueAddress, 0);

	/* Command slot 0 is used for internal commands such as IOC INIT. */
	cmd = instance->cmd_list[0];
	if (cmd == NULL) {
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	cmd->pkt = NULL;
	cmd->drv_pkt_time = 0;

	/* The MFI init frame is overlaid on the command's IO request frame. */
	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));

	frame_hdr = &cmd->frame->hdr;

	/* poll for completion; do not post to the reply queue */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);

	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);

	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN, (CE_CONT,
	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));

	/* Init the MFI Header */
	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);

	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status,
	    MFI_STAT_INVALID_STATUS);

	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));

	/* point the MFI frame at the MPI2 IOC INIT request payload */
	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
	    mpi2_dma_obj->dma_cookie[0].dmac_address);

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->data_xfer_len,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
	    (int)init->ReplyDescriptorPostQueueAddress));

	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "fusion init: Could not allocate driver version buffer.");
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);

	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
	    mfiFrameInit2->queue_info_new_phys_addr_lo,
	    (int)sizeof (Mpi2IOCInitRequest_t)));

	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));

	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
	    cmd->scsi_io_request_phys_addr,
	    (int)sizeof (struct mrsas_init_frame2)));

	/* disable interrupts before sending INIT2 frame */
	instance->func_ptr->disable_intr(instance);

	/*
	 * Build the request descriptor in the first slot of the request
	 * message pool; it carries the MFI frame's physical address and
	 * is flagged as an MFA (MFI pass-through) request.
	 */
	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    instance->request_message_pool;
	req_desc->Words = cmd->scsi_io_request_phys_addr;
	req_desc->MFAIo.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = req_desc;

	/* issue the init frame */
	instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
	    frame_hdr->cmd_status));

	/* firmware overwrites MFI_STAT_INVALID_STATUS with 0 on success */
	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status) == 0) {
		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
	} else {
		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
		mrsas_dump_reply_desc(instance);
		goto fail_ioc_init;
	}

	mrsas_dump_reply_desc(instance);

	/* driver version buffer is now freed by the unroll path */
	instance->unroll.verBuff = 1;

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));

	return (DDI_SUCCESS);


fail_ioc_init:

	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

	return (DDI_FAILURE);
}
1083 
1084 int
1085 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1086 {
1087         int i;
1088         uint32_t wait_time = dump_io_wait_time;
1089         for (i = 0; i < wait_time; i++) {
1090                 /*
1091                  * Check For Outstanding poll Commands
1092                  * except ldsync command and aen command
1093                  */
1094                 if (instance->fw_outstanding <= 2) {
1095                         break;
1096                 }
1097                 drv_usecwait(10*MILLISEC);
1098                 /* complete commands from reply queue */
1099                 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1100         }
1101         if (instance->fw_outstanding > 2) {
1102                 return (1);
1103         }
1104         return (0);
1105 }
1106 /*
1107  * scsi_pkt handling
1108  *
1109  * Visible to the external world via the transport structure.
1110  */
1111 
/*
 * mrsas_tbolt_tran_start - SCSA tran_start(9E) entry point for
 * Thunderbolt/Invader controllers.
 *
 * Prepares the packet, builds an mrsas command for it and issues it to
 * the firmware, either interrupt-driven or polled (FLAG_NOINTR).
 *
 * Returns TRAN_ACCEPT on success (or when the command was completed
 * in-line by the build routine), TRAN_BUSY when a reset is in progress,
 * no command slot is free, or the firmware queue is full, and
 * TRAN_FATAL_ERROR when the adapter has been declared dead.
 */
int
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct scsa_cmd		*acmd = PKT2CMD(pkt);
	struct mrsas_cmd	*cmd = NULL;
	uchar_t			cmd_done = 0;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* dead adapter: fail the IO immediately */
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN,
		    "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs");
		if (pkt) {
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}
	/* during OCR/reset, ask the target driver to retry later */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}
	(void) mrsas_tbolt_prepare_pkt(acmd);

	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		/* invoke the completion callback for non-polled packets */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* no free command slot was available */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}


	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* interrupt-driven path */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			cmn_err(CE_WARN,
			    "Command Queue Full... Returning BUSY");
			return_raid_msg_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
		    cmd->index, cmd->SMID));

		instance->func_ptr->issue_cmd(cmd, instance);
	} else {
		/* polled (FLAG_NOINTR) path: issue and wait in-line */
		instance->func_ptr->issue_cmd(cmd, instance);
		(void) wait_for_outstanding_poll_io(instance);
		(void) mrsas_common_check(instance, cmd);
	}

	return (TRAN_ACCEPT);
}
1187 
1188 /*
1189  * prepare the pkt:
1190  * the pkt may have been resubmitted or just reused so
1191  * initialize some fields and do some checks.
1192  */
1193 static int
1194 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1195 {
1196         struct scsi_pkt *pkt = CMD2PKT(acmd);
1197 
1198 
1199         /*
1200          * Reinitialize some fields that need it; the packet may
1201          * have been resubmitted
1202          */
1203         pkt->pkt_reason = CMD_CMPLT;
1204         pkt->pkt_state = 0;
1205         pkt->pkt_statistics = 0;
1206         pkt->pkt_resid = 0;
1207 
1208         /*
1209          * zero status byte.
1210          */
1211         *(pkt->pkt_scbp) = 0;
1212 
1213         return (0);
1214 }
1215 
1216 
1217 int
1218 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1219     struct scsa_cmd *acmd,
1220     struct mrsas_cmd *cmd,
1221     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1222     uint32_t *datalen)
1223 {
1224         uint32_t                MaxSGEs;
1225         int                     sg_to_process;
1226         uint32_t                i, j;
1227         uint32_t                numElements, endElement;
1228         Mpi25IeeeSgeChain64_t   *ieeeChainElement = NULL;
1229         Mpi25IeeeSgeChain64_t   *scsi_raid_io_sgl_ieee = NULL;
1230         ddi_acc_handle_t acc_handle =
1231             instance->mpi2_frame_pool_dma_obj.acc_handle;
1232 
1233         con_log(CL_ANN1, (CE_NOTE,
1234             "chkpnt: Building Chained SGL :%d", __LINE__));
1235 
1236         /* Calulate SGE size in number of Words(32bit) */
1237         /* Clear the datalen before updating it. */
1238         *datalen = 0;
1239 
1240         MaxSGEs = instance->max_sge_in_main_msg;
1241 
1242         ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1243             MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1244 
1245         /* set data transfer flag. */
1246         if (acmd->cmd_flags & CFLAG_DMASEND) {
1247                 ddi_put32(acc_handle, &scsi_raid_io->Control,
1248                     MPI2_SCSIIO_CONTROL_WRITE);
1249         } else {
1250                 ddi_put32(acc_handle, &scsi_raid_io->Control,
1251                     MPI2_SCSIIO_CONTROL_READ);
1252         }
1253 
1254 
1255         numElements = acmd->cmd_cookiecnt;
1256 
1257         con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1258 
1259         if (numElements > instance->max_num_sge) {
1260                 con_log(CL_ANN, (CE_NOTE,
1261                     "[Max SGE Count Exceeded]:%x", numElements));
1262                 return (numElements);
1263         }
1264 
1265         ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1266             (uint8_t)numElements);
1267 
1268         /* set end element in main message frame */
1269         endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1270 
1271         /* prepare the scatter-gather list for the firmware */
1272         scsi_raid_io_sgl_ieee =
1273             (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1274 
1275         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1276                 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1277                 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1278 
1279                 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1280         }
1281 
1282         for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1283                 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1284                     acmd->cmd_dmacookies[i].dmac_laddress);
1285 
1286                 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1287                     acmd->cmd_dmacookies[i].dmac_size);
1288 
1289                 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1290 
1291                 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1292                         if (i == (numElements - 1)) {
1293                                 ddi_put8(acc_handle,
1294                                     &scsi_raid_io_sgl_ieee->Flags,
1295                                     IEEE_SGE_FLAGS_END_OF_LIST);
1296                         }
1297                 }
1298 
1299                 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1300 
1301 #ifdef DEBUG
1302                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1303                     scsi_raid_io_sgl_ieee->Address));
1304                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1305                     scsi_raid_io_sgl_ieee->Length));
1306                 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1307                     scsi_raid_io_sgl_ieee->Flags));
1308 #endif
1309 
1310         }
1311 
1312         ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1313 
1314         /* check if chained SGL required */
1315         if (i < numElements) {
1316 
1317                 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1318 
1319                 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1320                         uint16_t ioFlags =
1321                             ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1322 
1323                         if ((ioFlags &
1324                             MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1325                             MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1326                                 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1327                                     (U8)instance->chain_offset_io_req);
1328                         } else {
1329                                 ddi_put8(acc_handle,
1330                                     &scsi_raid_io->ChainOffset, 0);
1331                         }
1332                 } else {
1333                         ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1334                             (U8)instance->chain_offset_io_req);
1335                 }
1336 
1337                 /* prepare physical chain element */
1338                 ieeeChainElement = scsi_raid_io_sgl_ieee;
1339 
1340                 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1341 
1342                 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1343                         ddi_put8(acc_handle, &ieeeChainElement->Flags,
1344                             IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1345                 } else {
1346                         ddi_put8(acc_handle, &ieeeChainElement->Flags,
1347                             (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1348                             MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1349                 }
1350 
1351                 ddi_put32(acc_handle, &ieeeChainElement->Length,
1352                     (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1353 
1354                 ddi_put64(acc_handle, &ieeeChainElement->Address,
1355                     (U64)cmd->sgl_phys_addr);
1356 
1357                 sg_to_process = numElements - i;
1358 
1359                 con_log(CL_ANN1, (CE_NOTE,
1360                     "[Additional SGE Count]:%x", endElement));
1361 
1362                 /* point to the chained SGL buffer */
1363                 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1364 
1365                 /* build rest of the SGL in chained buffer */
1366                 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1367                         con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1368 
1369                         ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1370                             acmd->cmd_dmacookies[i].dmac_laddress);
1371 
1372                         ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1373                             acmd->cmd_dmacookies[i].dmac_size);
1374 
1375                         ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1376 
1377                         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1378                                 if (i == (numElements - 1)) {
1379                                         ddi_put8(acc_handle,
1380                                             &scsi_raid_io_sgl_ieee->Flags,
1381                                             IEEE_SGE_FLAGS_END_OF_LIST);
1382                                 }
1383                         }
1384 
1385                         *datalen += acmd->cmd_dmacookies[i].dmac_size;
1386 
1387 #if DEBUG
1388                         con_log(CL_DLEVEL1, (CE_NOTE,
1389                             "[SGL Address]: %" PRIx64,
1390                             scsi_raid_io_sgl_ieee->Address));
1391                         con_log(CL_DLEVEL1, (CE_NOTE,
1392                             "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1393                         con_log(CL_DLEVEL1, (CE_NOTE,
1394                             "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1395 #endif
1396 
1397                         i++;
1398                 }
1399         }
1400 
1401         return (0);
1402 } /*end of BuildScatterGather */
1403 
1404 
1405 /*
1406  * build_cmd
1407  */
1408 static struct mrsas_cmd *
1409 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1410     struct scsi_pkt *pkt, uchar_t *cmd_done)
1411 {
1412         uint8_t         fp_possible = 0;
1413         uint32_t        index;
1414         uint32_t        lba_count = 0;
1415         uint32_t        start_lba_hi = 0;
1416         uint32_t        start_lba_lo = 0;
1417         ddi_acc_handle_t acc_handle =
1418             instance->mpi2_frame_pool_dma_obj.acc_handle;
1419         struct mrsas_cmd                *cmd = NULL;
1420         struct scsa_cmd                 *acmd = PKT2CMD(pkt);
1421         MRSAS_REQUEST_DESCRIPTOR_UNION  *ReqDescUnion;
1422         Mpi2RaidSCSIIORequest_t         *scsi_raid_io;
1423         uint32_t                        datalen;
1424         struct IO_REQUEST_INFO io_info;
1425         MR_FW_RAID_MAP_ALL *local_map_ptr;
1426         uint16_t pd_cmd_cdblen;
1427 
1428         con_log(CL_DLEVEL1, (CE_NOTE,
1429             "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1430 
1431         /* find out if this is logical or physical drive command.  */
1432         acmd->islogical = MRDRV_IS_LOGICAL(ap);
1433         acmd->device_id = MAP_DEVICE_ID(instance, ap);
1434 
1435         *cmd_done = 0;
1436 
1437         /* get the command packet */
1438         if (!(cmd = get_raid_msg_pkt(instance))) {
1439                 return (NULL);
1440         }
1441 
1442         index = cmd->index;
1443         ReqDescUnion =  mr_sas_get_request_descriptor(instance, index);
1444         ReqDescUnion->Words = 0;
1445         ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1446         ReqDescUnion->SCSIIO.RequestFlags =
1447             (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1448             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1449 
1450 
1451         cmd->request_desc = ReqDescUnion;
1452         cmd->pkt = pkt;
1453         cmd->cmd = acmd;
1454 
1455         /* lets get the command directions */
1456         if (acmd->cmd_flags & CFLAG_DMASEND) {
1457                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1458                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
1459                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
1460                             DDI_DMA_SYNC_FORDEV);
1461                 }
1462         } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1463                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1464                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
1465                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
1466                             DDI_DMA_SYNC_FORCPU);
1467                 }
1468         } else {
1469                 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1470         }
1471 
1472 
1473         /* get SCSI_IO raid message frame pointer */
1474         scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1475 
1476         /* zero out SCSI_IO raid message frame */
1477         bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1478 
1479         /* Set the ldTargetId set by BuildRaidContext() */
1480         ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1481             acmd->device_id);
1482 
1483         /*  Copy CDB to scsi_io_request message frame */
1484         ddi_rep_put8(acc_handle,
1485             (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1486             acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1487 
1488         /*
1489          * Just the CDB length, rest of the Flags are zero
1490          * This will be modified later.
1491          */
1492         ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1493 
1494         pd_cmd_cdblen = acmd->cmd_cdblen;
1495 
1496         switch (pkt->pkt_cdbp[0]) {
1497         case SCMD_READ:
1498         case SCMD_WRITE:
1499         case SCMD_READ_G1:
1500         case SCMD_WRITE_G1:
1501         case SCMD_READ_G4:
1502         case SCMD_WRITE_G4:
1503         case SCMD_READ_G5:
1504         case SCMD_WRITE_G5:
1505 
1506                 if (acmd->islogical) {
1507                         /* Initialize sense Information */
1508                         if (cmd->sense1 == NULL) {
1509                                 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1510                                     "Sense buffer ptr NULL "));
1511                         }
1512                         bzero(cmd->sense1, SENSE_LENGTH);
1513                         con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1514                             "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1515 
1516                         if (acmd->cmd_cdblen == CDB_GROUP0) {
1517                                 /* 6-byte cdb */
1518                                 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1519                                 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1520                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1521                                     ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1522                                     << 16));
1523                         } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1524                                 /* 10-byte cdb */
1525                                 lba_count =
1526                                     (((uint16_t)(pkt->pkt_cdbp[8])) |
1527                                     ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1528 
1529                                 start_lba_lo =
1530                                     (((uint32_t)(pkt->pkt_cdbp[5])) |
1531                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1532                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1533                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1534 
1535                         } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1536                                 /* 12-byte cdb */
1537                                 lba_count = (
1538                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
1539                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1540                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1541                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1542 
1543                                 start_lba_lo =
1544                                     (((uint32_t)(pkt->pkt_cdbp[5])) |
1545                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1546                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1547                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1548 
1549                         } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1550                                 /* 16-byte cdb */
1551                                 lba_count = (
1552                                     ((uint32_t)(pkt->pkt_cdbp[13])) |
1553                                     ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1554                                     ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1555                                     ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1556 
1557                                 start_lba_lo = (
1558                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
1559                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1560                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1561                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1562 
1563                                 start_lba_hi = (
1564                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
1565                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1566                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1567                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1568                         }
1569 
1570                         if (instance->tbolt &&
1571                             ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1572                                 cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1573                                     "controller limit 0x%x sectors",
1574                                     lba_count);
1575                         }
1576 
1577                         bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1578                         io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1579                             start_lba_lo;
1580                         io_info.numBlocks = lba_count;
1581                         io_info.ldTgtId = acmd->device_id;
1582 
1583                         if (acmd->cmd_flags & CFLAG_DMASEND)
1584                                 io_info.isRead = 0;
1585                         else
1586                                 io_info.isRead = 1;
1587 
1588 
1589                         /* Acquire SYNC MAP UPDATE lock */
1590                         mutex_enter(&instance->sync_map_mtx);
1591 
1592                         local_map_ptr =
1593                             instance->ld_map[(instance->map_id & 1)];
1594 
1595                         if ((MR_TargetIdToLdGet(
1596                             acmd->device_id, local_map_ptr) >=
1597                             MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1598                                 cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1599                                     "targetId >= MAX_LOGICAL_DRIVES || "
1600                                     "!instance->fast_path_io");
1601                                 fp_possible = 0;
1602                                 /* Set Regionlock flags to BYPASS */
1603                                 /* io_request->RaidContext.regLockFlags  = 0; */
1604                                 ddi_put8(acc_handle,
1605                                     &scsi_raid_io->RaidContext.regLockFlags, 0);
1606                         } else {
1607                                 if (MR_BuildRaidContext(instance, &io_info,
1608                                     &scsi_raid_io->RaidContext, local_map_ptr))
1609                                         fp_possible = io_info.fpOkForIo;
1610                         }
1611 
1612                         if (!enable_fp)
1613                                 fp_possible = 0;
1614 
1615                         con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
1616                             "instance->fast_path_io %d fp_possible %d",
1617                             enable_fp, instance->fast_path_io, fp_possible));
1618 
1619                 if (fp_possible) {
1620 
1621                         /* Check for DIF enabled LD */
1622                         if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1623                                 /* Prepare 32 Byte CDB for DIF capable Disk */
1624                                 mrsas_tbolt_prepare_cdb(instance,
1625                                     scsi_raid_io->CDB.CDB32,
1626                                     &io_info, scsi_raid_io, start_lba_lo);
1627                         } else {
1628                                 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1629                                     (uint8_t *)&pd_cmd_cdblen,
1630                                     io_info.pdBlock, io_info.numBlocks);
1631                                 ddi_put16(acc_handle,
1632                                     &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1633                         }
1634 
1635                         ddi_put8(acc_handle, &scsi_raid_io->Function,
1636                             MPI2_FUNCTION_SCSI_IO_REQUEST);
1637 
1638                         ReqDescUnion->SCSIIO.RequestFlags =
1639                             (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1640                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1641 
1642                         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1643                                 uint8_t regLockFlags = ddi_get8(acc_handle,
1644                                     &scsi_raid_io->RaidContext.regLockFlags);
1645                                 uint16_t IoFlags = ddi_get16(acc_handle,
1646                                     &scsi_raid_io->IoFlags);
1647 
1648                                 if (regLockFlags == REGION_TYPE_UNUSED)
1649                                         ReqDescUnion->SCSIIO.RequestFlags =
1650                                             (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1651                                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1652 
1653                                 IoFlags |=
1654                                     MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1655                                 regLockFlags |=
1656                                     (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1657                                     MR_RL_FLAGS_SEQ_NUM_ENABLE);
1658 
1659                                 ddi_put8(acc_handle,
1660                                     &scsi_raid_io->ChainOffset, 0);
1661                                 ddi_put8(acc_handle,
1662                                     &scsi_raid_io->RaidContext.nsegType,
1663                                     ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1664                                     MPI2_TYPE_CUDA));
1665                                 ddi_put8(acc_handle,
1666                                     &scsi_raid_io->RaidContext.regLockFlags,
1667                                     regLockFlags);
1668                                 ddi_put16(acc_handle,
1669                                     &scsi_raid_io->IoFlags, IoFlags);
1670                         }
1671 
1672                         if ((instance->load_balance_info[
1673                             acmd->device_id].loadBalanceFlag) &&
1674                             (io_info.isRead)) {
1675                                 io_info.devHandle =
1676                                     get_updated_dev_handle(&instance->
1677                                     load_balance_info[acmd->device_id],
1678                                     &io_info);
1679                                 cmd->load_balance_flag |=
1680                                     MEGASAS_LOAD_BALANCE_FLAG;
1681                         } else {
1682                                 cmd->load_balance_flag &=
1683                                     ~MEGASAS_LOAD_BALANCE_FLAG;
1684                         }
1685 
1686                         ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1687                         ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1688                             io_info.devHandle);
1689 
1690                 } else {
1691                         ddi_put8(acc_handle, &scsi_raid_io->Function,
1692                             MPI2_FUNCTION_LD_IO_REQUEST);
1693 
1694                         ddi_put16(acc_handle,
1695                             &scsi_raid_io->DevHandle, acmd->device_id);
1696 
1697                         ReqDescUnion->SCSIIO.RequestFlags =
1698                             (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1699                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1700 
1701                         ddi_put16(acc_handle,
1702                             &scsi_raid_io->RaidContext.timeoutValue,
1703                             local_map_ptr->raidMap.fpPdIoTimeoutSec);
1704 
1705                         if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1706                                 uint8_t regLockFlags = ddi_get8(acc_handle,
1707                                     &scsi_raid_io->RaidContext.regLockFlags);
1708 
1709                                 if (regLockFlags == REGION_TYPE_UNUSED) {
1710                                         ReqDescUnion->SCSIIO.RequestFlags =
1711                                             (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1712                                             MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1713                                 }
1714 
1715                                 regLockFlags |=
1716                                     (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1717                                     MR_RL_FLAGS_SEQ_NUM_ENABLE);
1718 
1719                                 ddi_put8(acc_handle,
1720                                     &scsi_raid_io->RaidContext.nsegType,
1721                                     ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1722                                     MPI2_TYPE_CUDA));
1723                                 ddi_put8(acc_handle,
1724                                     &scsi_raid_io->RaidContext.regLockFlags,
1725                                     regLockFlags);
1726                         }
1727                 } /* Not FP */
1728 
1729                 /* Release SYNC MAP UPDATE lock */
1730                 mutex_exit(&instance->sync_map_mtx);
1731 
1732 
1733                 /*
1734                  * Set sense buffer physical address/length in scsi_io_request.
1735                  */
1736                 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1737                     cmd->sense_phys_addr1);
1738                 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1739                     SENSE_LENGTH);
1740 
1741                 /* Construct SGL */
1742                 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1743                     offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1744 
1745                 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1746                     scsi_raid_io, &datalen);
1747 
1748                 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1749 
1750                 break;
1751 #ifndef PDSUPPORT       /* if PDSUPPORT, skip break and fall through */
1752         } else {
1753                 break;
1754 #endif
1755         }
1756         /* fall through For all non-rd/wr cmds */
1757         default:
1758                 switch (pkt->pkt_cdbp[0]) {
1759                 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1760                         return_raid_msg_pkt(instance, cmd);
1761                         *cmd_done = 1;
1762                         return (NULL);
1763                 }
1764 
1765                 case SCMD_MODE_SENSE:
1766                 case SCMD_MODE_SENSE_G1: {
1767                         union scsi_cdb  *cdbp;
1768                         uint16_t        page_code;
1769 
1770                         cdbp = (void *)pkt->pkt_cdbp;
1771                         page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1772                         switch (page_code) {
1773                         case 0x3:
1774                         case 0x4:
1775                                 (void) mrsas_mode_sense_build(pkt);
1776                                 return_raid_msg_pkt(instance, cmd);
1777                                 *cmd_done = 1;
1778                                 return (NULL);
1779                         }
1780                         break;
1781                 }
1782 
1783                 default: {
1784                         /*
1785                          * Here we need to handle PASSTHRU for
1786                          * Logical Devices. Like Inquiry etc.
1787                          */
1788 
1789                         if (!(acmd->islogical)) {
1790 
1791                                 /* Acquire SYNC MAP UPDATE lock */
1792                                 mutex_enter(&instance->sync_map_mtx);
1793 
1794                                 local_map_ptr =
1795                                     instance->ld_map[(instance->map_id & 1)];
1796 
1797                                 ddi_put8(acc_handle, &scsi_raid_io->Function,
1798                                     MPI2_FUNCTION_SCSI_IO_REQUEST);
1799 
1800                                 ReqDescUnion->SCSIIO.RequestFlags =
1801                                     (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1802                                     MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1803 
1804                                 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1805                                     local_map_ptr->raidMap.
1806                                     devHndlInfo[acmd->device_id].curDevHdl);
1807 
1808 
1809                                 /* Set regLockFlasgs to REGION_TYPE_BYPASS */
1810                                 ddi_put8(acc_handle,
1811                                     &scsi_raid_io->RaidContext.regLockFlags, 0);
1812                                 ddi_put64(acc_handle,
1813                                     &scsi_raid_io->RaidContext.regLockRowLBA,
1814                                     0);
1815                                 ddi_put32(acc_handle,
1816                                     &scsi_raid_io->RaidContext.regLockLength,
1817                                     0);
1818                                 ddi_put8(acc_handle,
1819                                     &scsi_raid_io->RaidContext.RAIDFlags,
1820                                     MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1821                                     MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1822                                 ddi_put16(acc_handle,
1823                                     &scsi_raid_io->RaidContext.timeoutValue,
1824                                     local_map_ptr->raidMap.fpPdIoTimeoutSec);
1825                                 ddi_put16(acc_handle,
1826                                     &scsi_raid_io->RaidContext.ldTargetId,
1827                                     acmd->device_id);
1828                                 ddi_put8(acc_handle,
1829                                     &scsi_raid_io->LUN[1], acmd->lun);
1830 
1831                                 /* Release SYNC MAP UPDATE lock */
1832                                 mutex_exit(&instance->sync_map_mtx);
1833 
1834                         } else {
1835                                 ddi_put8(acc_handle, &scsi_raid_io->Function,
1836                                     MPI2_FUNCTION_LD_IO_REQUEST);
1837                                 ddi_put8(acc_handle,
1838                                     &scsi_raid_io->LUN[1], acmd->lun);
1839                                 ddi_put16(acc_handle,
1840                                     &scsi_raid_io->DevHandle, acmd->device_id);
1841                                 ReqDescUnion->SCSIIO.RequestFlags =
1842                                     (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1843                                     MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1844                         }
1845 
1846                         /*
1847                          * Set sense buffer physical address/length in
1848                          * scsi_io_request.
1849                          */
1850                         ddi_put32(acc_handle,
1851                             &scsi_raid_io->SenseBufferLowAddress,
1852                             cmd->sense_phys_addr1);
1853                         ddi_put8(acc_handle,
1854                             &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1855 
1856                         /* Construct SGL */
1857                         ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1858                             offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1859 
1860                         (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1861                             scsi_raid_io, &datalen);
1862 
1863                         ddi_put32(acc_handle,
1864                             &scsi_raid_io->DataLength, datalen);
1865 
1866 
1867                         con_log(CL_ANN, (CE_CONT,
1868                             "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1869                             pkt->pkt_cdbp[0], acmd->device_id));
1870                         con_log(CL_DLEVEL1, (CE_CONT,
1871                             "data length = %x\n",
1872                             scsi_raid_io->DataLength));
1873                         con_log(CL_DLEVEL1, (CE_CONT,
1874                             "cdb length = %x\n",
1875                             acmd->cmd_cdblen));
1876                 }
1877                         break;
1878                 }
1879 
1880         }
1881 
1882         return (cmd);
1883 }
1884 
1885 /*
1886  * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1887  * @ap:
1888  * @pkt:
1889  * @bp:
1890  * @cmdlen:
1891  * @statuslen:
1892  * @tgtlen:
1893  * @flags:
1894  * @callback:
1895  *
1896  * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1897  * structure and DMA resources for a target driver request. The
1898  * tran_init_pkt() entry point is called when the target driver calls the
1899  * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1900  * is a request to perform one or more of three possible services:
1901  *  - allocation and initialization of a scsi_pkt structure
1902  *  - allocation of DMA resources for data transfer
1903  *  - reallocation of DMA resources for the next portion of the data transfer
1904  */
1905 struct scsi_pkt *
1906 mrsas_tbolt_tran_init_pkt(struct scsi_address *ap,
1907         register struct scsi_pkt *pkt,
1908         struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1909         int flags, int (*callback)(), caddr_t arg)
1910 {
1911         struct scsa_cmd *acmd;
1912         struct mrsas_instance   *instance;
1913         struct scsi_pkt *new_pkt;
1914 
1915         instance = ADDR2MR(ap);
1916 
1917         /* step #1 : pkt allocation */
1918         if (pkt == NULL) {
1919                 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1920                     tgtlen, sizeof (struct scsa_cmd), callback, arg);
1921                 if (pkt == NULL) {
1922                         return (NULL);
1923                 }
1924 
1925                 acmd = PKT2CMD(pkt);
1926 
1927                 /*
1928                  * Initialize the new pkt - we redundantly initialize
1929                  * all the fields for illustrative purposes.
1930                  */
1931                 acmd->cmd_pkt                = pkt;
1932                 acmd->cmd_flags              = 0;
1933                 acmd->cmd_scblen     = statuslen;
1934                 acmd->cmd_cdblen     = cmdlen;
1935                 acmd->cmd_dmahandle  = NULL;
1936                 acmd->cmd_ncookies   = 0;
1937                 acmd->cmd_cookie     = 0;
1938                 acmd->cmd_cookiecnt  = 0;
1939                 acmd->cmd_nwin               = 0;
1940 
1941                 pkt->pkt_address     = *ap;
1942                 pkt->pkt_comp                = (void (*)())NULL;
1943                 pkt->pkt_flags               = 0;
1944                 pkt->pkt_time                = 0;
1945                 pkt->pkt_resid               = 0;
1946                 pkt->pkt_state               = 0;
1947                 pkt->pkt_statistics  = 0;
1948                 pkt->pkt_reason              = 0;
1949                 new_pkt                 = pkt;
1950         } else {
1951                 acmd = PKT2CMD(pkt);
1952                 new_pkt = NULL;
1953         }
1954 
1955         /* step #2 : dma allocation/move */
1956         if (bp && bp->b_bcount != 0) {
1957                 if (acmd->cmd_dmahandle == NULL) {
1958                         if (mrsas_dma_alloc(instance, pkt, bp, flags,
1959                             callback) == DDI_FAILURE) {
1960                                 if (new_pkt) {
1961                                         scsi_hba_pkt_free(ap, new_pkt);
1962                                 }
1963                                 return ((struct scsi_pkt *)NULL);
1964                         }
1965                 } else {
1966                         if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1967                                 return ((struct scsi_pkt *)NULL);
1968                         }
1969                 }
1970         }
1971         return (pkt);
1972 }
1973 
1974 
1975 uint32_t
1976 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1977 {
1978         return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1979 }
1980 
1981 void
1982 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1983 {
1984         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1985         atomic_add_16(&instance->fw_outstanding, 1);
1986 
1987         struct scsi_pkt *pkt;
1988 
1989         con_log(CL_ANN1,
1990             (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1991 
1992         con_log(CL_DLEVEL1, (CE_CONT,
1993             " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1994         con_log(CL_DLEVEL1, (CE_CONT,
1995             " [req desc low part] %x \n",
1996             (uint_t)(req_desc->Words & 0xffffffffff)));
1997         con_log(CL_DLEVEL1, (CE_CONT,
1998             " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1999         pkt = cmd->pkt;
2000 
2001         if (pkt) {
2002                 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2003                     "ISSUED CMD TO FW : called : cmd:"
2004                     ": %p instance : %p pkt : %p pkt_time : %x\n",
2005                     gethrtime(), (void *)cmd, (void *)instance,
2006                     (void *)pkt, cmd->drv_pkt_time));
2007                 if (instance->adapterresetinprogress) {
2008                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2009                         con_log(CL_ANN, (CE_NOTE,
2010                             "TBOLT Reset the scsi_pkt timer"));
2011                 } else {
2012                         push_pending_mfi_pkt(instance, cmd);
2013                 }
2014 
2015         } else {
2016                 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2017                     "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2018                     "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2019         }
2020 
2021         /* Issue the command to the FW */
2022         mutex_enter(&instance->reg_write_mtx);
2023         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2024         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2025         mutex_exit(&instance->reg_write_mtx);
2026 }
2027 
2028 /*
2029  * issue_cmd_in_sync_mode
2030  */
2031 int
2032 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2033     struct mrsas_cmd *cmd)
2034 {
2035         int             i;
2036         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2037         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2038 
2039         struct mrsas_header     *hdr;
2040         hdr = (struct mrsas_header *)&cmd->frame->hdr;
2041 
2042         con_log(CL_ANN,
2043             (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
2044             cmd->SMID));
2045 
2046 
2047         if (instance->adapterresetinprogress) {
2048                 cmd->drv_pkt_time = ddi_get16
2049                     (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2050                 if (cmd->drv_pkt_time < debug_timeout_g)
2051                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2052                 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2053                     "RESET-IN-PROGRESS, issue cmd & return."));
2054 
2055                 mutex_enter(&instance->reg_write_mtx);
2056                 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2057                 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2058                 mutex_exit(&instance->reg_write_mtx);
2059 
2060                 return (DDI_SUCCESS);
2061         } else {
2062                 con_log(CL_ANN1, (CE_NOTE,
2063                     "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
2064                 push_pending_mfi_pkt(instance, cmd);
2065         }
2066 
2067         con_log(CL_DLEVEL2, (CE_NOTE,
2068             "HighQport offset :%p",
2069             (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2070         con_log(CL_DLEVEL2, (CE_NOTE,
2071             "LowQport offset :%p",
2072             (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2073 
2074         cmd->sync_cmd = MRSAS_TRUE;
2075         cmd->cmd_status =  ENODATA;
2076 
2077 
2078         mutex_enter(&instance->reg_write_mtx);
2079         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2080         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2081         mutex_exit(&instance->reg_write_mtx);
2082 
2083         con_log(CL_ANN1, (CE_NOTE,
2084             " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2085         con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2086             (uint_t)(req_desc->Words & 0xffffffff)));
2087 
2088         mutex_enter(&instance->int_cmd_mtx);
2089         for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2090                 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2091         }
2092         mutex_exit(&instance->int_cmd_mtx);
2093 
2094 
2095         if (i < (msecs -1)) {
2096                 return (DDI_SUCCESS);
2097         } else {
2098                 return (DDI_FAILURE);
2099         }
2100 }
2101 
2102 /*
2103  * issue_cmd_in_poll_mode
2104  */
2105 int
2106 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2107     struct mrsas_cmd *cmd)
2108 {
2109         int             i;
2110         uint16_t        flags;
2111         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2112         struct mrsas_header *frame_hdr;
2113 
2114         con_log(CL_ANN,
2115             (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2116             cmd->SMID));
2117 
2118         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2119 
2120         frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2121         ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2122             MFI_CMD_STATUS_POLL_MODE);
2123         flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2124         flags   |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2125         ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2126 
2127         con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2128             (uint_t)(req_desc->Words & 0xffffffff)));
2129         con_log(CL_ANN1, (CE_NOTE,
2130             " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2131 
2132         /* issue the frame using inbound queue port */
2133         mutex_enter(&instance->reg_write_mtx);
2134         WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2135         WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2136         mutex_exit(&instance->reg_write_mtx);
2137 
2138         for (i = 0; i < msecs && (
2139             ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2140             == MFI_CMD_STATUS_POLL_MODE); i++) {
2141                 /* wait for cmd_status to change from 0xFF */
2142                 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2143         }
2144 
2145         if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2146             &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2147                 con_log(CL_ANN1, (CE_NOTE,
2148                     " cmd failed %" PRIx64, (req_desc->Words)));
2149                 return (DDI_FAILURE);
2150         }
2151 
2152         return (DDI_SUCCESS);
2153 }
2154 
2155 void
2156 tbolt_enable_intr(struct mrsas_instance *instance)
2157 {
2158         /* TODO: For Thunderbolt/Invader also clear intr on enable */
2159         /* writel(~0, ®s->outbound_intr_status); */
2160         /* readl(®s->outbound_intr_status); */
2161 
2162         WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2163 
2164         /* dummy read to force PCI flush */
2165         (void) RD_OB_INTR_MASK(instance);
2166 
2167 }
2168 
2169 void
2170 tbolt_disable_intr(struct mrsas_instance *instance)
2171 {
2172         uint32_t mask = 0xFFFFFFFF;
2173 
2174         WR_OB_INTR_MASK(mask, instance);
2175 
2176         /* Dummy readl to force pci flush */
2177 
2178         (void) RD_OB_INTR_MASK(instance);
2179 }
2180 
2181 
2182 int
2183 tbolt_intr_ack(struct mrsas_instance *instance)
2184 {
2185         uint32_t        status;
2186 
2187         /* check if it is our interrupt */
2188         status = RD_OB_INTR_STATUS(instance);
2189         con_log(CL_ANN1, (CE_NOTE,
2190             "chkpnt: Entered tbolt_intr_ack status = %d", status));
2191 
2192         if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2193                 return (DDI_INTR_UNCLAIMED);
2194         }
2195 
2196         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2197                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2198                 return (DDI_INTR_UNCLAIMED);
2199         }
2200 
2201         if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2202                 /* clear the interrupt by writing back the same value */
2203                 WR_OB_INTR_STATUS(status, instance);
2204                 /* dummy READ */
2205                 (void) RD_OB_INTR_STATUS(instance);
2206         }
2207         return (DDI_INTR_CLAIMED);
2208 }
2209 
2210 /*
2211  * get_raid_msg_pkt : Get a command from the free pool
2212  * After successful allocation, the caller of this routine
2213  * must clear the frame buffer (memset to zero) before
2214  * using the packet further.
2215  *
2216  * ***** Note *****
2217  * After clearing the frame buffer the context id of the
2218  * frame buffer SHOULD be restored back.
2219  */
2220 
2221 struct mrsas_cmd *
2222 get_raid_msg_pkt(struct mrsas_instance *instance)
2223 {
2224         mlist_t                 *head = &instance->cmd_pool_list;
2225         struct mrsas_cmd        *cmd = NULL;
2226 
2227         mutex_enter(&instance->cmd_pool_mtx);
2228         ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2229 
2230 
2231         if (!mlist_empty(head)) {
2232                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2233                 mlist_del_init(head->next);
2234         }
2235         if (cmd != NULL) {
2236                 cmd->pkt = NULL;
2237                 cmd->retry_count_for_ocr = 0;
2238                 cmd->drv_pkt_time = 0;
2239         }
2240         mutex_exit(&instance->cmd_pool_mtx);
2241 
2242         if (cmd != NULL)
2243                 bzero(cmd->scsi_io_request,
2244                     sizeof (Mpi2RaidSCSIIORequest_t));
2245         return (cmd);
2246 }
2247 
2248 struct mrsas_cmd *
2249 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2250 {
2251         mlist_t                 *head = &instance->cmd_app_pool_list;
2252         struct mrsas_cmd        *cmd = NULL;
2253 
2254         mutex_enter(&instance->cmd_app_pool_mtx);
2255         ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2256 
2257         if (!mlist_empty(head)) {
2258                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2259                 mlist_del_init(head->next);
2260         }
2261         if (cmd != NULL) {
2262                 cmd->retry_count_for_ocr = 0;
2263                 cmd->drv_pkt_time = 0;
2264                 cmd->pkt = NULL;
2265                 cmd->request_desc = NULL;
2266 
2267         }
2268 
2269         mutex_exit(&instance->cmd_app_pool_mtx);
2270 
2271         if (cmd != NULL) {
2272                 bzero(cmd->scsi_io_request,
2273                     sizeof (Mpi2RaidSCSIIORequest_t));
2274         }
2275 
2276         return (cmd);
2277 }
2278 
2279 /*
2280  * return_raid_msg_pkt : Return a cmd to free command pool
2281  */
2282 void
2283 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2284 {
2285         mutex_enter(&instance->cmd_pool_mtx);
2286         ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2287 
2288 
2289         mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2290 
2291         mutex_exit(&instance->cmd_pool_mtx);
2292 }
2293 
2294 void
2295 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2296 {
2297         mutex_enter(&instance->cmd_app_pool_mtx);
2298         ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2299 
2300         mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2301 
2302         mutex_exit(&instance->cmd_app_pool_mtx);
2303 }
2304 
2305 
/*
 * mr_sas_tbolt_build_mfi_cmd - Wrap a legacy MFI frame inside an MPI2
 * pass-through request so it can be issued on a Thunderbolt/Invader
 * controller.  The MFI frame is attached as a single IEEE chain SGE,
 * and a SCSI-IO request descriptor carrying this command's SMID is
 * built and stashed in cmd->request_desc for later submission.
 */
void
mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
	uint32_t			index;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/* Only meaningful on Thunderbolt-class (MFA) hardware. */
	if (!instance->tbolt) {
		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
		return;
	}

	index = cmd->index;

	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);

	if (!ReqDescUnion) {
		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
		return;
	}

	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));

	/* Build a SCSI-IO type request descriptor for this SMID. */
	ReqDescUnion->Words = 0;

	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	ReqDescUnion->SCSIIO.SMID = cmd->SMID;

	cmd->request_desc = ReqDescUnion;

	/* get raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
		/*
		 * Invader: clear the Flags byte of the last SGE slot in
		 * the main message frame.  NOTE(review): presumably an
		 * Invader firmware requirement — confirm against the
		 * LSI/Avago programming guide.
		 */
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
		    &scsi_raid_io->SGL.IeeeChain;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	ddi_put8(acc_handle, &scsi_raid_io->Function,
	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);

	/* SGLOffset0 is in 4-byte words, ChainOffset in 16-byte units. */
	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);

	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
	    cmd->sense_phys_addr1);


	/* Point a single IEEE chain SGE at the legacy MFI frame. */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
	    (U64)cmd->frame_phys_addr);

	ddi_put8(acc_handle,
	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
	/* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);

	con_log(CL_ANN1, (CE_NOTE,
	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
	    scsi_raid_io_sgl_ieee->Address));
	con_log(CL_ANN1, (CE_NOTE,
	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
	    scsi_raid_io_sgl_ieee->Flags));
}
2386 
2387 
/*
 * tbolt_complete_cmd - Complete one command reaped from the reply queue.
 *
 * Dispatches on the MPI2 function code of the request frame:
 *  - MPI2_FUNCTION_SCSI_IO_REQUEST (fast path): undo load-balance
 *    accounting, then fall through to the regular LD I/O path.
 *  - MPI2_FUNCTION_LD_IO_REQUEST: translate the firmware (MFI) status
 *    into scsi_pkt fields, run the target completion callback, and
 *    return the command to the free pool.
 *  - MPI2_FUNCTION_PASSTHRU_IO_REQUEST (MFA): handle RAID-map sync,
 *    AEN, and internal sync-mode command completions.
 */
void
tbolt_complete_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	uint8_t				status;
	uint8_t				extStatus;
	uint8_t				arm;
	struct scsa_cmd			*acmd;
	struct scsi_pkt			*pkt;
	struct scsi_arq_status		*arqstat;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	LD_LOAD_BALANCE_INFO		*lbinfo;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* Firmware writes its completion status into the RAID context. */
	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);

	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));

	if (status != MFI_STAT_OK) {
		con_log(CL_ANN, (CE_WARN,
		    "IO Cmd Failed SMID %x", cmd->SMID));
	} else {
		con_log(CL_ANN, (CE_NOTE,
		    "IO Cmd Success  SMID %x", cmd->SMID));
	}

	/* regular commands */

	switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {

	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
		acmd =	(struct scsa_cmd *)cmd->cmd;
		lbinfo = &instance->load_balance_info[acmd->device_id];

		/*
		 * If this I/O was load-balanced across a RAID-1 pair,
		 * decrement the pending count on the arm it was sent to.
		 * NOTE(review): DevHandle is read directly here rather
		 * than via ddi_get16() like other frame fields — confirm
		 * this is intentional.
		 */
		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
			arm = lbinfo->raid1DevHandle[0] ==
			    scsi_raid_io->DevHandle ? 0 : 1;

			lbinfo->scsi_pending_cmds[arm]--;
			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
		}
		con_log(CL_DLEVEL3, (CE_NOTE,
		    "FastPath IO Completion Success "));
		/* FALLTHRU */

	case MPI2_FUNCTION_LD_IO_REQUEST :   { /* Regular Path IO. */
		acmd =	(struct scsa_cmd *)cmd->cmd;
		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);

		/* Make DMA'd data visible to the CPU before completion. */
		if (acmd->cmd_flags & CFLAG_DMAVALID) {
			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
				(void) ddi_dma_sync(acmd->cmd_dmahandle,
				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
				    DDI_DMA_SYNC_FORCPU);
			}
		}

		/* Start from a fully-successful packet state ... */
		pkt->pkt_reason		 = CMD_CMPLT;
		pkt->pkt_statistics	 = 0;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
		    ((acmd->islogical) ? "LD" : "PD"),
		    acmd->cmd_dmacount, cmd->SMID, status));

		/*
		 * INQUIRY filtering: successful INQUIRY data for physical
		 * drives is suppressed unless built with PDSUPPORT (and
		 * then only for direct-access devices); hidden devices
		 * are reported as not-found below.
		 */
		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
			struct scsi_inquiry	*inq;

			if (acmd->cmd_dmacount != 0) {
				bp_mapin(acmd->cmd_buf);
				inq = (struct scsi_inquiry *)
				    acmd->cmd_buf->b_un.b_addr;

				/* don't expose physical drives to OS */
				if (acmd->islogical &&
				    (status == MFI_STAT_OK)) {
					display_scsi_inquiry((caddr_t)inq);
#ifdef PDSUPPORT
				} else if ((status == MFI_STAT_OK) &&
				    inq->inq_dtype == DTYPE_DIRECT) {
					display_scsi_inquiry((caddr_t)inq);
#endif
				} else {
					/* for physical disk */
					status = MFI_STAT_DEVICE_NOT_FOUND;
				}
			}
		}

		/* ... then override per the firmware status. */
		switch (status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_CC_IN_PROGRESS:
		case MFI_STAT_LD_RECON_IN_PROGRESS:
			/* Background CC/rebuild does not fail the I/O. */
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			pkt->pkt_reason	 = CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_IO_FAILED:
			cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
			pkt->pkt_reason	 = CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_WARN,
			    "tbolt_complete_cmd: scsi_done with error"));

			/* CHECK CONDITION: copy sense data into the arq. */
			pkt->pkt_reason	 = CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
				con_log(CL_ANN,
				    (CE_WARN, "TEST_UNIT_READY fail"));
			} else {
				pkt->pkt_state |= STATE_ARQ_DONE;
				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |=
				    STATE_GOT_BUS | STATE_GOT_TARGET
				    | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;
				con_log(CL_ANN1,
				    (CE_NOTE, "Copying Sense data %x",
				    cmd->SMID));

				ddi_rep_get8(acc_handle,
				    (uint8_t *)&(arqstat->sts_sensedata),
				    cmd->sense1,
				    sizeof (struct scsi_extended_sense),
				    DDI_DEV_AUTOINCR);

			}
			break;
		case MFI_STAT_LD_OFFLINE:
			cmn_err(CE_WARN,
			    "tbolt_complete_cmd: ld offline "
			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
			    /* UNDO: */
			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),

			    ddi_get16(acc_handle,
			    &scsi_raid_io->RaidContext.ldTargetId),

			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));

			pkt->pkt_reason	 = CMD_DEV_GONE;
			pkt->pkt_statistics  = STAT_DISCON;
			break;
		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: device not found error"));
			pkt->pkt_reason	 = CMD_DEV_GONE;
			pkt->pkt_statistics  = STAT_DISCON;
			break;

		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
			/* Fabricate ILLEGAL REQUEST sense (ASC 0x21). */
			pkt->pkt_state |= STATE_ARQ_DONE;
			pkt->pkt_reason	 = CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			arqstat = (void *)(pkt->pkt_scbp);
			arqstat->sts_rqpkt_reason = CMD_CMPLT;
			arqstat->sts_rqpkt_resid = 0;
			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA;
			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;

			arqstat->sts_sensedata.es_valid = 1;
			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;

			/*
			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
			 * ASC: 0x21h; ASCQ: 0x00h;
			 */
			arqstat->sts_sensedata.es_add_code = 0x21;
			arqstat->sts_sensedata.es_qual_code = 0x00;
			break;
		case MFI_STAT_INVALID_CMD:
		case MFI_STAT_INVALID_DCMD:
		case MFI_STAT_INVALID_PARAMETER:
		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
		default:
			cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
			pkt->pkt_reason	 = CMD_TRAN_ERR;

			break;
		}

		/* One fewer command outstanding at the firmware. */
		atomic_add_16(&instance->fw_outstanding, (-1));

		(void) mrsas_common_check(instance, cmd);
		/* FMA: a bad DMA handle downgrades the completion. */
		if (acmd->cmd_dmahandle) {
			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
			    DDI_SUCCESS) {
				ddi_fm_service_impact(instance->dip,
				    DDI_SERVICE_UNAFFECTED);
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics = 0;
			}
		}

		/* Call the callback routine */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
			(*pkt->pkt_comp)(pkt);

		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));

		/* Scrub the status fields before reusing the frame. */
		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);

		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);

		return_raid_msg_pkt(instance, cmd);
		break;
	}
	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:  /* MFA command. */

		/*
		 * RAID map sync completion (mbox.b[1] == 1 marks the
		 * sync variant of MR_DCMD_LD_MAP_GET_INFO): switch to
		 * the new map, revalidate fast-path eligibility, and
		 * re-arm the sync command.
		 */
		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
		    cmd->frame->dcmd.mbox.b[1] == 1) {

			mutex_enter(&instance->sync_map_mtx);

			con_log(CL_ANN, (CE_NOTE,
			    "LDMAP sync command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if (cmd->frame->hdr.cmd_status != 0) {
				cmn_err(CE_WARN,
				    "map sync failed, status = 0x%x.",
				    cmd->frame->hdr.cmd_status);
			} else {
				/* map_id parity selects the active map. */
				instance->map_id++;
				cmn_err(CE_NOTE,
				    "map sync received, switched map_id to %"
				    PRIu64 " \n", instance->map_id);
			}

			if (MR_ValidateMapInfo(instance->ld_map[
			    (instance->map_id & 1)],
			    instance->load_balance_info)) {
				instance->fast_path_io = 1;
			} else {
				instance->fast_path_io = 0;
			}

			con_log(CL_ANN, (CE_NOTE,
			    "instance->fast_path_io %d",
			    instance->fast_path_io));

			instance->unroll.syncCmd = 0;

			/* Re-issue the map sync so we keep tracking FW. */
			if (instance->map_update_cmd == cmd) {
				return_raid_msg_pkt(instance, cmd);
				atomic_add_16(&instance->fw_outstanding, (-1));
				(void) mrsas_tbolt_sync_map_info(instance);
			}

			cmn_err(CE_NOTE, "LDMAP sync completed.");
			mutex_exit(&instance->sync_map_mtx);
			break;
		}

		/* Asynchronous Event Notification completion. */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
			con_log(CL_ANN1, (CE_CONT,
			    "AEN command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if ((instance->aen_cmd == cmd) &&
			    (instance->aen_cmd->abort_aen)) {
				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
				    "aborted_aen returned"));
			} else {
				atomic_add_16(&instance->fw_outstanding, (-1));
				service_mfi_aen(instance, cmd);
			}
		}

		/* Wake any thread waiting in sync mode on this command. */
		if (cmd->sync_cmd == MRSAS_TRUE) {
			con_log(CL_ANN1, (CE_CONT,
			    "Sync-mode Command Response SMID RECEIVED 0x%X",
			    cmd->SMID));

			tbolt_complete_cmd_in_sync_mode(instance, cmd);
		} else {
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
			    cmd->SMID));
		}
		break;
	default:
		/* Unknown function code: report through FMA and drop. */
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

		/* free message */
		con_log(CL_ANN,
		    (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
		break;
	}
}
2697 
/*
 * mr_sas_tbolt_process_outstanding_cmd - Drain the MPI2 reply
 * descriptor queue.
 *
 * Walks reply descriptors starting at reply_read_index, completing the
 * command behind each valid SMID via tbolt_complete_cmd(), marking each
 * consumed slot free (all 1s) and wrapping at reply_q_depth.  Finally
 * posts the new read index back to the controller.
 *
 * Returns DDI_INTR_CLAIMED when any work was found (or an FMA fault was
 * reported), DDI_INTR_UNCLAIMED when the first descriptor is unused.
 */
uint_t
mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
{
	uint8_t				replyType;
	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
	Mpi2ReplyDescriptorsUnion_t	*desc;
	uint16_t			smid;
	union desc_value		d_val;
	struct mrsas_cmd		*cmd;

	struct mrsas_header	*hdr;
	struct scsi_pkt		*pkt;

	/* Sync the whole reply ring before inspecting it. */
	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	desc = instance->reply_frame_pool;
	desc += instance->reply_read_index;

	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
	replyType = replyDesc->ReplyFlags &
	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Nothing posted at the current index: not our interrupt. */
	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return (DDI_INTR_UNCLAIMED);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		con_log(CL_ANN1,
		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
		    "FMA check, returning DDI_INTR_UNCLAIMED"));
		return (DDI_INTR_CLAIMED);
	}

	con_log(CL_ANN1, (CE_NOTE, "Reply Desc  = %p  Words = %" PRIx64,
	    (void *)desc, desc->Words));

	d_val.word = desc->Words;


	/* Read Reply descriptor */
	while ((d_val.u1.low != 0xffffffff) &&
	    (d_val.u1.high != 0xffffffff)) {

		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		smid = replyDesc->SMID;

		/* SMIDs are 1-based; reject anything out of range. */
		if (!smid || smid > instance->max_fw_cmds + 1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "Reply Desc at Break  = %p  Words = %" PRIx64,
			    (void *)desc, desc->Words));
			break;
		}

		cmd	= instance->cmd_list[smid - 1];
		if (!cmd) {
			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
			    "outstanding_cmd: Invalid command "
			    " or Poll commad Received in completion path"));
		} else {
			/*
			 * Unlink the command from the pending list under
			 * cmd_pend_mtx before completing it.
			 */
			mutex_enter(&instance->cmd_pend_mtx);
			if (cmd->sync_cmd == MRSAS_TRUE) {
				hdr = (struct mrsas_header *)&cmd->frame->hdr;
				if (hdr) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    " mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			} else {
				pkt = cmd->pkt;
				if (pkt) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    "mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			}

			mutex_exit(&instance->cmd_pend_mtx);

			tbolt_complete_cmd(instance, cmd);
		}
		/* set it back to all 1s. */
		desc->Words = -1LL;

		instance->reply_read_index++;

		if (instance->reply_read_index >= (instance->reply_q_depth)) {
			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
			instance->reply_read_index = 0;
		}

		/* Get the next reply descriptor */
		if (!instance->reply_read_index)
			desc = instance->reply_frame_pool;
		else
			desc++;

		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		con_log(CL_ANN1, (CE_NOTE,
		    "Next Reply Desc  = %p Words = %" PRIx64,
		    (void *)desc, desc->Words));

		replyType = replyDesc->ReplyFlags &
		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

	} /* End of while loop. */

	/* update replyIndex to FW */
	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);


	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);
	return (DDI_INTR_CLAIMED);
}
2831 
2832 
2833 
2834 
2835 /*
2836  * complete_cmd_in_sync_mode -  Completes an internal command
2837  * @instance:                   Adapter soft state
2838  * @cmd:                        Command to be completed
2839  *
2840  * The issue_cmd_in_sync_mode() function waits for a command to complete
2841  * after it issues a command. This function wakes up that waiting routine by
2842  * calling wake_up() on the wait queue.
2843  */
2844 void
2845 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2846     struct mrsas_cmd *cmd)
2847 {
2848 
2849         cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2850             &cmd->frame->io.cmd_status);
2851 
2852         cmd->sync_cmd = MRSAS_FALSE;
2853 
2854         mutex_enter(&instance->int_cmd_mtx);
2855         if (cmd->cmd_status == ENODATA) {
2856                 cmd->cmd_status = 0;
2857         }
2858         cv_broadcast(&instance->int_cmd_cv);
2859         mutex_exit(&instance->int_cmd_mtx);
2860 
2861 }
2862 
2863 /*
2864  * mrsas_tbolt_get_ld_map_info -        Returns  ld_map structure
2865  * instance:                            Adapter soft state
2866  *
2867  * Issues an internal command (DCMD) to get the FW's controller PD
2868  * list structure.  This information is mainly used to find out SYSTEM
2869  * supported by the FW.
2870  */
2871 int
2872 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2873 {
2874         int ret = 0;
2875         struct mrsas_cmd        *cmd = NULL;
2876         struct mrsas_dcmd_frame *dcmd;
2877         MR_FW_RAID_MAP_ALL *ci;
2878         uint32_t ci_h = 0;
2879         U32 size_map_info;
2880 
2881         cmd = get_raid_msg_pkt(instance);
2882 
2883         if (cmd == NULL) {
2884                 cmn_err(CE_WARN,
2885                     "Failed to get a cmd from free-pool in get_ld_map_info()");
2886                 return (DDI_FAILURE);
2887         }
2888 
2889         dcmd = &cmd->frame->dcmd;
2890 
2891         size_map_info = sizeof (MR_FW_RAID_MAP) +
2892             (sizeof (MR_LD_SPAN_MAP) *
2893             (MAX_LOGICAL_DRIVES - 1));
2894 
2895         con_log(CL_ANN, (CE_NOTE,
2896             "size_map_info : 0x%x", size_map_info));
2897 
2898         ci = instance->ld_map[(instance->map_id & 1)];
2899         ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2900 
2901         if (!ci) {
2902                 cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2903                 return_raid_msg_pkt(instance, cmd);
2904                 return (-1);
2905         }
2906 
2907         bzero(ci, sizeof (*ci));
2908         bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2909 
2910         dcmd->cmd = MFI_CMD_OP_DCMD;
2911         dcmd->cmd_status = 0xFF;
2912         dcmd->sge_count = 1;
2913         dcmd->flags = MFI_FRAME_DIR_READ;
2914         dcmd->timeout = 0;
2915         dcmd->pad_0 = 0;
2916         dcmd->data_xfer_len = size_map_info;
2917         dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2918         dcmd->sgl.sge32[0].phys_addr = ci_h;
2919         dcmd->sgl.sge32[0].length = size_map_info;
2920 
2921 
2922         mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2923 
2924         if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2925                 ret = 0;
2926                 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2927         } else {
2928                 cmn_err(CE_WARN, "Get LD Map Info failed");
2929                 ret = -1;
2930         }
2931 
2932         return_raid_msg_pkt(instance, cmd);
2933 
2934         return (ret);
2935 }
2936 
2937 void
2938 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2939 {
2940         uint32_t i;
2941         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2942         union desc_value d_val;
2943 
2944         reply_desc = instance->reply_frame_pool;
2945 
2946         for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2947                 d_val.word = reply_desc->Words;
2948                 con_log(CL_DLEVEL3, (CE_NOTE,
2949                     "i=%d, %x:%x",
2950                     i, d_val.u1.high, d_val.u1.low));
2951         }
2952 }
2953 
2954 /*
2955  * mrsas_tbolt_command_create - Create command for fast path.
2956  * @io_info:    MegaRAID IO request packet pointer.
2957  * @ref_tag:    Reference tag for RD/WRPROTECT
2958  *
2959  * Create the command for fast path.
2960  */
2961 void
2962 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2963     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2964     U32 ref_tag)
2965 {
2966         uint16_t                EEDPFlags;
2967         uint32_t                Control;
2968         ddi_acc_handle_t acc_handle =
2969             instance->mpi2_frame_pool_dma_obj.acc_handle;
2970 
2971         /* Prepare 32-byte CDB if DIF is supported on this device */
2972         con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2973 
2974         bzero(cdb, 32);
2975 
2976         cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2977 
2978 
2979         cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2980 
2981         if (io_info->isRead)
2982                 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2983         else
2984                 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2985 
2986         /* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2987         cdb[10] = MRSAS_RD_WR_PROTECT;
2988 
2989         /* LOGICAL BLOCK ADDRESS */
2990         cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2991         cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2992         cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2993         cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2994         cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2995         cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2996         cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2997         cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2998 
2999         /* Logical block reference tag */
3000         ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
3001             BE_32(ref_tag));
3002 
3003         ddi_put16(acc_handle,
3004             &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
3005 
3006         ddi_put32(acc_handle, &scsi_io_request->DataLength,
3007             ((io_info->numBlocks)*512));
3008         /* Specify 32-byte cdb */
3009         ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
3010 
3011         /* Transfer length */
3012         cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3013         cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3014         cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3015         cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3016 
3017         /* set SCSI IO EEDPFlags */
3018         EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
3019         Control = ddi_get32(acc_handle, &scsi_io_request->Control);
3020 
3021         /* set SCSI IO EEDPFlags bits */
3022         if (io_info->isRead) {
3023                 /*
3024                  * For READ commands, the EEDPFlags shall be set to specify to
3025                  * Increment the Primary Reference Tag, to Check the Reference
3026                  * Tag, and to Check and Remove the Protection Information
3027                  * fields.
3028                  */
3029                 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG        |
3030                     MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG  |
3031                     MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP       |
3032                     MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG  |
3033                     MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3034         } else {
3035                 /*
3036                  * For WRITE commands, the EEDPFlags shall be set to specify to
3037                  * Increment the Primary Reference Tag, and to Insert
3038                  * Protection Information fields.
3039                  */
3040                 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG        |
3041                     MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3042         }
3043         Control |= (0x4 << 26);
3044 
3045         ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
3046         ddi_put32(acc_handle, &scsi_io_request->Control, Control);
3047         ddi_put32(acc_handle,
3048             &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
3049 }
3050 
3051 
3052 /*
3053  * mrsas_tbolt_set_pd_lba -     Sets PD LBA
3054  * @cdb:                CDB
3055  * @cdb_len:            cdb length
3056  * @start_blk:          Start block of IO
3057  *
3058  * Used to set the PD LBA in CDB for FP IOs
3059  */
3060 static void
3061 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
3062     U32 num_blocks)
3063 {
3064         U8 cdb_len = *cdb_len_ptr;
3065         U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3066 
3067         /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3068         if (((cdb_len == 12) || (cdb_len == 16)) &&
3069             (start_blk <= 0xffffffff)) {
3070                 if (cdb_len == 16) {
3071                         con_log(CL_ANN,
3072                             (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
3073                         opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3074                         flagvals = cdb[1];
3075                         groupnum = cdb[14];
3076                         control = cdb[15];
3077                 } else {
3078                         con_log(CL_ANN,
3079                             (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
3080                         opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3081                         flagvals = cdb[1];
3082                         groupnum = cdb[10];
3083                         control = cdb[11];
3084                 }
3085 
3086                 bzero(cdb, sizeof (cdb));
3087 
3088                 cdb[0] = opcode;
3089                 cdb[1] = flagvals;
3090                 cdb[6] = groupnum;
3091                 cdb[9] = control;
3092                 /* Set transfer length */
3093                 cdb[8] = (U8)(num_blocks & 0xff);
3094                 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3095                 cdb_len = 10;
3096         } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3097                 /* Convert to 16 byte CDB for large LBA's */
3098                 con_log(CL_ANN,
3099                     (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3100                 switch (cdb_len) {
3101                 case 6:
3102                         opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3103                         control = cdb[5];
3104                         break;
3105                 case 10:
3106                         opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3107                         flagvals = cdb[1];
3108                         groupnum = cdb[6];
3109                         control = cdb[9];
3110                         break;
3111                 case 12:
3112                         opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3113                         flagvals = cdb[1];
3114                         groupnum = cdb[10];
3115                         control = cdb[11];
3116                         break;
3117                 }
3118 
3119                 bzero(cdb, sizeof (cdb));
3120 
3121                 cdb[0] = opcode;
3122                 cdb[1] = flagvals;
3123                 cdb[14] = groupnum;
3124                 cdb[15] = control;
3125 
3126                 /* Transfer length */
3127                 cdb[13] = (U8)(num_blocks & 0xff);
3128                 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3129                 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3130                 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3131 
3132                 /* Specify 16-byte cdb */
3133                 cdb_len = 16;
3134         } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3135                 /* convert to 10 byte CDB */
3136                 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3137                 control = cdb[5];
3138 
3139                 bzero(cdb, sizeof (cdb));
3140                 cdb[0] = opcode;
3141                 cdb[9] = control;
3142 
3143                 /* Set transfer length */
3144                 cdb[8] = (U8)(num_blocks & 0xff);
3145                 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3146 
3147                 /* Specify 10-byte cdb */
3148                 cdb_len = 10;
3149         }
3150 
3151 
3152         /* Fall through Normal case, just load LBA here */
3153         switch (cdb_len) {
3154         case 6:
3155         {
3156                 U8 val = cdb[1] & 0xE0;
3157                 cdb[3] = (U8)(start_blk & 0xff);
3158                 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3159                 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3160                 break;
3161         }
3162         case 10:
3163                 cdb[5] = (U8)(start_blk & 0xff);
3164                 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3165                 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3166                 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3167                 break;
3168         case 12:
3169                 cdb[5]    = (U8)(start_blk & 0xff);
3170                 cdb[4]    = (U8)((start_blk >> 8) & 0xff);
3171                 cdb[3]    = (U8)((start_blk >> 16) & 0xff);
3172                 cdb[2]    = (U8)((start_blk >> 24) & 0xff);
3173                 break;
3174 
3175         case 16:
3176                 cdb[9]  = (U8)(start_blk & 0xff);
3177                 cdb[8]  = (U8)((start_blk >> 8) & 0xff);
3178                 cdb[7]  = (U8)((start_blk >> 16) & 0xff);
3179                 cdb[6]  = (U8)((start_blk >> 24) & 0xff);
3180                 cdb[5]  = (U8)((start_blk >> 32) & 0xff);
3181                 cdb[4]  = (U8)((start_blk >> 40) & 0xff);
3182                 cdb[3]  = (U8)((start_blk >> 48) & 0xff);
3183                 cdb[2]  = (U8)((start_blk >> 56) & 0xff);
3184                 break;
3185         }
3186 
3187         *cdb_len_ptr = cdb_len;
3188 }
3189 
3190 
3191 static int
3192 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3193 {
3194         MR_FW_RAID_MAP_ALL *ld_map;
3195 
3196         if (!mrsas_tbolt_get_ld_map_info(instance)) {
3197 
3198                 ld_map = instance->ld_map[(instance->map_id & 1)];
3199 
3200                 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3201                     ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3202 
3203                 if (MR_ValidateMapInfo(instance->ld_map[
3204                     (instance->map_id & 1)], instance->load_balance_info)) {
3205                         con_log(CL_ANN,
3206                             (CE_CONT, "MR_ValidateMapInfo success"));
3207 
3208                         instance->fast_path_io = 1;
3209                         con_log(CL_ANN,
3210                             (CE_NOTE, "instance->fast_path_io %d",
3211                             instance->fast_path_io));
3212 
3213                         return (DDI_SUCCESS);
3214                 }
3215 
3216         }
3217 
3218         instance->fast_path_io = 0;
3219         cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3220         con_log(CL_ANN, (CE_NOTE,
3221             "instance->fast_path_io %d", instance->fast_path_io));
3222 
3223         return (DDI_FAILURE);
3224 }
3225 
3226 /*
3227  * Marks HBA as bad. This will be called either when an
3228  * IO packet times out even after 3 FW resets
3229  * or FW is found to be fault even after 3 continuous resets.
3230  */
3231 
void
mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
{
	cmn_err(CE_NOTE, "TBOLT Kill adapter called");

	/* Already marked dead: nothing more to do. */
	if (instance->deadadapter == 1)
		return;

	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
	    "Writing to doorbell with MFI_STOP_ADP "));
	/* Set the dead flag under the OCR mutex so no new reset starts. */
	mutex_enter(&instance->ocr_flags_mtx);
	instance->deadadapter = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	/* Quiesce: disable interrupts, then tell FW to stop. */
	instance->func_ptr->disable_intr(instance);
	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
	/* Flush the doorbell write by reading the register back. */
	(void) RD_RESERVED0_REGISTER(instance);

	/* Fail back every outstanding command to its originator. */
	(void) mrsas_print_pending_cmds(instance);
	(void) mrsas_complete_pending_cmds(instance);
}
3253 
3254 void
3255 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3256 {
3257         int i;
3258         MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3259         instance->reply_read_index = 0;
3260 
3261         /* initializing reply address to 0xFFFFFFFF */
3262         reply_desc = instance->reply_frame_pool;
3263 
3264         for (i = 0; i < instance->reply_q_depth; i++) {
3265                 reply_desc->Words = (uint64_t)~0;
3266                 reply_desc++;
3267         }
3268 }
3269 
/*
 * mrsas_tbolt_reset_ppc - Online controller reset (OCR) for Thunderbolt.
 *
 * Quiesces the adapter, writes the diagnostic-reset magic sequence,
 * waits for the reset bit to clear, waits for firmware to come back to
 * a ready state, re-issues MPI2 INIT, then replays pending commands and
 * re-arms the AEN command.  Returns DDI_SUCCESS on a completed reset,
 * DDI_FAILURE when the reset cannot be performed or the HBA is killed.
 */
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status = 0x00;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;	/* raw FW status register value */
	uint32_t fw_state;		/* state bits from the status reg */
	uint32_t abs_state;
	uint32_t i;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc entered"));

	/* A dead adapter is never reset again. */
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
	    "adpterresetinprogress flag set, time %llx", gethrtime()));
	mutex_exit(&instance->ocr_flags_mtx);

	instance->func_ptr->disable_intr(instance);

	/* Add delay inorder to complete the ioctl & io cmds in-flight */
	for (i = 0; i < 3000; i++) {
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	instance->reply_read_index = 0;

retry_reset:
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    ":Resetting TBOLT "));

	/* Unlock the host diag register via the magic write sequence. */
	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
	WR_TBOLT_IB_WRITE_SEQ(4, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
	WR_TBOLT_IB_WRITE_SEQ(2, instance);
	WR_TBOLT_IB_WRITE_SEQ(7, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: magic number written "
	    "to write sequence register"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_TBOLT_HOST_DIAG(instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
	    "to write sequence register"));

	/* Wait out any reset that is already in progress. */
	while (status & DIAG_TBOLT_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_TBOLT_HOST_DIAG(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN,
			    "mrsas_tbolt_reset_ppc:"
			    "resetadapter bit is set already "
			    "check retry count %d", retry);
			return (DDI_FAILURE);
		}
	}

	/* Trigger the diagnostic reset. */
	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));

	/* Re-read diag status; FW clears the reset bit when it is done. */
	ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
	    (uint8_t *)((uintptr_t)(instance)->regmap +
	    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);

	/*
	 * NOTE(review): `retry` is not reset between this loop and the
	 * previous one, so the two waits share a single 100-iteration
	 * budget — confirm that is intended.
	 */
	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
		delay(100 * drv_usectohz(MILLISEC));
		ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
		    (uint8_t *)((uintptr_t)(instance)->regmap +
		    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
		if (retry++ == 100) {
			/* Dont call kill adapter here */
			/* RESET BIT ADAPTER is cleared by firmare */
			/* mrsas_tbolt_kill_adapter(instance); */
			cmn_err(CE_WARN,
			    "mr_sas %d: %s(): RESET FAILED; return failure!!!",
			    instance->instance, __func__);
			return (DDI_FAILURE);
		}
	}

	con_log(CL_ANN,
	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Wait for FW to progress past its internal init state. */
	abs_state = instance->func_ptr->read_fw_status_reg(instance);
	retry = 0;
	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
		delay(100 * drv_usectohz(MILLISEC));
		abs_state = instance->func_ptr->read_fw_status_reg(instance);
	}
	if (abs_state <= MFI_STATE_FW_INIT) {
		cmn_err(CE_WARN,
		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
		    "state = 0x%x, RETRY RESET.", abs_state);
		goto retry_reset;
	}

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_tbolt_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;

		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		/* Debug knob: pretend the FW faulted to exercise this path. */
		if (debug_tbolt_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;

		con_log(CL_ANN,
		    (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				cmn_err(CE_WARN, "mrsas %d: %s:"
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    instance->instance, __func__,
				    MAX_FW_RESET_COUNT);

				mrsas_tbolt_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}

	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	/* Invalidate all reply descriptors before re-initializing. */
	mrsas_reset_reply_desc(instance);


	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_issue_init_mpi2"));
	abs_state = mrsas_issue_init_mpi2(instance);
	if (abs_state == (uint32_t)DDI_FAILURE) {
		cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "INIT failed Retrying Reset");
		goto retry_reset;
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "mrsas_issue_init_mpi2 Done"));

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_print_pending_cmd"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "mrsas_print_pending_cmd done"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	/* Replay every command that was outstanding when the reset began. */
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	"issue_pending_cmds done."));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling aen registration"));

	/* Re-arm the async event notification command. */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);

	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
	return (DDI_SUCCESS);

}
3468 
3469 
3470 /*
3471  * mrsas_sync_map_info -        Returns FW's ld_map structure
3472  * @instance:                           Adapter soft state
3473  *
3474  * Issues an internal command (DCMD) to get the FW's controller PD
3475  * list structure.  This information is mainly used to find out SYSTEM
3476  * supported by the FW.
3477  */
3478 
static int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
	int			ret = 0, i;
	struct mrsas_cmd	*cmd = NULL;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	LD_TARGET_SYNC *ci = NULL;	/* DMA payload (reuses inactive map) */
	MR_FW_RAID_MAP_ALL *map;
	MR_LD_RAID  *raid;
	LD_TARGET_SYNC *ld_sync;
	uint32_t ci_h = 0;		/* physical address of payload */
	uint32_t size_map_info;

	cmd = get_raid_msg_pkt(instance);

	if (cmd == NULL) {
		cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
		    "mrsas_tbolt_sync_map_info(). ");
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);
	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));


	/* The active map is selected by the low bit of map_id. */
	map = instance->ld_map[instance->map_id & 1];

	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;

	con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
	    size_sync_info, num_lds));

	/*
	 * The inactive map buffer is reused as the DMA payload carrying
	 * the per-LD target-id / sequence-number records.
	 */
	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];

	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];

	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);

	ld_sync = (LD_TARGET_SYNC *)ci;

	/* One sync record per logical drive: target id + sequence number. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);

		con_log(CL_ANN1,
		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
		    i, raid->seqNum, raid->flags.ldSyncRequired));

		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);

		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
		    i, ld_sync->ldTargetId));

		ld_sync->seqNum = raid->seqNum;
	}


	size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	/*
	 * NOTE(review): the DCMD fields below are stored with plain
	 * assignments rather than via ddi_put* as done elsewhere in this
	 * file; this assumes a little-endian host — confirm.
	 */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	ASSERT(num_lds <= 255);
	dcmd->mbox.b[0] = (U8)num_lds;
	dcmd->mbox.b[1] = 1; /* Pend */
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;


	/*
	 * The command completes asynchronously; it is parked in
	 * map_update_cmd and is not returned to the pool here.
	 */
	instance->map_update_cmd = cmd;
	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd(cmd, instance);

	instance->unroll.syncCmd = 1;
	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));

	/* ret is still 0 (success) once the command has been issued. */
	return (ret);
}
3572 
3573 /*
3574  * abort_syncmap_cmd
3575  */
3576 int
3577 abort_syncmap_cmd(struct mrsas_instance *instance,
3578     struct mrsas_cmd *cmd_to_abort)
3579 {
3580         int     ret = 0;
3581 
3582         struct mrsas_cmd                *cmd;
3583         struct mrsas_abort_frame        *abort_fr;
3584 
3585         con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3586 
3587         cmd = get_raid_msg_mfi_pkt(instance);
3588 
3589         if (!cmd) {
3590                 cmn_err(CE_WARN,
3591                     "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3592                 return (DDI_FAILURE);
3593         }
3594         /* Clear the frame buffer and assign back the context id */
3595         bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3596         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3597             cmd->index);
3598 
3599         abort_fr = &cmd->frame->abort;
3600 
3601         /* prepare and issue the abort frame */
3602         ddi_put8(cmd->frame_dma_obj.acc_handle,
3603             &abort_fr->cmd, MFI_CMD_OP_ABORT);
3604         ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3605             MFI_CMD_STATUS_SYNC_MODE);
3606         ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3607         ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3608             cmd_to_abort->index);
3609         ddi_put32(cmd->frame_dma_obj.acc_handle,
3610             &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3611         ddi_put32(cmd->frame_dma_obj.acc_handle,
3612             &abort_fr->abort_mfi_phys_addr_hi, 0);
3613 
3614         cmd->frame_count = 1;
3615 
3616         mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3617 
3618         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3619                 con_log(CL_ANN1, (CE_WARN,
3620                     "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3621                 ret = -1;
3622         } else {
3623                 ret = 0;
3624         }
3625 
3626         return_raid_msg_mfi_pkt(instance, cmd);
3627 
3628         atomic_add_16(&instance->fw_outstanding, (-1));
3629 
3630         return (ret);
3631 }
3632 
3633 
3634 #ifdef PDSUPPORT
3635 int
3636 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3637     uint8_t lun, dev_info_t **ldip)
3638 {
3639         struct scsi_device *sd;
3640         dev_info_t *child;
3641         int rval, dtype;
3642         struct mrsas_tbolt_pd_info *pds = NULL;
3643 
3644         con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3645             tgt, lun));
3646 
3647         if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3648                 if (ldip) {
3649                         *ldip = child;
3650                 }
3651                 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3652                         rval = mrsas_service_evt(instance, tgt, 1,
3653                             MRSAS_EVT_UNCONFIG_TGT, NULL);
3654                         con_log(CL_ANN1, (CE_WARN,
3655                             "mr_sas:DELETING STALE ENTRY  rval = %d "
3656                             "tgt id = %d", rval, tgt));
3657                         return (NDI_FAILURE);
3658                 }
3659                 return (NDI_SUCCESS);
3660         }
3661 
3662         pds = (struct mrsas_tbolt_pd_info *)
3663             kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3664         mrsas_tbolt_get_pd_info(instance, pds, tgt);
3665         dtype = pds->scsiDevType;
3666 
3667         /* Check for Disk */
3668         if ((dtype == DTYPE_DIRECT)) {
3669                 if ((dtype == DTYPE_DIRECT) &&
3670                     (LE_16(pds->fwState) != PD_SYSTEM)) {
3671                         kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3672                         return (NDI_FAILURE);
3673                 }
3674                 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3675                 sd->sd_address.a_hba_tran = instance->tran;
3676                 sd->sd_address.a_target = (uint16_t)tgt;
3677                 sd->sd_address.a_lun = (uint8_t)lun;
3678 
3679                 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3680                         rval = mrsas_config_scsi_device(instance, sd, ldip);
3681                         con_log(CL_DLEVEL1, (CE_NOTE,
3682                             "Phys. device found: tgt %d dtype %d: %s",
3683                             tgt, dtype, sd->sd_inq->inq_vid));
3684                 } else {
3685                         rval = NDI_FAILURE;
3686                         con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3687                             "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3688                             tgt, dtype, sd->sd_inq->inq_vid));
3689                 }
3690 
3691                 /* sd_unprobe is blank now. Free buffer manually */
3692                 if (sd->sd_inq) {
3693                         kmem_free(sd->sd_inq, SUN_INQSIZE);
3694                         sd->sd_inq = (struct scsi_inquiry *)NULL;
3695                 }
3696                 kmem_free(sd, sizeof (struct scsi_device));
3697                 rval = NDI_SUCCESS;
3698         } else {
3699                 con_log(CL_ANN1, (CE_NOTE,
3700                     "Device not supported: tgt %d lun %d dtype %d",
3701                     tgt, lun, dtype));
3702                 rval = NDI_FAILURE;
3703         }
3704 
3705         kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3706         con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3707             rval));
3708         return (rval);
3709 }
3710 
3711 static void
3712 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3713     struct mrsas_tbolt_pd_info *pds, int tgt)
3714 {
3715         struct mrsas_cmd        *cmd;
3716         struct mrsas_dcmd_frame *dcmd;
3717         dma_obj_t               dcmd_dma_obj;
3718 
3719         cmd = get_raid_msg_pkt(instance);
3720 
3721         if (!cmd) {
3722                 con_log(CL_ANN1,
3723                     (CE_WARN, "Failed to get a cmd for get pd info"));
3724                 return;
3725         }
3726 
3727         /* Clear the frame buffer and assign back the context id */
3728         bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3729         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3730             cmd->index);
3731 
3732 
3733         dcmd = &cmd->frame->dcmd;
3734         dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3735         dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3736         dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3737         dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3738         dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3739         dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3740 
3741         (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3742             DDI_STRUCTURE_LE_ACC);
3743         bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3744         bzero(dcmd->mbox.b, 12);
3745         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3746         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3747         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3748         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3749             MFI_FRAME_DIR_READ);
3750         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3751         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3752             sizeof (struct mrsas_tbolt_pd_info));
3753         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3754             MR_DCMD_PD_GET_INFO);
3755         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3756         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3757             sizeof (struct mrsas_tbolt_pd_info));
3758         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3759             dcmd_dma_obj.dma_cookie[0].dmac_address);
3760 
3761         cmd->sync_cmd = MRSAS_TRUE;
3762         cmd->frame_count = 1;
3763 
3764         if (instance->tbolt) {
3765                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3766         }
3767 
3768         instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3769 
3770         ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3771             (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3772             DDI_DEV_AUTOINCR);
3773         (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3774         return_raid_msg_pkt(instance, cmd);
3775 }
3776 #endif