Print this page
*** 14,60 ****
* Rasheed
* Shakeel Bukhari
*/
- #include <stddef.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"
! // Pre-TB command size and TB command size.
! #define MR_COMMAND_SIZE (64*20) // 1280 bytes
MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
! U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *in_info);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int debug_level_g;
extern unsigned int enable_fp;
volatile int dump_io_wait_time = 90;
extern void
io_timeout_checker(void *arg);
- extern int
- mfi_state_transition_to_ready(struct mrsas_instance *instance);
extern volatile int debug_timeout_g;
extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
! static volatile int debug_tbolt_fw_faults_after_ocr_g = 0;
/*
* destroy_mfi_mpi_frame_pool
*/
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
--- 14,79 ----
* Rasheed
* Shakeel Bukhari
*/
#include <sys/types.h>
#include <sys/file.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"
+ /*
+ * FMA header files
+ */
+ #include <sys/ddifm.h>
+ #include <sys/fm/protocol.h>
+ #include <sys/fm/util.h>
+ #include <sys/fm/io/ddi.h>
!
! /* Pre-TB command size and TB command size. */
! #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
! U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int debug_level_g;
extern unsigned int enable_fp;
volatile int dump_io_wait_time = 90;
extern void
io_timeout_checker(void *arg);
extern volatile int debug_timeout_g;
extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
! /* Local static prototypes. */
! static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
! struct scsi_address *, struct scsi_pkt *, uchar_t *);
! static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
! U64 start_blk, U32 num_blocks);
! static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
! static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
! static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
! static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
! #ifdef PDSUPPORT
! static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
! struct mrsas_tbolt_pd_info *, int);
! #endif /* PDSUPPORT */
+ static int debug_tbolt_fw_faults_after_ocr_g = 0;
+
/*
* destroy_mfi_mpi_frame_pool
*/
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
*** 64,76 ****
struct mrsas_cmd *cmd;
/* return all mfi frames to pool */
for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
cmd = instance->cmd_list[i];
! if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
(void) mrsas_free_dma_obj(instance,
cmd->frame_dma_obj);
cmd->frame_dma_obj_status = DMA_OBJ_FREED;
}
}
/*
--- 83,96 ----
struct mrsas_cmd *cmd;
/* return all mfi frames to pool */
for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
cmd = instance->cmd_list[i];
! if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
(void) mrsas_free_dma_obj(instance,
cmd->frame_dma_obj);
+ }
cmd->frame_dma_obj_status = DMA_OBJ_FREED;
}
}
/*
*** 93,102 ****
--- 113,123 ----
*/
void
mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
{
int i;
+
if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
(void) mrsas_free_dma_obj(instance,
instance->mfi_internal_dma_obj);
instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
}
*** 143,153 ****
*/
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
int i = 0;
- int cookie_cnt;
uint16_t max_cmd;
uint32_t sgl_sz;
uint32_t raid_msg_size;
uint32_t total_size;
uint32_t offset;
--- 164,173 ----
*** 158,173 ****
max_cmd = instance->max_fw_cmds;
sgl_sz = 1024;
raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
! // Allocating additional 256 bytes to accomodate SMID 0.
total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
(max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
! "max_cmd %x ", max_cmd));
con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
"request message frame pool size %x", total_size));
/*
--- 178,193 ----
max_cmd = instance->max_fw_cmds;
sgl_sz = 1024;
raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
! 	/* Allocating additional 256 bytes to accommodate SMID 0. */
total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
(max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
! "max_cmd %x", max_cmd));
con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
"request message frame pool size %x", total_size));
/*
*** 201,216 ****
(uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
instance->io_request_frames_phy =
(uint32_t)
instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
! con_log(CL_DLEVEL3, (CE_NOTE,
! "io_request_frames 0x%x",
! instance->io_request_frames));
! con_log(CL_DLEVEL3, (CE_NOTE,
! "io_request_frames_phy 0x%x",
instance->io_request_frames_phy));
io_req_base = (uint8_t *)instance->io_request_frames +
MRSAS_THUNDERBOLT_MSG_SIZE;
io_req_base_phys = instance->io_request_frames_phy +
--- 221,234 ----
(uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
instance->io_request_frames_phy =
(uint32_t)
instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
! con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
! (void *)instance->io_request_frames));
! con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
instance->io_request_frames_phy));
io_req_base = (uint8_t *)instance->io_request_frames +
MRSAS_THUNDERBOLT_MSG_SIZE;
io_req_base_phys = instance->io_request_frames_phy +
*** 226,279 ****
cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
((uint8_t *)io_req_base + offset);
cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
! cmd->sgl = (Mpi2SGEIOUnion_t *)
! ((uint8_t *)io_req_base +
(max_cmd * raid_msg_size) + i * sgl_sz);
! cmd->sgl_phys_addr =
! (io_req_base_phys +
(max_cmd * raid_msg_size) + i * sgl_sz);
! cmd->sense1 = (uint8_t *)
! ((uint8_t *)io_req_base +
(max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
(i * SENSE_LENGTH));
! cmd->sense_phys_addr1 =
! (io_req_base_phys +
(max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
(i * SENSE_LENGTH));
! cmd->SMID = i+1;
! con_log(CL_DLEVEL3, (CE_NOTE,
! "Frame Pool Addr [%x]0x%x",
! cmd->index, cmd->scsi_io_request));
! con_log(CL_DLEVEL3, (CE_NOTE,
! "Frame Pool Phys Addr [%x]0x%x",
cmd->index, cmd->scsi_io_request_phys_addr));
! con_log(CL_DLEVEL3, (CE_NOTE,
! "Sense Addr [%x]0x%x",
! cmd->index, cmd->sense1));
! con_log(CL_DLEVEL3, (CE_NOTE,
! "Sense Addr Phys [%x]0x%x",
cmd->index, cmd->sense_phys_addr1));
! con_log(CL_DLEVEL3, (CE_NOTE,
! "Sgl bufffers [%x]0x%x",
! cmd->index, cmd->sgl));
!
! con_log(CL_DLEVEL3, (CE_NOTE,
! "Sgl bufffers phys [%x]0x%x",
cmd->index, cmd->sgl_phys_addr));
}
return (DDI_SUCCESS);
--- 244,286 ----
cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
((uint8_t *)io_req_base + offset);
cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
! cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
(max_cmd * raid_msg_size) + i * sgl_sz);
! cmd->sgl_phys_addr = (io_req_base_phys +
(max_cmd * raid_msg_size) + i * sgl_sz);
! cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
(max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
(i * SENSE_LENGTH));
! cmd->sense_phys_addr1 = (io_req_base_phys +
(max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
(i * SENSE_LENGTH));
! cmd->SMID = i + 1;
! con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
! cmd->index, (void *)cmd->scsi_io_request));
! con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
cmd->index, cmd->scsi_io_request_phys_addr));
! con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
! cmd->index, (void *)cmd->sense1));
! con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
cmd->index, cmd->sense_phys_addr1));
+ con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
+ cmd->index, (void *)cmd->sgl));
! con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
cmd->index, cmd->sgl_phys_addr));
}
return (DDI_SUCCESS);
*** 297,307 ****
instance->mfi_internal_dma_obj.size = internal_buf_size;
instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
! instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN,
--- 304,315 ----
instance->mfi_internal_dma_obj.size = internal_buf_size;
instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
! instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
! 0xFFFFFFFFU;
instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN,
*** 310,321 ****
}
bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
! instance->internal_buf = (caddr_t)(((unsigned long)
! instance->mfi_internal_dma_obj.buffer));
instance->internal_buf_dmac_add =
instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
instance->internal_buf_size = internal_buf_size;
/* allocate evt_detail */
--- 318,329 ----
}
bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
! instance->internal_buf =
! (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
instance->internal_buf_dmac_add =
instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
instance->internal_buf_size = internal_buf_size;
/* allocate evt_detail */
*** 326,337 ****
instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
! cmn_err(CE_WARN,
! "mrsas_tbolt_alloc_additional_dma_buffer: "
"could not allocate data transfer buffer.");
goto fail_tbolt_additional_buff;
}
bzero(instance->mfi_evt_detail_obj.buffer,
--- 334,344 ----
instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
! cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
"could not allocate data transfer buffer.");
goto fail_tbolt_additional_buff;
}
bzero(instance->mfi_evt_detail_obj.buffer,
*** 345,355 ****
for (i = 0; i < 2; i++) {
/* allocate the data transfer buffer */
instance->ld_map_obj[i].size = instance->size_map_info;
instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
! instance->ld_map_obj[i].dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
--- 352,363 ----
for (i = 0; i < 2; i++) {
/* allocate the data transfer buffer */
instance->ld_map_obj[i].size = instance->size_map_info;
instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
! instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
! 0xFFFFFFFFU;
instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
*** 358,380 ****
goto fail_tbolt_additional_buff;
}
instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
! (void) memset(instance->ld_map_obj[i].buffer, 0,
! instance->size_map_info);
! instance->ld_map[i] = (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
! instance->ld_map_phy[i] =
! (uint32_t)instance->ld_map_obj[i].dma_cookie[0].dmac_address;
con_log(CL_DLEVEL3, (CE_NOTE,
"ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
con_log(CL_DLEVEL3, (CE_NOTE,
"size_map_info 0x%x", instance->size_map_info));
-
}
return (DDI_SUCCESS);
fail_tbolt_additional_buff:
--- 366,387 ----
goto fail_tbolt_additional_buff;
}
instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
! bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
! instance->ld_map[i] =
! (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
! instance->ld_map_phy[i] = (uint32_t)instance->
! ld_map_obj[i].dma_cookie[0].dmac_address;
con_log(CL_DLEVEL3, (CE_NOTE,
"ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
con_log(CL_DLEVEL3, (CE_NOTE,
"size_map_info 0x%x", instance->size_map_info));
}
return (DDI_SUCCESS);
fail_tbolt_additional_buff:
*** 382,413 ****
return (DDI_FAILURE);
}
MRSAS_REQUEST_DESCRIPTOR_UNION *
! mr_sas_get_request_descriptor(struct mrsas_instance *instance,
! uint16_t index, struct mrsas_cmd *cmd)
{
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
if (index > instance->max_fw_cmds) {
con_log(CL_ANN1, (CE_NOTE,
"Invalid SMID 0x%x request for descriptor", index));
con_log(CL_ANN1, (CE_NOTE,
! "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
return (NULL);
}
req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
((char *)instance->request_message_pool +
(sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
con_log(CL_ANN1, (CE_NOTE,
! "request descriptor : 0x%08lx\n", (unsigned long)req_desc));
con_log(CL_ANN1, (CE_NOTE,
! "request descriptor base phy : 0x%08lx\n",
(unsigned long)instance->request_message_pool_phy));
return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}
--- 389,419 ----
return (DDI_FAILURE);
}
MRSAS_REQUEST_DESCRIPTOR_UNION *
! mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
{
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
if (index > instance->max_fw_cmds) {
con_log(CL_ANN1, (CE_NOTE,
"Invalid SMID 0x%x request for descriptor", index));
con_log(CL_ANN1, (CE_NOTE,
! "max_fw_cmds : 0x%x", instance->max_fw_cmds));
return (NULL);
}
req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
((char *)instance->request_message_pool +
(sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
con_log(CL_ANN1, (CE_NOTE,
! "request descriptor : 0x%08lx", (unsigned long)req_desc));
con_log(CL_ANN1, (CE_NOTE,
! "request descriptor base phy : 0x%08lx",
(unsigned long)instance->request_message_pool_phy));
return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}
*** 417,450 ****
*/
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
uint32_t request_q_sz, reply_q_sz;
! int i, max_request_q_sz, max_reply_q_sz;
! uint64_t request_desc;
MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
- uint64_t *reply_ptr;
/*
* ThunderBolt(TB) There's no longer producer consumer mechanism.
* Once we have an interrupt we are supposed to scan through the list of
* reply descriptors and process them accordingly. We would be needing
* to allocate memory for 1024 reply descriptors
*/
/* Allocate Reply Descriptors */
! con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
! sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
! // reply queue size should be multiple of 16
max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
reply_q_sz = 8 * max_reply_q_sz;
! con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
! sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
instance->reply_desc_dma_obj.size = reply_q_sz;
instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
--- 423,454 ----
*/
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
uint32_t request_q_sz, reply_q_sz;
! int i, max_reply_q_sz;
MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
/*
* ThunderBolt(TB) There's no longer producer consumer mechanism.
* Once we have an interrupt we are supposed to scan through the list of
* reply descriptors and process them accordingly. We would be needing
* to allocate memory for 1024 reply descriptors
*/
/* Allocate Reply Descriptors */
! con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
! (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
! /* reply queue size should be multiple of 16 */
max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
reply_q_sz = 8 * max_reply_q_sz;
! con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
! (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
instance->reply_desc_dma_obj.size = reply_q_sz;
instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
*** 459,479 ****
}
bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
! // virtual address of reply queue
instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
instance->reply_desc_dma_obj.buffer);
instance->reply_q_depth = max_reply_q_sz;
con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
instance->reply_q_depth));
! con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%x",
! instance->reply_frame_pool));
/* initializing reply address to 0xFFFFFFFF */
reply_desc = instance->reply_frame_pool;
for (i = 0; i < instance->reply_q_depth; i++) {
--- 463,483 ----
}
bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
! /* virtual address of reply queue */
instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
instance->reply_desc_dma_obj.buffer);
instance->reply_q_depth = max_reply_q_sz;
con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
instance->reply_q_depth));
! con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
! (void *)instance->reply_frame_pool));
/* initializing reply address to 0xFFFFFFFF */
reply_desc = instance->reply_frame_pool;
for (i = 0; i < instance->reply_q_depth; i++) {
*** 494,517 ****
con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
instance->reply_pool_limit_phy));
! con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
! sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
/* Allocate Request Descriptors */
! con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
! sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
request_q_sz = 8 *
(instance->max_fw_cmds);
instance->request_desc_dma_obj.size = request_q_sz;
instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
! instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
--- 498,522 ----
con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
instance->reply_pool_limit_phy));
! con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
! (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
/* Allocate Request Descriptors */
! con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
! (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
request_q_sz = 8 *
(instance->max_fw_cmds);
instance->request_desc_dma_obj.size = request_q_sz;
instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
! instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
! 0xFFFFFFFFU;
instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
*** 528,539 ****
(instance->request_desc_dma_obj.buffer);
instance->request_message_pool_phy =
(uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
- max_request_q_sz = instance->max_fw_cmds;
-
return (DDI_SUCCESS);
fail_undo_reply_queue:
if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
(void) mrsas_free_dma_obj(instance,
--- 533,542 ----
*** 544,554 ****
return (DDI_FAILURE);
}
/*
* mrsas_alloc_cmd_pool_tbolt
! * TODO: merge tbolt-specific codee into mrsas_alloc_cmd_pool() to have single routine
*/
int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
int i;
--- 547,559 ----
return (DDI_FAILURE);
}
/*
* mrsas_alloc_cmd_pool_tbolt
! *
! * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
! * routine
*/
int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
int i;
*** 570,605 ****
* instance->cmd_list is an array of struct mrsas_cmd pointers.
* Allocate the dynamic array first and then allocate individual
* commands.
*/
instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
- if (instance->cmd_list == NULL) {
- con_log(CL_NONE, (CE_WARN,
- "Failed to allocate memory for cmd_list"));
- return (DDI_FAILURE);
- }
/* create a frame pool and assign one frame to each cmd */
for (count = 0; count < max_cmd; count++) {
! instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
! KM_SLEEP);
! if (instance->cmd_list[count] == NULL) {
! con_log(CL_NONE, (CE_WARN,
! "Failed to allocate memory for mrsas_cmd"));
! goto mrsas_undo_cmds;
}
- }
/* add all the commands to command pool */
INIT_LIST_HEAD(&instance->cmd_pool_list);
INIT_LIST_HEAD(&instance->cmd_pend_list);
INIT_LIST_HEAD(&instance->cmd_app_pool_list);
reserve_cmd = MRSAS_APP_RESERVED_CMDS;
! for (i = 1; i < reserve_cmd; i++) { //cmd index 0 reservered for IOC INIT
cmd = instance->cmd_list[i];
cmd->index = i;
mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
}
--- 575,601 ----
* instance->cmd_list is an array of struct mrsas_cmd pointers.
* Allocate the dynamic array first and then allocate individual
* commands.
*/
instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
/* create a frame pool and assign one frame to each cmd */
for (count = 0; count < max_cmd; count++) {
! instance->cmd_list[count] =
! kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
}
/* add all the commands to command pool */
INIT_LIST_HEAD(&instance->cmd_pool_list);
INIT_LIST_HEAD(&instance->cmd_pend_list);
INIT_LIST_HEAD(&instance->cmd_app_pool_list);
reserve_cmd = MRSAS_APP_RESERVED_CMDS;
! 	/* cmd index 0 reserved for IOC INIT */
! for (i = 1; i < reserve_cmd; i++) {
cmd = instance->cmd_list[i];
cmd->index = i;
mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
}
*** 614,632 ****
mrsas_undo_cmds:
if (count > 0) {
/* free each cmd */
for (i = 0; i < count; i++) {
! if (instance->cmd_list[i] != NULL)
! kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
instance->cmd_list[i] = NULL;
}
}
mrsas_undo_cmd_list:
if (instance->cmd_list != NULL)
! kmem_free(instance->cmd_list,sz);
instance->cmd_list = NULL;
return (DDI_FAILURE);
}
--- 610,630 ----
mrsas_undo_cmds:
if (count > 0) {
/* free each cmd */
for (i = 0; i < count; i++) {
! if (instance->cmd_list[i] != NULL) {
! kmem_free(instance->cmd_list[i],
! sizeof (struct mrsas_cmd));
! }
instance->cmd_list[i] = NULL;
}
}
mrsas_undo_cmd_list:
if (instance->cmd_list != NULL)
! kmem_free(instance->cmd_list, sz);
instance->cmd_list = NULL;
return (DDI_FAILURE);
}
*** 664,674 ****
* ThunderBolt(TB) memory allocations for commands/messages/frames.
*/
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
! /* Allocate command pool ( memory for cmd_list & individual commands )*/
if (mrsas_alloc_cmd_pool_tbolt(instance)) {
cmn_err(CE_WARN, "Error creating cmd pool");
return (DDI_FAILURE);
}
--- 662,672 ----
* ThunderBolt(TB) memory allocations for commands/messages/frames.
*/
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
! /* Allocate command pool (memory for cmd_list & individual commands) */
if (mrsas_alloc_cmd_pool_tbolt(instance)) {
cmn_err(CE_WARN, "Error creating cmd pool");
return (DDI_FAILURE);
}
*** 764,774 ****
/*
* mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
*/
int
! mrsas_init_adapter_tbolt (struct mrsas_instance *instance)
{
/*
* Reduce the max supported cmds by 1. This is to ensure that the
* reply_q_sz (1 more than the max cmd that driver may send)
--- 762,772 ----
/*
* mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
*/
int
! mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
{
/*
* Reduce the max supported cmds by 1. This is to ensure that the
* reply_q_sz (1 more than the max cmd that driver may send)
*** 783,817 ****
con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
" instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
/* create a pool of commands */
! if ( alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
cmn_err(CE_WARN,
" alloc_space_for_mpi2() failed.");
return (DDI_FAILURE);
}
/* Send ioc init message */
! if ( mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
cmn_err(CE_WARN,
" mrsas_issue_init_mpi2() failed.");
goto fail_init_fusion;
}
instance->unroll.alloc_space_mpi2 = 1;
con_log(CL_ANN, (CE_NOTE,
! "mrsas_init_adapter_tbolt: SUCCESSFULL\n"));
return (DDI_SUCCESS);
fail_init_fusion:
-
- fail_undo_alloc_mpi2:
free_space_for_mpi2(instance);
return (DDI_FAILURE);
}
--- 781,814 ----
con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
" instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
/* create a pool of commands */
! if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
cmn_err(CE_WARN,
" alloc_space_for_mpi2() failed.");
return (DDI_FAILURE);
}
/* Send ioc init message */
! /* NOTE: the issue_init call does FMA checking already. */
! if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
cmn_err(CE_WARN,
" mrsas_issue_init_mpi2() failed.");
goto fail_init_fusion;
}
instance->unroll.alloc_space_mpi2 = 1;
con_log(CL_ANN, (CE_NOTE,
! "mrsas_init_adapter_tbolt: SUCCESSFUL"));
return (DDI_SUCCESS);
fail_init_fusion:
free_space_for_mpi2(instance);
return (DDI_FAILURE);
}
*** 838,919 ****
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
"could not allocate data transfer buffer.");
return (DDI_FAILURE);
}
! (void) memset(init2_dma_obj.buffer, 2,
! sizeof (Mpi2IOCInitRequest_t));
con_log(CL_ANN1, (CE_NOTE,
! "mrsas_issue_init_mpi2 _phys adr: %x \n",
init2_dma_obj.dma_cookie[0].dmac_address));
/* Initialize and send ioc init message */
! ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj,
! init2_dma_obj.acc_handle);
if (ret_val == DDI_FAILURE) {
con_log(CL_ANN1, (CE_WARN,
! "mrsas_issue_init_mpi2: Failed\n"));
goto fail_init_mpi2;
}
/* free IOC init DMA buffer */
if (mrsas_free_dma_obj(instance, init2_dma_obj)
!= DDI_SUCCESS) {
con_log(CL_ANN1, (CE_WARN,
! "mrsas_issue_init_mpi2: Free Failed\n"));
return (DDI_FAILURE);
}
-
/* Get/Check and sync ld_map info */
instance->map_id = 0;
! if( mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS )
! mrsas_tbolt_sync_map_info(instance);
con_log(CL_ANN, (CE_NOTE,
! "mrsas_issue_init_mpi2: SUCCESSFULL\n"));
return (DDI_SUCCESS);
fail_init_mpi2:
! mrsas_free_dma_obj(instance, init2_dma_obj);
return (DDI_FAILURE);
}
! int
! mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj,
! ddi_acc_handle_t accessp)
{
! int numbytes, i;
! int ret = DDI_SUCCESS;
uint16_t flags;
- int status;
- timespec_t time;
- uint64_t mSec;
- uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
struct mrsas_init_frame2 *mfiFrameInit2;
struct mrsas_header *frame_hdr;
Mpi2IOCInitRequest_t *init;
struct mrsas_cmd *cmd = NULL;
struct mrsas_drv_ver drv_ver_info;
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-
con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
#ifdef DEBUG
con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
! sizeof (*mfiFrameInit2)));
! con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", sizeof (*init)));
con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
! sizeof (struct mrsas_init_frame2)));
con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
! sizeof (Mpi2IOCInitRequest_t)));
#endif
init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
numbytes = sizeof (*init);
bzero(init, numbytes);
--- 835,911 ----
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
"could not allocate data transfer buffer.");
return (DDI_FAILURE);
}
! (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
con_log(CL_ANN1, (CE_NOTE,
! "mrsas_issue_init_mpi2 _phys adr: %x",
init2_dma_obj.dma_cookie[0].dmac_address));
/* Initialize and send ioc init message */
! ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
if (ret_val == DDI_FAILURE) {
con_log(CL_ANN1, (CE_WARN,
! "mrsas_issue_init_mpi2: Failed"));
goto fail_init_mpi2;
}
/* free IOC init DMA buffer */
if (mrsas_free_dma_obj(instance, init2_dma_obj)
!= DDI_SUCCESS) {
con_log(CL_ANN1, (CE_WARN,
! "mrsas_issue_init_mpi2: Free Failed"));
return (DDI_FAILURE);
}
/* Get/Check and sync ld_map info */
instance->map_id = 0;
! if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
! (void) mrsas_tbolt_sync_map_info(instance);
+
+ /* No mrsas_cmd to send, so send NULL. */
+ if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
+ goto fail_init_mpi2;
+
con_log(CL_ANN, (CE_NOTE,
! "mrsas_issue_init_mpi2: SUCCESSFUL"));
return (DDI_SUCCESS);
fail_init_mpi2:
! (void) mrsas_free_dma_obj(instance, init2_dma_obj);
return (DDI_FAILURE);
}
! static int
! mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
! int numbytes;
uint16_t flags;
struct mrsas_init_frame2 *mfiFrameInit2;
struct mrsas_header *frame_hdr;
Mpi2IOCInitRequest_t *init;
struct mrsas_cmd *cmd = NULL;
struct mrsas_drv_ver drv_ver_info;
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
#ifdef DEBUG
con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
! (int)sizeof (*mfiFrameInit2)));
! con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
! (int)sizeof (struct mrsas_init_frame2)));
con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
! (int)sizeof (Mpi2IOCInitRequest_t)));
#endif
init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
numbytes = sizeof (*init);
bzero(init, numbytes);
*** 967,977 ****
cmd->retry_count_for_ocr = 0;
cmd->pkt = NULL;
cmd->drv_pkt_time = 0;
mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
! con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%x", mfiFrameInit2));
frame_hdr = &cmd->frame->hdr;
ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
MFI_CMD_STATUS_POLL_MODE);
--- 959,969 ----
cmd->retry_count_for_ocr = 0;
cmd->pkt = NULL;
cmd->drv_pkt_time = 0;
mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
! con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
frame_hdr = &cmd->frame->hdr;
ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
MFI_CMD_STATUS_POLL_MODE);
*** 983,993 ****
ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
con_log(CL_ANN, (CE_CONT,
"mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
! // Init the MFI Header
ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
--- 975,985 ----
ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
con_log(CL_ANN, (CE_CONT,
"mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
! /* Init the MFI Header */
ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
*** 1004,1020 ****
ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->data_xfer_len,
sizeof (Mpi2IOCInitRequest_t));
con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
! init->ReplyDescriptorPostQueueAddress));
! /* fill driver version information*/
fill_up_drv_ver(&drv_ver_info);
/* allocate the driver version data transfer buffer */
! instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
--- 996,1012 ----
ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->data_xfer_len,
sizeof (Mpi2IOCInitRequest_t));
con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
! (int)init->ReplyDescriptorPostQueueAddress));
! /* fill driver version information */
fill_up_drv_ver(&drv_ver_info);
/* allocate the driver version data transfer buffer */
! instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
*** 1023,1051 ****
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN,
"fusion init: Could not allocate driver version buffer.");
return (DDI_FAILURE);
}
! /* copy driver version to dma buffer*/
! (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
(uint8_t *)drv_ver_info.drv_ver,
(uint8_t *)instance->drv_ver_dma_obj.buffer,
! sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
! /*send driver version physical address to firmware*/
! ddi_put64(cmd->frame_dma_obj.acc_handle,
! &mfiFrameInit2->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
mfiFrameInit2->queue_info_new_phys_addr_lo,
! sizeof (Mpi2IOCInitRequest_t)));
con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
! cmd->scsi_io_request_phys_addr, sizeof (struct mrsas_init_frame2)));
/* disable interrupts before sending INIT2 frame */
instance->func_ptr->disable_intr(instance);
req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
--- 1015,1044 ----
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN,
"fusion init: Could not allocate driver version buffer.");
return (DDI_FAILURE);
}
! /* copy driver version to dma buffer */
! bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
(uint8_t *)drv_ver_info.drv_ver,
(uint8_t *)instance->drv_ver_dma_obj.buffer,
! sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
! /* send driver version physical address to firmware */
! ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
! instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
mfiFrameInit2->queue_info_new_phys_addr_lo,
! (int)sizeof (Mpi2IOCInitRequest_t)));
con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
! cmd->scsi_io_request_phys_addr,
! (int)sizeof (struct mrsas_init_frame2)));
/* disable interrupts before sending INIT2 frame */
instance->func_ptr->disable_intr(instance);
req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
*** 1064,1074 ****
frame_hdr->cmd_status));
if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->cmd_status) == 0) {
con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
- ret = DDI_SUCCESS;
} else {
con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
mrsas_dump_reply_desc(instance);
goto fail_ioc_init;
}
--- 1057,1066 ----
*** 1075,1099 ****
mrsas_dump_reply_desc(instance);
instance->unroll.verBuff = 1;
! con_log(CL_ANN, (CE_NOTE,
! "mrsas_tbolt_ioc_init: SUCCESSFULL\n"));
-
return (DDI_SUCCESS);
fail_ioc_init:
! mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
return (DDI_FAILURE);
}
! int wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
int i;
uint32_t wait_time = dump_io_wait_time;
for (i = 0; i < wait_time; i++) {
/*
--- 1067,1090 ----
mrsas_dump_reply_desc(instance);
instance->unroll.verBuff = 1;
! con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
return (DDI_SUCCESS);
fail_ioc_init:
! (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
return (DDI_FAILURE);
}
! int
! wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
int i;
uint32_t wait_time = dump_io_wait_time;
for (i = 0; i < wait_time; i++) {
/*
*** 1122,1135 ****
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
struct mrsas_instance *instance = ADDR2MR(ap);
struct scsa_cmd *acmd = PKT2CMD(pkt);
struct mrsas_cmd *cmd = NULL;
- int rval, i;
uchar_t cmd_done = 0;
- Mpi2RaidSCSIIORequest_t *scsi_raid_io;
- uint32_t msecs = 120 * MILLISEC;
con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
if (instance->deadadapter == 1) {
cmn_err(CE_WARN,
"mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
--- 1113,1123 ----
*** 1143,1153 ****
if (instance->adapterresetinprogress) {
con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
"returning mfi_pkt and setting TRAN_BUSY\n"));
return (TRAN_BUSY);
}
! rval = mrsas_tbolt_prepare_pkt(acmd);
cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
/*
* Check if the command is already completed by the mrsas_build_cmd()
--- 1131,1141 ----
if (instance->adapterresetinprogress) {
con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
"returning mfi_pkt and setting TRAN_BUSY\n"));
return (TRAN_BUSY);
}
! (void) mrsas_tbolt_prepare_pkt(acmd);
cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
/*
* Check if the command is already completed by the mrsas_build_cmd()
*** 1172,1210 ****
if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
if (instance->fw_outstanding > instance->max_fw_cmds) {
cmn_err(CE_WARN,
! "Command Queue Full... Returning BUSY \n");
return_raid_msg_pkt(instance, cmd);
return (TRAN_BUSY);
}
/* Synchronize the Cmd frame for the controller */
(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
DDI_DMA_SYNC_FORDEV);
con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
! "cmd->index:0x%x SMID %0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
instance->func_ptr->issue_cmd(cmd, instance);
-
- return (TRAN_ACCEPT);
-
} else {
instance->func_ptr->issue_cmd(cmd, instance);
(void) wait_for_outstanding_poll_io(instance);
! return (TRAN_ACCEPT);
}
}
/*
* prepare the pkt:
* the pkt may have been resubmitted or just reused so
* initialize some fields and do some checks.
*/
! int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
struct scsi_pkt *pkt = CMD2PKT(acmd);
--- 1160,1198 ----
if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
if (instance->fw_outstanding > instance->max_fw_cmds) {
cmn_err(CE_WARN,
! "Command Queue Full... Returning BUSY");
return_raid_msg_pkt(instance, cmd);
return (TRAN_BUSY);
}
/* Synchronize the Cmd frame for the controller */
(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
DDI_DMA_SYNC_FORDEV);
con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
! "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
! cmd->index, cmd->SMID));
instance->func_ptr->issue_cmd(cmd, instance);
} else {
instance->func_ptr->issue_cmd(cmd, instance);
(void) wait_for_outstanding_poll_io(instance);
! (void) mrsas_common_check(instance, cmd);
}
+
+ return (TRAN_ACCEPT);
}
/*
* prepare the pkt:
* the pkt may have been resubmitted or just reused so
* initialize some fields and do some checks.
*/
! static int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
struct scsi_pkt *pkt = CMD2PKT(acmd);
*** 1233,1271 ****
Mpi2RaidSCSIIORequest_t *scsi_raid_io,
uint32_t *datalen)
{
uint32_t MaxSGEs;
int sg_to_process;
! uint32_t i, j, SGEdwords = 0;
uint32_t numElements, endElement;
Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
! uint32_t SGLFlags = 0;
con_log(CL_ANN1, (CE_NOTE,
"chkpnt: Building Chained SGL :%d", __LINE__));
/* Calulate SGE size in number of Words(32bit) */
/* Clear the datalen before updating it. */
*datalen = 0;
- SGEdwords = sizeof (Mpi25IeeeSgeChain64_t) / 4;
-
MaxSGEs = instance->max_sge_in_main_msg;
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->SGLFlags,
MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
! // set data transfer flag.
if (acmd->cmd_flags & CFLAG_DMASEND) {
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Control,
MPI2_SCSIIO_CONTROL_WRITE);
} else {
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Control, MPI2_SCSIIO_CONTROL_READ);
}
numElements = acmd->cmd_cookiecnt;
--- 1221,1256 ----
Mpi2RaidSCSIIORequest_t *scsi_raid_io,
uint32_t *datalen)
{
uint32_t MaxSGEs;
int sg_to_process;
! uint32_t i, j;
uint32_t numElements, endElement;
Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
! ddi_acc_handle_t acc_handle =
! instance->mpi2_frame_pool_dma_obj.acc_handle;
con_log(CL_ANN1, (CE_NOTE,
"chkpnt: Building Chained SGL :%d", __LINE__));
/* Calulate SGE size in number of Words(32bit) */
/* Clear the datalen before updating it. */
*datalen = 0;
MaxSGEs = instance->max_sge_in_main_msg;
! ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
! /* set data transfer flag. */
if (acmd->cmd_flags & CFLAG_DMASEND) {
! ddi_put32(acc_handle, &scsi_raid_io->Control,
MPI2_SCSIIO_CONTROL_WRITE);
} else {
! ddi_put32(acc_handle, &scsi_raid_io->Control,
! MPI2_SCSIIO_CONTROL_READ);
}
numElements = acmd->cmd_cookiecnt;
*** 1275,1286 ****
con_log(CL_ANN, (CE_NOTE,
"[Max SGE Count Exceeded]:%x", numElements));
return (numElements);
}
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.numSGE, (uint8_t)numElements);
/* set end element in main message frame */
endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
/* prepare the scatter-gather list for the firmware */
--- 1260,1271 ----
con_log(CL_ANN, (CE_NOTE,
"[Max SGE Count Exceeded]:%x", numElements));
return (numElements);
}
! ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
! (uint8_t)numElements);
/* set end element in main message frame */
endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
/* prepare the scatter-gather list for the firmware */
*** 1289,1378 ****
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
sgl_ptr_end += instance->max_sge_in_main_msg - 1;
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &sgl_ptr_end->Flags, 0);
}
for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
! ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Address,
acmd->cmd_dmacookies[i].dmac_laddress);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Length,
acmd->cmd_dmacookies[i].dmac_size);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Flags, 0);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! if (i == (numElements - 1))
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
}
*datalen += acmd->cmd_dmacookies[i].dmac_size;
#ifdef DEBUG
! con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]:%llx",
scsi_raid_io_sgl_ieee->Address));
con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
scsi_raid_io_sgl_ieee->Length));
con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
scsi_raid_io_sgl_ieee->Flags));
#endif
}
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->ChainOffset, 0);
/* check if chained SGL required */
if (i < numElements) {
con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! uint16_t ioFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->IoFlags);
! if ((ioFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
! else
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->ChainOffset, 0);
}
! else {
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
}
/* prepare physical chain element */
ieeeChainElement = scsi_raid_io_sgl_ieee;
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &ieeeChainElement->NextChainOffset, 0);
! if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &ieeeChainElement->Flags, IEEE_SGE_FLAGS_CHAIN_ELEMENT );
! else
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &ieeeChainElement->Flags,
(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &ieeeChainElement->Length,
(sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
! ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &ieeeChainElement->Address,
(U64)cmd->sgl_phys_addr);
sg_to_process = numElements - i;
con_log(CL_ANN1, (CE_NOTE,
--- 1274,1359 ----
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
sgl_ptr_end += instance->max_sge_in_main_msg - 1;
! ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
}
for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
! ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
acmd->cmd_dmacookies[i].dmac_laddress);
! ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
acmd->cmd_dmacookies[i].dmac_size);
! ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! if (i == (numElements - 1)) {
! ddi_put8(acc_handle,
! &scsi_raid_io_sgl_ieee->Flags,
! IEEE_SGE_FLAGS_END_OF_LIST);
}
+ }
*datalen += acmd->cmd_dmacookies[i].dmac_size;
#ifdef DEBUG
! con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
scsi_raid_io_sgl_ieee->Address));
con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
scsi_raid_io_sgl_ieee->Length));
con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
scsi_raid_io_sgl_ieee->Flags));
#endif
}
! ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
/* check if chained SGL required */
if (i < numElements) {
con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! uint16_t ioFlags =
! ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
! if ((ioFlags &
! MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
! MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
! ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
! (U8)instance->chain_offset_io_req);
! } else {
! ddi_put8(acc_handle,
&scsi_raid_io->ChainOffset, 0);
}
! } else {
! ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
! (U8)instance->chain_offset_io_req);
}
/* prepare physical chain element */
ieeeChainElement = scsi_raid_io_sgl_ieee;
! ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
! if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! ddi_put8(acc_handle, &ieeeChainElement->Flags,
! IEEE_SGE_FLAGS_CHAIN_ELEMENT);
! } else {
! ddi_put8(acc_handle, &ieeeChainElement->Flags,
(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
+ }
! ddi_put32(acc_handle, &ieeeChainElement->Length,
(sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
! ddi_put64(acc_handle, &ieeeChainElement->Address,
(U64)cmd->sgl_phys_addr);
sg_to_process = numElements - i;
con_log(CL_ANN1, (CE_NOTE,
*** 1383,1414 ****
/* build rest of the SGL in chained buffer */
for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
! ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Address,
acmd->cmd_dmacookies[i].dmac_laddress);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Length,
acmd->cmd_dmacookies[i].dmac_size);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Flags, 0);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! if (i == (numElements - 1))
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
}
*datalen += acmd->cmd_dmacookies[i].dmac_size;
#if DEBUG
con_log(CL_DLEVEL1, (CE_NOTE,
! "[SGL Address]:%llx",
scsi_raid_io_sgl_ieee->Address));
con_log(CL_DLEVEL1, (CE_NOTE,
"[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
con_log(CL_DLEVEL1, (CE_NOTE,
"[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
--- 1364,1394 ----
/* build rest of the SGL in chained buffer */
for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
! ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
acmd->cmd_dmacookies[i].dmac_laddress);
! ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
acmd->cmd_dmacookies[i].dmac_size);
! ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! if (i == (numElements - 1)) {
! ddi_put8(acc_handle,
! &scsi_raid_io_sgl_ieee->Flags,
! IEEE_SGE_FLAGS_END_OF_LIST);
}
+ }
*datalen += acmd->cmd_dmacookies[i].dmac_size;
#if DEBUG
con_log(CL_DLEVEL1, (CE_NOTE,
! "[SGL Address]: %" PRIx64,
scsi_raid_io_sgl_ieee->Address));
con_log(CL_DLEVEL1, (CE_NOTE,
"[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
con_log(CL_DLEVEL1, (CE_NOTE,
"[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
*** 1423,1457 ****
/*
* build_cmd
*/
! struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
struct scsi_pkt *pkt, uchar_t *cmd_done)
{
uint8_t fp_possible = 0;
! uint16_t flags = 0;
! uint32_t i, index;
! uint32_t context;
! uint32_t sge_bytes;
! uint8_t ChainOffsetValue;
! uint32_t SGLFlags;
! uint32_t lba_count=0;
! uint32_t start_lba_hi=0;
! uint32_t start_lba_lo=0;
! ddi_acc_handle_t acc_handle;
struct mrsas_cmd *cmd = NULL;
struct scsa_cmd *acmd = PKT2CMD(pkt);
MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
- Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
uint32_t datalen;
struct IO_REQUEST_INFO io_info;
MR_FW_RAID_MAP_ALL *local_map_ptr;
- MR_LD_RAID *raid;
- U32 ld;
uint16_t pd_cmd_cdblen;
con_log(CL_DLEVEL1, (CE_NOTE,
"chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
--- 1403,1430 ----
/*
* build_cmd
*/
! static struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
struct scsi_pkt *pkt, uchar_t *cmd_done)
{
uint8_t fp_possible = 0;
! uint32_t index;
! uint32_t lba_count = 0;
! uint32_t start_lba_hi = 0;
! uint32_t start_lba_lo = 0;
! ddi_acc_handle_t acc_handle =
! instance->mpi2_frame_pool_dma_obj.acc_handle;
struct mrsas_cmd *cmd = NULL;
struct scsa_cmd *acmd = PKT2CMD(pkt);
MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
uint32_t datalen;
struct IO_REQUEST_INFO io_info;
MR_FW_RAID_MAP_ALL *local_map_ptr;
uint16_t pd_cmd_cdblen;
con_log(CL_DLEVEL1, (CE_NOTE,
"chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
*** 1465,1475 ****
if (!(cmd = get_raid_msg_pkt(instance))) {
return (NULL);
}
index = cmd->index;
! ReqDescUnion = mr_sas_get_request_descriptor(instance, index, cmd);
ReqDescUnion->Words = 0;
ReqDescUnion->SCSIIO.SMID = cmd->SMID;
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
--- 1438,1448 ----
if (!(cmd = get_raid_msg_pkt(instance))) {
return (NULL);
}
index = cmd->index;
! ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
ReqDescUnion->Words = 0;
ReqDescUnion->SCSIIO.SMID = cmd->SMID;
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
*** 1491,1528 ****
(void) ddi_dma_sync(acmd->cmd_dmahandle,
acmd->cmd_dma_offset, acmd->cmd_dma_len,
DDI_DMA_SYNC_FORCPU);
}
} else {
! con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
}
! // get SCSI_IO raid message frame pointer
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
/* zero out SCSI_IO raid message frame */
! memset(scsi_raid_io, 0, sizeof(Mpi2RaidSCSIIORequest_t));
! /*Set the ldTargetId set by BuildRaidContext() */
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.ldTargetId,
acmd->device_id);
/* Copy CDB to scsi_io_request message frame */
! ddi_rep_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! (uint8_t *)pkt->pkt_cdbp,
! (uint8_t *)scsi_raid_io->CDB.CDB32,
acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
/*
! * Just the CDB length,rest of the Flags are zero
* This will be modified later.
*/
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->IoFlags,
! acmd->cmd_cdblen);
pd_cmd_cdblen = acmd->cmd_cdblen;
switch (pkt->pkt_cdbp[0]) {
case SCMD_READ:
--- 1464,1497 ----
(void) ddi_dma_sync(acmd->cmd_dmahandle,
acmd->cmd_dma_offset, acmd->cmd_dma_len,
DDI_DMA_SYNC_FORCPU);
}
} else {
! con_log(CL_ANN, (CE_NOTE, "NO DMA"));
}
! /* get SCSI_IO raid message frame pointer */
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
/* zero out SCSI_IO raid message frame */
! bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
! /* Set the ldTargetId set by BuildRaidContext() */
! ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
acmd->device_id);
/* Copy CDB to scsi_io_request message frame */
! ddi_rep_put8(acc_handle,
! (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
/*
! * Just the CDB length, rest of the Flags are zero
* This will be modified later.
*/
! ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
pd_cmd_cdblen = acmd->cmd_cdblen;
switch (pkt->pkt_cdbp[0]) {
case SCMD_READ:
*** 1535,1558 ****
case SCMD_WRITE_G5:
if (acmd->islogical) {
/* Initialize sense Information */
if (cmd->sense1 == NULL) {
! con_log(CL_ANN, (CE_NOTE,
! "tbolt_build_cmd: Sense buffer ptr NULL \n"));
}
bzero(cmd->sense1, SENSE_LENGTH);
! con_log(CL_DLEVEL2, (CE_NOTE,
! "tbolt_build_cmd CDB[0] = %x\n", pkt->pkt_cdbp[0]));
! if (acmd->cmd_cdblen == CDB_GROUP0) { /* 6-byte cdb */
lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
! start_lba_lo =
! ((uint32_t)(pkt->pkt_cdbp[3]) |
((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
! ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) << 16));
! } else if (acmd->cmd_cdblen == CDB_GROUP1) { /* 10-byte cdb */
lba_count =
(((uint16_t)(pkt->pkt_cdbp[8])) |
((uint16_t)(pkt->pkt_cdbp[7]) << 8));
start_lba_lo =
--- 1504,1529 ----
case SCMD_WRITE_G5:
if (acmd->islogical) {
/* Initialize sense Information */
if (cmd->sense1 == NULL) {
! con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
! "Sense buffer ptr NULL "));
}
bzero(cmd->sense1, SENSE_LENGTH);
! con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
! "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
! if (acmd->cmd_cdblen == CDB_GROUP0) {
! /* 6-byte cdb */
lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
! start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
! ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
! << 16));
! } else if (acmd->cmd_cdblen == CDB_GROUP1) {
! /* 10-byte cdb */
lba_count =
(((uint16_t)(pkt->pkt_cdbp[8])) |
((uint16_t)(pkt->pkt_cdbp[7]) << 8));
start_lba_lo =
*** 1559,1569 ****
(((uint32_t)(pkt->pkt_cdbp[5])) |
((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
! } else if (acmd->cmd_cdblen == CDB_GROUP5) { /* 12-byte cdb */
lba_count = (
((uint32_t)(pkt->pkt_cdbp[9])) |
((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
((uint32_t)(pkt->pkt_cdbp[6]) << 24));
--- 1530,1541 ----
(((uint32_t)(pkt->pkt_cdbp[5])) |
((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
! } else if (acmd->cmd_cdblen == CDB_GROUP5) {
! /* 12-byte cdb */
lba_count = (
((uint32_t)(pkt->pkt_cdbp[9])) |
((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
((uint32_t)(pkt->pkt_cdbp[6]) << 24));
*** 1572,1582 ****
(((uint32_t)(pkt->pkt_cdbp[5])) |
((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
! } else if (acmd->cmd_cdblen == CDB_GROUP4) { /* 16-byte cdb */
lba_count = (
((uint32_t)(pkt->pkt_cdbp[13])) |
((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
((uint32_t)(pkt->pkt_cdbp[10]) << 24));
--- 1544,1555 ----
(((uint32_t)(pkt->pkt_cdbp[5])) |
((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
! } else if (acmd->cmd_cdblen == CDB_GROUP4) {
! /* 16-byte cdb */
lba_count = (
((uint32_t)(pkt->pkt_cdbp[13])) |
((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
((uint32_t)(pkt->pkt_cdbp[10]) << 24));
*** 1593,1770 ****
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
}
if (instance->tbolt &&
! ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer) )
! cmn_err(CE_WARN," IO SECTOR COUNT exceeds controller limit 0x%x sectors\n", lba_count);
! memset(&io_info, 0, sizeof (struct IO_REQUEST_INFO));
! io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
io_info.numBlocks = lba_count;
io_info.ldTgtId = acmd->device_id;
if (acmd->cmd_flags & CFLAG_DMASEND)
io_info.isRead = 0;
else
io_info.isRead = 1;
! /*Aquire SYNC MAP UPDATE lock */
mutex_enter(&instance->sync_map_mtx);
! local_map_ptr = instance->ld_map[(instance->map_id & 1)];
! if ( (MR_TargetIdToLdGet(acmd->device_id, local_map_ptr) >= MAX_LOGICAL_DRIVES) || !instance->fast_path_io ){
! cmn_err(CE_NOTE,
! "Fast Path NOT Possible, targetId >= MAX_LOGICAL_DRIVES || !instance->fast_path_io\n");
fp_possible = 0;
! /* Set Regionlock flags to BYPASS
! io_request->RaidContext.regLockFlags = 0; */
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->RaidContext.regLockFlags, 0);
} else {
if (MR_BuildRaidContext(instance, &io_info,
&scsi_raid_io->RaidContext, local_map_ptr))
fp_possible = io_info.fpOkForIo;
}
! if (!enable_fp) {
fp_possible = 0;
! }
! con_log(CL_ANN1, (CE_NOTE,
! "enable_fp %d instance->fast_path_io %d fp_possible %d \n",
enable_fp, instance->fast_path_io, fp_possible));
if (fp_possible) {
/* Check for DIF enabled LD */
if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
/* Prepare 32 Byte CDB for DIF capable Disk */
mrsas_tbolt_prepare_cdb(instance,
scsi_raid_io->CDB.CDB32,
! &io_info,
! scsi_raid_io,
! start_lba_lo);
} else {
mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
! (uint8_t *)&pd_cmd_cdblen, io_info.pdBlock, io_info.numBlocks, 0);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->IoFlags,
! pd_cmd_cdblen);
}
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Function,
MPI2_FUNCTION_SCSI_IO_REQUEST);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->RaidContext.regLockFlags);
! uint16_t IoFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->IoFlags);
if (regLockFlags == REGION_TYPE_UNUSED)
ReqDescUnion->SCSIIO.RequestFlags =
! (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
! regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->ChainOffset, 0);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->IoFlags, IoFlags);
}
! if ((instance->load_balance_info[acmd->device_id].loadBalanceFlag) && (io_info.isRead)) {
! io_info.devHandle = get_updated_dev_handle(&instance->load_balance_info[acmd->device_id], &io_info);
! cmd->load_balance_flag |= MEGASAS_LOAD_BALANCE_FLAG;
! } else
! cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->DevHandle,
io_info.devHandle);
} else {
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Function,
MPI2_FUNCTION_LD_IO_REQUEST);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->DevHandle, acmd->device_id);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->RaidContext.regLockFlags);
! if (regLockFlags == REGION_TYPE_UNUSED)
ReqDescUnion->SCSIIO.RequestFlags =
! (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
}
-
} /* Not FP */
! /*Release SYNC MAP UPDATE lock */
mutex_exit(&instance->sync_map_mtx);
! /* Set sense buffer physical address/length in scsi_io_request.*/
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->SenseBufferLength,
SENSE_LENGTH);
! /* Construct SGL*/
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
! mr_sas_tbolt_build_sgl(instance, acmd, cmd,
scsi_raid_io, &datalen);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->DataLength, datalen);
break;
-
- }
- else {
#ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
break;
#endif
}
/* fall through For all non-rd/wr cmds */
default:
switch (pkt->pkt_cdbp[0]) {
! case 0x35: { // SCMD_SYNCHRONIZE_CACHE
return_raid_msg_pkt(instance, cmd);
*cmd_done = 1;
return (NULL);
}
--- 1566,1764 ----
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
}
if (instance->tbolt &&
! ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
! cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
! "controller limit 0x%x sectors",
! lba_count);
! }
! bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
! io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
! start_lba_lo;
io_info.numBlocks = lba_count;
io_info.ldTgtId = acmd->device_id;
if (acmd->cmd_flags & CFLAG_DMASEND)
io_info.isRead = 0;
else
io_info.isRead = 1;
! /* Acquire SYNC MAP UPDATE lock */
mutex_enter(&instance->sync_map_mtx);
! local_map_ptr =
! instance->ld_map[(instance->map_id & 1)];
! if ((MR_TargetIdToLdGet(
! acmd->device_id, local_map_ptr) >=
! MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
! cmn_err(CE_NOTE, "Fast Path NOT Possible, "
! "targetId >= MAX_LOGICAL_DRIVES || "
! "!instance->fast_path_io");
fp_possible = 0;
! /* Set Regionlock flags to BYPASS */
! /* io_request->RaidContext.regLockFlags = 0; */
! ddi_put8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags, 0);
} else {
if (MR_BuildRaidContext(instance, &io_info,
&scsi_raid_io->RaidContext, local_map_ptr))
fp_possible = io_info.fpOkForIo;
}
! if (!enable_fp)
fp_possible = 0;
!
! con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
! "instance->fast_path_io %d fp_possible %d",
enable_fp, instance->fast_path_io, fp_possible));
if (fp_possible) {
/* Check for DIF enabled LD */
if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
/* Prepare 32 Byte CDB for DIF capable Disk */
mrsas_tbolt_prepare_cdb(instance,
scsi_raid_io->CDB.CDB32,
! &io_info, scsi_raid_io, start_lba_lo);
} else {
mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
! (uint8_t *)&pd_cmd_cdblen,
! io_info.pdBlock, io_info.numBlocks);
! ddi_put16(acc_handle,
! &scsi_raid_io->IoFlags, pd_cmd_cdblen);
}
! ddi_put8(acc_handle, &scsi_raid_io->Function,
MPI2_FUNCTION_SCSI_IO_REQUEST);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! uint8_t regLockFlags = ddi_get8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags);
! uint16_t IoFlags = ddi_get16(acc_handle,
&scsi_raid_io->IoFlags);
if (regLockFlags == REGION_TYPE_UNUSED)
ReqDescUnion->SCSIIO.RequestFlags =
! (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
! MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! IoFlags |=
! MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
! regLockFlags |=
! (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
! MR_RL_FLAGS_SEQ_NUM_ENABLE);
! ddi_put8(acc_handle,
&scsi_raid_io->ChainOffset, 0);
! ddi_put8(acc_handle,
! &scsi_raid_io->RaidContext.nsegType,
! ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
! MPI2_TYPE_CUDA));
! ddi_put8(acc_handle,
! &scsi_raid_io->RaidContext.regLockFlags,
! regLockFlags);
! ddi_put16(acc_handle,
&scsi_raid_io->IoFlags, IoFlags);
}
! if ((instance->load_balance_info[
! acmd->device_id].loadBalanceFlag) &&
! (io_info.isRead)) {
! io_info.devHandle =
! get_updated_dev_handle(&instance->
! load_balance_info[acmd->device_id],
! &io_info);
! cmd->load_balance_flag |=
! MEGASAS_LOAD_BALANCE_FLAG;
! } else {
! cmd->load_balance_flag &=
! ~MEGASAS_LOAD_BALANCE_FLAG;
! }
ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
! ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
io_info.devHandle);
} else {
! ddi_put8(acc_handle, &scsi_raid_io->Function,
MPI2_FUNCTION_LD_IO_REQUEST);
! ddi_put16(acc_handle,
&scsi_raid_io->DevHandle, acmd->device_id);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! ddi_put16(acc_handle,
! &scsi_raid_io->RaidContext.timeoutValue,
! local_map_ptr->raidMap.fpPdIoTimeoutSec);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! uint8_t regLockFlags = ddi_get8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags);
! if (regLockFlags == REGION_TYPE_UNUSED) {
ReqDescUnion->SCSIIO.RequestFlags =
! (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
! MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! }
! regLockFlags |=
! (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
! MR_RL_FLAGS_SEQ_NUM_ENABLE);
! ddi_put8(acc_handle,
! &scsi_raid_io->RaidContext.nsegType,
! ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
! MPI2_TYPE_CUDA));
! ddi_put8(acc_handle,
! &scsi_raid_io->RaidContext.regLockFlags,
! regLockFlags);
}
} /* Not FP */
! /* Release SYNC MAP UPDATE lock */
mutex_exit(&instance->sync_map_mtx);
! /*
! * Set sense buffer physical address/length in scsi_io_request.
! */
! ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
! ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
SENSE_LENGTH);
! /* Construct SGL */
! ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
! (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
scsi_raid_io, &datalen);
! ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
break;
#ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
+ } else {
break;
#endif
}
/* fall through For all non-rd/wr cmds */
default:
switch (pkt->pkt_cdbp[0]) {
! case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
return_raid_msg_pkt(instance, cmd);
*cmd_done = 1;
return (NULL);
}
*** 1785,1862 ****
}
break;
}
default: {
! /* Here we need to handle PASSTHRU for
! Logical Devices. Like Inquiry etc.*/
! if(!(acmd->islogical)) {
! /* Aquire SYNC MAP UPDATE lock */
mutex_enter(&instance->sync_map_mtx);
! local_map_ptr = instance->ld_map[(instance->map_id & 1)];
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Function, MPI2_FUNCTION_SCSI_IO_REQUEST);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->DevHandle,
! local_map_ptr->raidMap.devHndlInfo[acmd->device_id].curDevHdl);
! /*Set regLockFlasgs to REGION_TYPE_BYPASS */
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->RaidContext.regLockFlags, 0);
! ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.regLockRowLBA, 0);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.regLockLength, 0);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.RAIDFlags,
! MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.ldTargetId, acmd->device_id);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->LUN[1], acmd->lun);
/* Release SYNC MAP UPDATE lock */
mutex_exit(&instance->sync_map_mtx);
} else {
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Function, MPI2_FUNCTION_LD_IO_REQUEST);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->LUN[1], acmd->lun);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->DevHandle, acmd->device_id);
ReqDescUnion->SCSIIO.RequestFlags =
! (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
! /* Set sense buffer physical address/length in scsi_io_request.*/
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
! /* Construct SGL*/
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
! mr_sas_tbolt_build_sgl(instance, acmd, cmd,
scsi_raid_io, &datalen);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
&scsi_raid_io->DataLength, datalen);
con_log(CL_ANN, (CE_CONT,
"tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
--- 1779,1868 ----
}
break;
}
default: {
! /*
! * Here we need to handle PASSTHRU for
! * Logical Devices. Like Inquiry etc.
! */
! if (!(acmd->islogical)) {
! /* Acquire SYNC MAP UPDATE lock */
mutex_enter(&instance->sync_map_mtx);
! local_map_ptr =
! instance->ld_map[(instance->map_id & 1)];
! ddi_put8(acc_handle, &scsi_raid_io->Function,
! MPI2_FUNCTION_SCSI_IO_REQUEST);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
! ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
! local_map_ptr->raidMap.
! devHndlInfo[acmd->device_id].curDevHdl);
! /* Set regLockFlags to REGION_TYPE_BYPASS */
! ddi_put8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags, 0);
! ddi_put64(acc_handle,
! &scsi_raid_io->RaidContext.regLockRowLBA,
! 0);
! ddi_put32(acc_handle,
! &scsi_raid_io->RaidContext.regLockLength,
! 0);
! ddi_put8(acc_handle,
! &scsi_raid_io->RaidContext.RAIDFlags,
! MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
! MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
! ddi_put16(acc_handle,
! &scsi_raid_io->RaidContext.timeoutValue,
! local_map_ptr->raidMap.fpPdIoTimeoutSec);
! ddi_put16(acc_handle,
! &scsi_raid_io->RaidContext.ldTargetId,
! acmd->device_id);
! ddi_put8(acc_handle,
&scsi_raid_io->LUN[1], acmd->lun);
/* Release SYNC MAP UPDATE lock */
mutex_exit(&instance->sync_map_mtx);
} else {
! ddi_put8(acc_handle, &scsi_raid_io->Function,
! MPI2_FUNCTION_LD_IO_REQUEST);
! ddi_put8(acc_handle,
&scsi_raid_io->LUN[1], acmd->lun);
! ddi_put16(acc_handle,
&scsi_raid_io->DevHandle, acmd->device_id);
ReqDescUnion->SCSIIO.RequestFlags =
! (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
! MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
! /*
! * Set sense buffer physical address/length in
! * scsi_io_request.
! */
! ddi_put32(acc_handle,
&scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
! ddi_put8(acc_handle,
&scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
! /* Construct SGL */
! ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
! (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
scsi_raid_io, &datalen);
! ddi_put32(acc_handle,
&scsi_raid_io->DataLength, datalen);
con_log(CL_ANN, (CE_CONT,
"tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
*** 1870,1882 ****
}
break;
}
}
- #ifdef lint
- context = context;
- #endif
return (cmd);
}
/*
--- 1876,1885 ----
*** 1981,1998 ****
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
atomic_add_16(&instance->fw_outstanding, 1);
struct scsi_pkt *pkt;
! con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
con_log(CL_DLEVEL1, (CE_CONT,
! " [req desc Words] %llx \n", req_desc->Words));
con_log(CL_DLEVEL1, (CE_CONT,
! " [req desc low part] %x \n", req_desc->Words));
con_log(CL_DLEVEL1, (CE_CONT,
! " [req desc high part] %x \n", (req_desc->Words >> 32)));
pkt = cmd->pkt;
if (pkt) {
con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
"ISSUED CMD TO FW : called : cmd:"
--- 1984,2003 ----
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
atomic_add_16(&instance->fw_outstanding, 1);
struct scsi_pkt *pkt;
! con_log(CL_ANN1,
! (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
con_log(CL_DLEVEL1, (CE_CONT,
! " [req desc Words] %" PRIx64 " \n", req_desc->Words));
con_log(CL_DLEVEL1, (CE_CONT,
! " [req desc low part] %x \n",
! (uint_t)(req_desc->Words & 0xffffffff)))
con_log(CL_DLEVEL1, (CE_CONT,
! " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
pkt = cmd->pkt;
if (pkt) {
con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
"ISSUED CMD TO FW : called : cmd:"
*** 2032,2069 ****
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
struct mrsas_header *hdr;
hdr = (struct mrsas_header *)&cmd->frame->hdr;
! con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", cmd->SMID));
if (instance->adapterresetinprogress) {
cmd->drv_pkt_time = ddi_get16
(cmd->frame_dma_obj.acc_handle, &hdr->timeout);
if (cmd->drv_pkt_time < debug_timeout_g)
cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
! "RESET-IN-PROGRESS, issue cmd & return.\n"));
mutex_enter(&instance->reg_write_mtx);
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
mutex_exit(&instance->reg_write_mtx);
return (DDI_SUCCESS);
} else {
! con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
push_pending_mfi_pkt(instance, cmd);
}
con_log(CL_DLEVEL2, (CE_NOTE,
! "HighQport offset :%lx",
! (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
con_log(CL_DLEVEL2, (CE_NOTE,
! "LowQport offset :%lx",
! (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
cmd->sync_cmd = MRSAS_TRUE;
cmd->cmd_status = ENODATA;
--- 2037,2077 ----
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
struct mrsas_header *hdr;
hdr = (struct mrsas_header *)&cmd->frame->hdr;
! con_log(CL_ANN,
! (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
! cmd->SMID));
if (instance->adapterresetinprogress) {
cmd->drv_pkt_time = ddi_get16
(cmd->frame_dma_obj.acc_handle, &hdr->timeout);
if (cmd->drv_pkt_time < debug_timeout_g)
cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
! "RESET-IN-PROGRESS, issue cmd & return."));
mutex_enter(&instance->reg_write_mtx);
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
mutex_exit(&instance->reg_write_mtx);
return (DDI_SUCCESS);
} else {
! con_log(CL_ANN1, (CE_NOTE,
! "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
push_pending_mfi_pkt(instance, cmd);
}
con_log(CL_DLEVEL2, (CE_NOTE,
! "HighQport offset :%p",
! (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
con_log(CL_DLEVEL2, (CE_NOTE,
! "LowQport offset :%p",
! (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
cmd->sync_cmd = MRSAS_TRUE;
cmd->cmd_status = ENODATA;
*** 2071,2083 ****
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
mutex_exit(&instance->reg_write_mtx);
con_log(CL_ANN1, (CE_NOTE,
! " req desc high part %x \n", (req_desc->Words >> 32)));
! con_log(CL_ANN1, (CE_NOTE,
! " req desc low part %x \n", req_desc->Words));
mutex_enter(&instance->int_cmd_mtx);
for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
}
--- 2079,2091 ----
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
mutex_exit(&instance->reg_write_mtx);
con_log(CL_ANN1, (CE_NOTE,
! " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
! con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
! (uint_t)(req_desc->Words & 0xffffffff)));
mutex_enter(&instance->int_cmd_mtx);
for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
}
*** 2101,2111 ****
int i;
uint16_t flags;
uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
struct mrsas_header *frame_hdr;
! con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
--- 2109,2121 ----
int i;
uint16_t flags;
uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
struct mrsas_header *frame_hdr;
! con_log(CL_ANN,
! (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
! cmd->SMID));
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
*** 2112,2125 ****
MFI_CMD_STATUS_POLL_MODE);
flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
con_log(CL_ANN1, (CE_NOTE,
! " req desc low part %x \n", req_desc->Words));
! con_log(CL_ANN1, (CE_NOTE,
! " req desc high part %x \n", (req_desc->Words >> 32)));
/* issue the frame using inbound queue port */
mutex_enter(&instance->reg_write_mtx);
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
--- 2122,2135 ----
MFI_CMD_STATUS_POLL_MODE);
flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
+ con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
+ (uint_t)(req_desc->Words & 0xffffffff)));
con_log(CL_ANN1, (CE_NOTE,
! " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
/* issue the frame using inbound queue port */
mutex_enter(&instance->reg_write_mtx);
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
*** 2133,2177 ****
}
if (ddi_get8(cmd->frame_dma_obj.acc_handle,
&frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
con_log(CL_ANN1, (CE_NOTE,
! " cmd failed %x \n", (req_desc->Words)));
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
void
tbolt_enable_intr(struct mrsas_instance *instance)
{
- uint32_t mask;
-
/* TODO: For Thunderbolt/Invader also clear intr on enable */
! //writel(~0, ®s->outbound_intr_status);
! //readl(®s->outbound_intr_status);
WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
/* dummy read to force PCI flush */
! mask = RD_OB_INTR_MASK(instance);
}
void
tbolt_disable_intr(struct mrsas_instance *instance)
{
uint32_t mask = 0xFFFFFFFF;
- uint32_t status;
-
WR_OB_INTR_MASK(mask, instance);
/* Dummy readl to force pci flush */
! status = RD_OB_INTR_MASK(instance);
}
int
tbolt_intr_ack(struct mrsas_instance *instance)
--- 2143,2183 ----
}
if (ddi_get8(cmd->frame_dma_obj.acc_handle,
&frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
con_log(CL_ANN1, (CE_NOTE,
! " cmd failed %" PRIx64, (req_desc->Words)));
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
void
tbolt_enable_intr(struct mrsas_instance *instance)
{
/* TODO: For Thunderbolt/Invader also clear intr on enable */
! /* writel(~0, ®s->outbound_intr_status); */
! /* readl(®s->outbound_intr_status); */
WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
/* dummy read to force PCI flush */
! (void) RD_OB_INTR_MASK(instance);
}
void
tbolt_disable_intr(struct mrsas_instance *instance)
{
uint32_t mask = 0xFFFFFFFF;
WR_OB_INTR_MASK(mask, instance);
/* Dummy readl to force pci flush */
! (void) RD_OB_INTR_MASK(instance);
}
int
tbolt_intr_ack(struct mrsas_instance *instance)
*** 2179,2199 ****
uint32_t status;
/* check if it is our interrupt */
status = RD_OB_INTR_STATUS(instance);
con_log(CL_ANN1, (CE_NOTE,
! "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
return (DDI_INTR_UNCLAIMED);
}
if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
/* clear the interrupt by writing back the same value */
WR_OB_INTR_STATUS(status, instance);
/* dummy READ */
! RD_OB_INTR_STATUS(instance);
}
return (DDI_INTR_CLAIMED);
}
/*
--- 2185,2210 ----
uint32_t status;
/* check if it is our interrupt */
status = RD_OB_INTR_STATUS(instance);
con_log(CL_ANN1, (CE_NOTE,
! "chkpnt: Entered tbolt_intr_ack status = %d", status));
if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
return (DDI_INTR_UNCLAIMED);
}
+ if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
/* clear the interrupt by writing back the same value */
WR_OB_INTR_STATUS(status, instance);
/* dummy READ */
! (void) RD_OB_INTR_STATUS(instance);
}
return (DDI_INTR_CLAIMED);
}
/*
*** 2298,2320 ****
{
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
uint32_t index;
if (!instance->tbolt) {
! con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
return;
}
index = cmd->index;
! ReqDescUnion =
! mr_sas_get_request_descriptor(instance, index, cmd);
if (!ReqDescUnion) {
! con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]%x"));
return;
}
con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
--- 2309,2332 ----
{
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
uint32_t index;
+ ddi_acc_handle_t acc_handle =
+ instance->mpi2_frame_pool_dma_obj.acc_handle;
if (!instance->tbolt) {
! con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
return;
}
index = cmd->index;
! ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
if (!ReqDescUnion) {
! con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
return;
}
con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
*** 2326,2378 ****
ReqDescUnion->SCSIIO.SMID = cmd->SMID;
cmd->request_desc = ReqDescUnion;
! // get raid message frame pointer
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
sgl_ptr_end += instance->max_sge_in_main_msg - 1;
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &sgl_ptr_end->Flags, 0);
}
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Function,
MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->ChainOffset,
(U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
scsi_raid_io_sgl_ieee =
(Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
! ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Address,
(U64)cmd->frame_phys_addr);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Flags,
! (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io_sgl_ieee->Length, 1024); //MEGASAS_MAX_SZ_CHAIN_FRAME
con_log(CL_ANN1, (CE_NOTE,
! "[MFI CMD PHY ADDRESS]:%x",
scsi_raid_io_sgl_ieee->Address));
con_log(CL_ANN1, (CE_NOTE,
"[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
scsi_raid_io_sgl_ieee->Flags));
--- 2338,2384 ----
ReqDescUnion->SCSIIO.SMID = cmd->SMID;
cmd->request_desc = ReqDescUnion;
! /* get raid message frame pointer */
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
! Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
! &scsi_raid_io->SGL.IeeeChain;
sgl_ptr_end += instance->max_sge_in_main_msg - 1;
! ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
}
! ddi_put8(acc_handle, &scsi_raid_io->Function,
MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
! ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
! ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
(U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
! ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
scsi_raid_io_sgl_ieee =
(Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
! ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
(U64)cmd->frame_phys_addr);
! ddi_put8(acc_handle,
! &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
! /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
! ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
con_log(CL_ANN1, (CE_NOTE,
! "[MFI CMD PHY ADDRESS]:%" PRIx64,
scsi_raid_io_sgl_ieee->Address));
con_log(CL_ANN1, (CE_NOTE,
"[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
scsi_raid_io_sgl_ieee->Flags));
*** 2389,2406 ****
struct scsa_cmd *acmd;
struct scsi_pkt *pkt;
struct scsi_arq_status *arqstat;
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
LD_LOAD_BALANCE_INFO *lbinfo;
! int i;
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
! status = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.status);
! extStatus = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.extStatus);
con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
if (status != MFI_STAT_OK) {
--- 2395,2411 ----
struct scsa_cmd *acmd;
struct scsi_pkt *pkt;
struct scsi_arq_status *arqstat;
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
LD_LOAD_BALANCE_INFO *lbinfo;
! ddi_acc_handle_t acc_handle =
! instance->mpi2_frame_pool_dma_obj.acc_handle;
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
! status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
! extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
if (status != MFI_STAT_OK) {
*** 2411,2458 ****
"IO Cmd Success SMID %x", cmd->SMID));
}
/* regular commands */
! switch (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->Function)) {
case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
acmd = (struct scsa_cmd *)cmd->cmd;
lbinfo = &instance->load_balance_info[acmd->device_id];
if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
! arm = lbinfo->raid1DevHandle[0] == scsi_raid_io->DevHandle ? 0 : 1;
lbinfo->scsi_pending_cmds[arm]--;
cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
con_log(CL_DLEVEL3, (CE_NOTE,
"FastPath IO Completion Success "));
! case MPI2_FUNCTION_LD_IO_REQUEST : {// Regular Path IO.
acmd = (struct scsa_cmd *)cmd->cmd;
pkt = (struct scsi_pkt *)CMD2PKT(acmd);
if (acmd->cmd_flags & CFLAG_DMAVALID) {
if (acmd->cmd_flags & CFLAG_CONSISTENT) {
(void) ddi_dma_sync(acmd->cmd_dmahandle,
! acmd->cmd_dma_offset,
! acmd->cmd_dma_len,
DDI_DMA_SYNC_FORCPU);
}
}
pkt->pkt_reason = CMD_CMPLT;
pkt->pkt_statistics = 0;
! pkt->pkt_state = STATE_GOT_BUS
! | STATE_GOT_TARGET | STATE_SENT_CMD
! | STATE_XFERRED_DATA | STATE_GOT_STATUS;
! con_log(CL_ANN, (CE_CONT,
! " CDB[0] = %x completed for %s: size %lx SMID %x cmd_status %x",
! pkt->pkt_cdbp[0],
((acmd->islogical) ? "LD" : "PD"),
acmd->cmd_dmacount, cmd->SMID, status));
if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
struct scsi_inquiry *inq;
--- 2416,2461 ----
"IO Cmd Success SMID %x", cmd->SMID));
}
/* regular commands */
! switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
acmd = (struct scsa_cmd *)cmd->cmd;
lbinfo = &instance->load_balance_info[acmd->device_id];
if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
! arm = lbinfo->raid1DevHandle[0] ==
! scsi_raid_io->DevHandle ? 0 : 1;
lbinfo->scsi_pending_cmds[arm]--;
cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
con_log(CL_DLEVEL3, (CE_NOTE,
"FastPath IO Completion Success "));
+ /* FALLTHRU */
! case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
acmd = (struct scsa_cmd *)cmd->cmd;
pkt = (struct scsi_pkt *)CMD2PKT(acmd);
if (acmd->cmd_flags & CFLAG_DMAVALID) {
if (acmd->cmd_flags & CFLAG_CONSISTENT) {
(void) ddi_dma_sync(acmd->cmd_dmahandle,
! acmd->cmd_dma_offset, acmd->cmd_dma_len,
DDI_DMA_SYNC_FORCPU);
}
}
pkt->pkt_reason = CMD_CMPLT;
pkt->pkt_statistics = 0;
! pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
! STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
! con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
! "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
((acmd->islogical) ? "LD" : "PD"),
acmd->cmd_dmacount, cmd->SMID, status));
if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
struct scsi_inquiry *inq;
*** 2463,2488 ****
acmd->cmd_buf->b_un.b_addr;
/* don't expose physical drives to OS */
if (acmd->islogical &&
(status == MFI_STAT_OK)) {
! display_scsi_inquiry(
! (caddr_t)inq);
! }
#ifdef PDSUPPORT
! else if ((status ==
! MFI_STAT_OK) && inq->inq_dtype ==
! DTYPE_DIRECT) {
!
! display_scsi_inquiry(
! (caddr_t)inq);
! }
#endif
! else {
/* for physical disk */
! status =
! MFI_STAT_DEVICE_NOT_FOUND;
}
}
}
switch (status) {
--- 2466,2484 ----
acmd->cmd_buf->b_un.b_addr;
/* don't expose physical drives to OS */
if (acmd->islogical &&
(status == MFI_STAT_OK)) {
! display_scsi_inquiry((caddr_t)inq);
#ifdef PDSUPPORT
! } else if ((status == MFI_STAT_OK) &&
! inq->inq_dtype == DTYPE_DIRECT) {
! display_scsi_inquiry((caddr_t)inq);
#endif
! } else {
/* for physical disk */
! status = MFI_STAT_DEVICE_NOT_FOUND;
}
}
}
switch (status) {
*** 2503,2517 ****
case MFI_STAT_SCSI_DONE_WITH_ERROR:
con_log(CL_ANN, (CE_WARN,
"tbolt_complete_cmd: scsi_done with error"));
pkt->pkt_reason = CMD_CMPLT;
! ((struct scsi_status *)
! pkt->pkt_scbp)->sts_chk = 1;
if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
! con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));
} else {
pkt->pkt_state |= STATE_ARQ_DONE;
arqstat = (void *)(pkt->pkt_scbp);
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
--- 2499,2513 ----
case MFI_STAT_SCSI_DONE_WITH_ERROR:
con_log(CL_ANN, (CE_WARN,
"tbolt_complete_cmd: scsi_done with error"));
pkt->pkt_reason = CMD_CMPLT;
! ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
! con_log(CL_ANN,
! (CE_WARN, "TEST_UNIT_READY fail"));
} else {
pkt->pkt_state |= STATE_ARQ_DONE;
arqstat = (void *)(pkt->pkt_scbp);
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
*** 2519,2550 ****
STATE_GOT_BUS | STATE_GOT_TARGET
| STATE_SENT_CMD
| STATE_XFERRED_DATA;
*(uint8_t *)&arqstat->sts_rqpkt_status =
STATUS_GOOD;
! con_log(CL_ANN1, (CE_NOTE,
! "Copying Sense data %x",
cmd->SMID));
! ddi_rep_get8(
! instance->
! mpi2_frame_pool_dma_obj.acc_handle,
! (uint8_t *)
! &(arqstat->sts_sensedata),
cmd->sense1,
sizeof (struct scsi_extended_sense),
DDI_DEV_AUTOINCR);
}
break;
case MFI_STAT_LD_OFFLINE:
cmn_err(CE_WARN,
"tbolt_complete_cmd: ld offline "
! "CDB[0]=0x%x targetId=0x%x devhandle=0x%x\n", //UNDO:
! ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->CDB.CDB32[0]),
! ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.ldTargetId),
! ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->DevHandle) );
pkt->pkt_reason = CMD_DEV_GONE;
pkt->pkt_statistics = STAT_DISCON;
break;
case MFI_STAT_DEVICE_NOT_FOUND:
con_log(CL_ANN, (CE_CONT,
--- 2515,2548 ----
STATE_GOT_BUS | STATE_GOT_TARGET
| STATE_SENT_CMD
| STATE_XFERRED_DATA;
*(uint8_t *)&arqstat->sts_rqpkt_status =
STATUS_GOOD;
! con_log(CL_ANN1,
! (CE_NOTE, "Copying Sense data %x",
cmd->SMID));
! ddi_rep_get8(acc_handle,
! (uint8_t *)&(arqstat->sts_sensedata),
cmd->sense1,
sizeof (struct scsi_extended_sense),
DDI_DEV_AUTOINCR);
}
break;
case MFI_STAT_LD_OFFLINE:
cmn_err(CE_WARN,
"tbolt_complete_cmd: ld offline "
! "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
! /* UNDO: */
! ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
!
! ddi_get16(acc_handle,
! &scsi_raid_io->RaidContext.ldTargetId),
!
! ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
!
pkt->pkt_reason = CMD_DEV_GONE;
pkt->pkt_statistics = STAT_DISCON;
break;
case MFI_STAT_DEVICE_NOT_FOUND:
con_log(CL_ANN, (CE_CONT,
*** 2554,2580 ****
break;
case MFI_STAT_LD_LBA_OUT_OF_RANGE:
pkt->pkt_state |= STATE_ARQ_DONE;
pkt->pkt_reason = CMD_CMPLT;
! ((struct scsi_status *)
! pkt->pkt_scbp)->sts_chk = 1;
arqstat = (void *)(pkt->pkt_scbp);
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_state |= STATE_GOT_BUS
| STATE_GOT_TARGET | STATE_SENT_CMD
| STATE_XFERRED_DATA;
! *(uint8_t *)&arqstat->sts_rqpkt_status =
! STATUS_GOOD;
arqstat->sts_sensedata.es_valid = 1;
! arqstat->sts_sensedata.es_key =
! KEY_ILLEGAL_REQUEST;
! arqstat->sts_sensedata.es_class =
! CLASS_EXTENDED_SENSE;
/*
* LOGICAL BLOCK ADDRESS OUT OF RANGE:
* ASC: 0x21h; ASCQ: 0x00h;
*/
--- 2552,2574 ----
break;
case MFI_STAT_LD_LBA_OUT_OF_RANGE:
pkt->pkt_state |= STATE_ARQ_DONE;
pkt->pkt_reason = CMD_CMPLT;
! ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
arqstat = (void *)(pkt->pkt_scbp);
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_state |= STATE_GOT_BUS
| STATE_GOT_TARGET | STATE_SENT_CMD
| STATE_XFERRED_DATA;
! *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
arqstat->sts_sensedata.es_valid = 1;
! arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
! arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
/*
* LOGICAL BLOCK ADDRESS OUT OF RANGE:
* ASC: 0x21h; ASCQ: 0x00h;
*/
*** 2592,2657 ****
break;
}
atomic_add_16(&instance->fw_outstanding, (-1));
/* Call the callback routine */
! if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
! pkt->pkt_comp) {
(*pkt->pkt_comp)(pkt);
- }
con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.status, 0);
! ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_raid_io->RaidContext.extStatus, 0);
return_raid_msg_pkt(instance, cmd);
break;
}
! case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: // MFA command.
! if (cmd->frame->dcmd.opcode
! == MR_DCMD_LD_MAP_GET_INFO &&
! cmd->frame->dcmd.mbox.b[1]
! == 1) {
mutex_enter(&instance->sync_map_mtx);
con_log(CL_ANN, (CE_NOTE,
"LDMAP sync command SMID RECEIVED 0x%X",
cmd->SMID));
if (cmd->frame->hdr.cmd_status != 0) {
cmn_err(CE_WARN,
! "map sync failed, status = 0x%x.\n",cmd->frame->hdr.cmd_status);
! }
! else {
instance->map_id++;
cmn_err(CE_NOTE,
! "map sync received, switched map_id to %ld \n",instance->map_id);
}
! if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info))
instance->fast_path_io = 1;
! else
instance->fast_path_io = 0;
con_log(CL_ANN, (CE_NOTE,
! "instance->fast_path_io %d \n",instance->fast_path_io));
instance->unroll.syncCmd = 0;
! if(instance->map_update_cmd == cmd) {
return_raid_msg_pkt(instance, cmd);
atomic_add_16(&instance->fw_outstanding, (-1));
! mrsas_tbolt_sync_map_info(instance);
}
! cmn_err(CE_NOTE, "LDMAP sync completed.\n");
mutex_exit(&instance->sync_map_mtx);
break;
}
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
--- 2586,2661 ----
break;
}
atomic_add_16(&instance->fw_outstanding, (-1));
+ (void) mrsas_common_check(instance, cmd);
+ if (acmd->cmd_dmahandle) {
+ if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
+ DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip,
+ DDI_SERVICE_UNAFFECTED);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ pkt->pkt_statistics = 0;
+ }
+ }
+
/* Call the callback routine */
! if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
(*pkt->pkt_comp)(pkt);
con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
! ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
! ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
return_raid_msg_pkt(instance, cmd);
break;
}
! case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
! if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
! cmd->frame->dcmd.mbox.b[1] == 1) {
mutex_enter(&instance->sync_map_mtx);
con_log(CL_ANN, (CE_NOTE,
"LDMAP sync command SMID RECEIVED 0x%X",
cmd->SMID));
if (cmd->frame->hdr.cmd_status != 0) {
cmn_err(CE_WARN,
! "map sync failed, status = 0x%x.",
! cmd->frame->hdr.cmd_status);
! } else {
instance->map_id++;
cmn_err(CE_NOTE,
! "map sync received, switched map_id to %"
! PRIu64 " \n", instance->map_id);
}
! if (MR_ValidateMapInfo(instance->ld_map[
! (instance->map_id & 1)],
! instance->load_balance_info)) {
instance->fast_path_io = 1;
! } else {
instance->fast_path_io = 0;
+ }
con_log(CL_ANN, (CE_NOTE,
! "instance->fast_path_io %d",
! instance->fast_path_io));
instance->unroll.syncCmd = 0;
! if (instance->map_update_cmd == cmd) {
return_raid_msg_pkt(instance, cmd);
atomic_add_16(&instance->fw_outstanding, (-1));
! (void) mrsas_tbolt_sync_map_info(instance);
}
! cmn_err(CE_NOTE, "LDMAP sync completed.");
mutex_exit(&instance->sync_map_mtx);
break;
}
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
*** 2658,2695 ****
con_log(CL_ANN1, (CE_CONT,
"AEN command SMID RECEIVED 0x%X",
cmd->SMID));
if ((instance->aen_cmd == cmd) &&
(instance->aen_cmd->abort_aen)) {
! con_log(CL_ANN, (CE_WARN,
! "mrsas_softintr: "
"aborted_aen returned"));
! }
! else
! {
atomic_add_16(&instance->fw_outstanding, (-1));
service_mfi_aen(instance, cmd);
}
}
! if (cmd->sync_cmd == MRSAS_TRUE ) {
con_log(CL_ANN1, (CE_CONT,
"Sync-mode Command Response SMID RECEIVED 0x%X",
cmd->SMID));
tbolt_complete_cmd_in_sync_mode(instance, cmd);
! }
! else
! {
con_log(CL_ANN, (CE_CONT,
"tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
cmd->SMID));
}
break;
default:
/* free message */
! con_log(CL_ANN, (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
break;
}
}
uint_t
--- 2662,2698 ----
con_log(CL_ANN1, (CE_CONT,
"AEN command SMID RECEIVED 0x%X",
cmd->SMID));
if ((instance->aen_cmd == cmd) &&
(instance->aen_cmd->abort_aen)) {
! con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
"aborted_aen returned"));
! } else {
atomic_add_16(&instance->fw_outstanding, (-1));
service_mfi_aen(instance, cmd);
}
}
! if (cmd->sync_cmd == MRSAS_TRUE) {
con_log(CL_ANN1, (CE_CONT,
"Sync-mode Command Response SMID RECEIVED 0x%X",
cmd->SMID));
tbolt_complete_cmd_in_sync_mode(instance, cmd);
! } else {
con_log(CL_ANN, (CE_CONT,
"tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
cmd->SMID));
}
break;
default:
+ mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+
/* free message */
! con_log(CL_ANN,
! (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
break;
}
}
uint_t
*** 2699,2711 ****
Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
Mpi2ReplyDescriptorsUnion_t *desc;
uint16_t smid;
union desc_value d_val;
struct mrsas_cmd *cmd;
- uint32_t i;
- Mpi2RaidSCSIIORequest_t *scsi_raid_io;
- uint8_t status;
struct mrsas_header *hdr;
struct scsi_pkt *pkt;
(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
--- 2702,2711 ----
*** 2722,2734 ****
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
return (DDI_INTR_UNCLAIMED);
! con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %llx Words = %llx \n",
! desc, desc->Words));
d_val.word = desc->Words;
/* Read Reply descriptor */
while ((d_val.u1.low != 0xffffffff) &&
--- 2722,2744 ----
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
return (DDI_INTR_UNCLAIMED);
! if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
! != DDI_SUCCESS) {
! mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
! ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
! con_log(CL_ANN1,
! (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
! "FMA check, returning DDI_INTR_UNCLAIMED"));
! return (DDI_INTR_CLAIMED);
! }
+ con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
+ (void *)desc, desc->Words));
+
d_val.word = desc->Words;
/* Read Reply descriptor */
while ((d_val.u1.low != 0xffffffff) &&
*** 2739,2785 ****
smid = replyDesc->SMID;
if (!smid || smid > instance->max_fw_cmds + 1) {
con_log(CL_ANN1, (CE_NOTE,
! "Reply Desc at Break = %llx Words = %llx \n",
! desc, desc->Words));
break;
}
cmd = instance->cmd_list[smid - 1];
! if(!cmd ) {
! con_log(CL_ANN1, (CE_NOTE,
! "mr_sas_tbolt_process_outstanding_cmd: Invalid command "
! " or Poll commad Received in completion path\n"));
! }
! else {
mutex_enter(&instance->cmd_pend_mtx);
if (cmd->sync_cmd == MRSAS_TRUE) {
hdr = (struct mrsas_header *)&cmd->frame->hdr;
if (hdr) {
! con_log(CL_ANN1, (CE_NOTE,
! "mr_sas_tbolt_process_outstanding_cmd:"
! " mlist_del_init(&cmd->list).\n"));
mlist_del_init(&cmd->list);
}
} else {
pkt = cmd->pkt;
if (pkt) {
! con_log(CL_ANN1, (CE_NOTE,
! "mr_sas_tbolt_process_outstanding_cmd:"
! "mlist_del_init(&cmd->list).\n"));
mlist_del_init(&cmd->list);
}
}
mutex_exit(&instance->cmd_pend_mtx);
tbolt_complete_cmd(instance, cmd);
}
! // set it back to all 0xfffffffff.
! desc->Words = (uint64_t)~0;
instance->reply_read_index++;
if (instance->reply_read_index >= (instance->reply_q_depth)) {
con_log(CL_ANN1, (CE_NOTE, "wrap around"));
--- 2749,2794 ----
smid = replyDesc->SMID;
if (!smid || smid > instance->max_fw_cmds + 1) {
con_log(CL_ANN1, (CE_NOTE,
! "Reply Desc at Break = %p Words = %" PRIx64,
! (void *)desc, desc->Words));
break;
}
cmd = instance->cmd_list[smid - 1];
! if (!cmd) {
! con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
! "outstanding_cmd: Invalid command "
! " or Poll commad Received in completion path"));
! } else {
mutex_enter(&instance->cmd_pend_mtx);
if (cmd->sync_cmd == MRSAS_TRUE) {
hdr = (struct mrsas_header *)&cmd->frame->hdr;
if (hdr) {
! con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
! "tbolt_process_outstanding_cmd:"
! " mlist_del_init(&cmd->list)."));
mlist_del_init(&cmd->list);
}
} else {
pkt = cmd->pkt;
if (pkt) {
! con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
! "tbolt_process_outstanding_cmd:"
! "mlist_del_init(&cmd->list)."));
mlist_del_init(&cmd->list);
}
}
mutex_exit(&instance->cmd_pend_mtx);
tbolt_complete_cmd(instance, cmd);
}
! /* set it back to all 1s. */
! desc->Words = -1LL;
instance->reply_read_index++;
if (instance->reply_read_index >= (instance->reply_q_depth)) {
con_log(CL_ANN1, (CE_NOTE, "wrap around"));
*** 2795,2806 ****
replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
d_val.word = desc->Words;
con_log(CL_ANN1, (CE_NOTE,
! "Next Reply Desc = %llx Words = %llx\n",
! desc, desc->Words));
replyType = replyDesc->ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
--- 2804,2815 ----
replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
d_val.word = desc->Words;
con_log(CL_ANN1, (CE_NOTE,
! "Next Reply Desc = %p Words = %" PRIx64,
! (void *)desc, desc->Words));
replyType = replyDesc->ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
*** 2888,2905 ****
ci = instance->ld_map[(instance->map_id & 1)];
ci_h = instance->ld_map_phy[(instance->map_id & 1)];
if (!ci) {
! cmn_err(CE_WARN,
! "Failed to alloc mem for ld_map_info");
return_raid_msg_pkt(instance, cmd);
return (-1);
}
! memset(ci, 0, sizeof (*ci));
! memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
dcmd->cmd = MFI_CMD_OP_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
--- 2897,2913 ----
ci = instance->ld_map[(instance->map_id & 1)];
ci_h = instance->ld_map_phy[(instance->map_id & 1)];
if (!ci) {
! cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
return_raid_msg_pkt(instance, cmd);
return (-1);
}
! bzero(ci, sizeof (*ci));
! bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
dcmd->cmd = MFI_CMD_OP_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
*** 2913,2927 ****
mr_sas_tbolt_build_mfi_cmd(instance, cmd);
if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
ret = 0;
! con_log(CL_ANN1, (CE_NOTE,
! "Get LD Map Info success\n"));
} else {
! cmn_err(CE_WARN,
! "Get LD Map Info failed\n");
ret = -1;
}
return_raid_msg_pkt(instance, cmd);
--- 2921,2933 ----
mr_sas_tbolt_build_mfi_cmd(instance, cmd);
if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
ret = 0;
! con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
} else {
! cmn_err(CE_WARN, "Get LD Map Info failed");
ret = -1;
}
return_raid_msg_pkt(instance, cmd);
*** 2943,2982 ****
"i=%d, %x:%x",
i, d_val.u1.high, d_val.u1.low));
}
}
! /**
* mrsas_tbolt_command_create - Create command for fast path.
* @io_info: MegaRAID IO request packet pointer.
* @ref_tag: Reference tag for RD/WRPROTECT
*
* Create the command for fast path.
*/
void
! mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],struct IO_REQUEST_INFO *io_info,Mpi2RaidSCSIIORequest_t *scsi_io_request, U32 ref_tag)
{
uint16_t EEDPFlags;
uint32_t Control;
! // Prepare 32-byte CDB if DIF is supported on this device
! con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
! memset(cdb, 0, 32);
cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
! if (io_info->isRead) {
cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
! }
! else {
cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
- }
! cdb[10] = MRSAS_RD_WR_PROTECT; // Verify with in linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL
/* LOGICAL BLOCK ADDRESS */
cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
--- 2949,2992 ----
"i=%d, %x:%x",
i, d_val.u1.high, d_val.u1.low));
}
}
! /*
* mrsas_tbolt_command_create - Create command for fast path.
* @io_info: MegaRAID IO request packet pointer.
* @ref_tag: Reference tag for RD/WRPROTECT
*
* Create the command for fast path.
*/
void
! mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
! struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
! U32 ref_tag)
{
uint16_t EEDPFlags;
uint32_t Control;
! ddi_acc_handle_t acc_handle =
! instance->mpi2_frame_pool_dma_obj.acc_handle;
! /* Prepare 32-byte CDB if DIF is supported on this device */
! con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
+ bzero(cdb, 32);
+
cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
! if (io_info->isRead)
cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
! else
cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
! /* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
! cdb[10] = MRSAS_RD_WR_PROTECT;
/* LOGICAL BLOCK ADDRESS */
cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
*** 2985,3048 ****
cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
cdb[19] = (U8)((io_info->pdBlock) & 0xff);
/* Logical block reference tag */
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
! BIG_ENDIAN(ref_tag));
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask,
! 0xffff);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->DataLength,
((io_info->numBlocks)*512));
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->IoFlags,32); /* Specify 32-byte cdb */
/* Transfer length */
cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
cdb[31] = (U8)((io_info->numBlocks) & 0xff);
/* set SCSI IO EEDPFlags */
! EEDPFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->EEDPFlags);
! Control = ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->Control);
! // set SCSI IO EEDPFlags bits
if (io_info->isRead) {
! // For READ commands, the EEDPFlags shall be set to specify to
! // Increment the Primary Reference Tag, to Check the Reference
! // Tag, and to Check and Remove the Protection Information
! // fields.
EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
! }
! else {
! // For WRITE commands, the EEDPFlags shall be set to specify to
! // Increment the Primary Reference Tag, and to Insert
! // Protection Information fields.
EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
Control |= (0x4 << 26);
! ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->EEDPFlags, EEDPFlags);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->Control, Control);
! ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
! &scsi_io_request->EEDPBlockSize,
! MRSAS_EEDPBLOCKSIZE);
}
/*
* mrsas_tbolt_set_pd_lba - Sets PD LBA
--- 2995,3053 ----
cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
cdb[19] = (U8)((io_info->pdBlock) & 0xff);
/* Logical block reference tag */
! ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
! BE_32(ref_tag));
! ddi_put16(acc_handle,
! &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
! ddi_put32(acc_handle, &scsi_io_request->DataLength,
((io_info->numBlocks)*512));
! /* Specify 32-byte cdb */
! ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
/* Transfer length */
cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
cdb[31] = (U8)((io_info->numBlocks) & 0xff);
/* set SCSI IO EEDPFlags */
! EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
! Control = ddi_get32(acc_handle, &scsi_io_request->Control);
! /* set SCSI IO EEDPFlags bits */
if (io_info->isRead) {
! /*
! * For READ commands, the EEDPFlags shall be set to specify to
! * Increment the Primary Reference Tag, to Check the Reference
! * Tag, and to Check and Remove the Protection Information
! * fields.
! */
EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
! } else {
! /*
! * For WRITE commands, the EEDPFlags shall be set to specify to
! * Increment the Primary Reference Tag, and to Insert
! * Protection Information fields.
! */
EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
Control |= (0x4 << 26);
! ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
! ddi_put32(acc_handle, &scsi_io_request->Control, Control);
! ddi_put32(acc_handle,
! &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
}
/*
* mrsas_tbolt_set_pd_lba - Sets PD LBA
*** 3050,3083 ****
* @cdb_len: cdb length
* @start_blk: Start block of IO
*
* Used to set the PD LBA in CDB for FP IOs
*/
! void
! mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk, U32 num_blocks, U8 DifCapable)
{
U8 cdb_len = *cdb_len_ptr;
U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
(start_blk <= 0xffffffff)) {
if (cdb_len == 16) {
! con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(16) to READ10\n"));
opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
flagvals = cdb[1];
groupnum = cdb[14];
control = cdb[15];
} else {
! con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(12) to READ10\n"));
opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
flagvals = cdb[1];
groupnum = cdb[10];
control = cdb[11];
}
! memset(cdb, 0, sizeof(cdb));
cdb[0] = opcode;
cdb[1] = flagvals;
cdb[6] = groupnum;
cdb[9] = control;
--- 3055,3091 ----
* @cdb_len: cdb length
* @start_blk: Start block of IO
*
* Used to set the PD LBA in CDB for FP IOs
*/
! static void
! mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
! U32 num_blocks)
{
U8 cdb_len = *cdb_len_ptr;
U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
(start_blk <= 0xffffffff)) {
if (cdb_len == 16) {
! con_log(CL_ANN,
! (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
flagvals = cdb[1];
groupnum = cdb[14];
control = cdb[15];
} else {
! con_log(CL_ANN,
! (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
flagvals = cdb[1];
groupnum = cdb[10];
control = cdb[11];
}
! bzero(cdb, sizeof (cdb));
cdb[0] = opcode;
cdb[1] = flagvals;
cdb[6] = groupnum;
cdb[9] = control;
*** 3085,3095 ****
cdb[8] = (U8)(num_blocks & 0xff);
cdb[7] = (U8)((num_blocks >> 8) & 0xff);
cdb_len = 10;
} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
/* Convert to 16 byte CDB for large LBA's */
! con_log(CL_ANN, (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB\n"));
switch (cdb_len) {
case 6:
opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
control = cdb[5];
break;
--- 3093,3104 ----
cdb[8] = (U8)(num_blocks & 0xff);
cdb[7] = (U8)((num_blocks >> 8) & 0xff);
cdb_len = 10;
} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
/* Convert to 16 byte CDB for large LBA's */
! con_log(CL_ANN,
! (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
switch (cdb_len) {
case 6:
opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
control = cdb[5];
break;
*** 3105,3115 ****
groupnum = cdb[10];
control = cdb[11];
break;
}
! memset(cdb, 0, sizeof(cdb));
cdb[0] = opcode;
cdb[1] = flagvals;
cdb[14] = groupnum;
cdb[15] = control;
--- 3114,3124 ----
groupnum = cdb[10];
control = cdb[11];
break;
}
! bzero(cdb, sizeof (cdb));
cdb[0] = opcode;
cdb[1] = flagvals;
cdb[14] = groupnum;
cdb[15] = control;
*** 3125,3135 ****
} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
/* convert to 10 byte CDB */
opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
control = cdb[5];
! memset(cdb, 0, sizeof(cdb));
cdb[0] = opcode;
cdb[9] = control;
/* Set transfer length */
cdb[8] = (U8)(num_blocks & 0xff);
--- 3134,3144 ----
} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
/* convert to 10 byte CDB */
opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
control = cdb[5];
! bzero(cdb, sizeof (cdb));
cdb[0] = opcode;
cdb[9] = control;
/* Set transfer length */
cdb[8] = (U8)(num_blocks & 0xff);
*** 3177,3252 ****
*cdb_len_ptr = cdb_len;
}
! U8
mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
{
MR_FW_RAID_MAP_ALL *ld_map;
if (!mrsas_tbolt_get_ld_map_info(instance)) {
ld_map = instance->ld_map[(instance->map_id & 1)];
! con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
! if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info)) {
! con_log(CL_ANN, (CE_CONT,
! "MR_ValidateMapInfo success"));
instance->fast_path_io = 1;
! con_log(CL_ANN, (CE_NOTE,
! "instance->fast_path_io %d \n",instance->fast_path_io));
return (DDI_SUCCESS);
}
}
instance->fast_path_io = 0;
cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
con_log(CL_ANN, (CE_NOTE,
! "instance->fast_path_io %d \n",instance->fast_path_io));
-
return (DDI_FAILURE);
}
/*
* Marks HBA as bad. This will be called either when an
* IO packet times out even after 3 FW resets
* or FW is found to be fault even after 3 continuous resets.
*/
! int
mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
{
! cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
if (instance->deadadapter == 1)
! return (DDI_FAILURE);
con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
"Writing to doorbell with MFI_STOP_ADP "));
mutex_enter(&instance->ocr_flags_mtx);
instance->deadadapter = 1;
mutex_exit(&instance->ocr_flags_mtx);
instance->func_ptr->disable_intr(instance);
WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
/* Flush */
! RD_RESERVED0_REGISTER(instance);
(void) mrsas_print_pending_cmds(instance);
! mrsas_complete_pending_cmds(instance);
! return (DDI_SUCCESS);
}
! void mrsas_reset_reply_desc(struct mrsas_instance *instance)
{
int i;
MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
! instance->reply_read_index= 0;
/* initializing reply address to 0xFFFFFFFF */
reply_desc = instance->reply_frame_pool;
for (i = 0; i < instance->reply_q_depth; i++) {
--- 3186,3264 ----
*cdb_len_ptr = cdb_len;
}
! static int
mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
{
MR_FW_RAID_MAP_ALL *ld_map;
if (!mrsas_tbolt_get_ld_map_info(instance)) {
ld_map = instance->ld_map[(instance->map_id & 1)];
! con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
! if (MR_ValidateMapInfo(instance->ld_map[
! (instance->map_id & 1)], instance->load_balance_info)) {
! con_log(CL_ANN,
! (CE_CONT, "MR_ValidateMapInfo success"));
instance->fast_path_io = 1;
! con_log(CL_ANN,
! (CE_NOTE, "instance->fast_path_io %d",
! instance->fast_path_io));
return (DDI_SUCCESS);
}
}
instance->fast_path_io = 0;
cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
con_log(CL_ANN, (CE_NOTE,
! "instance->fast_path_io %d", instance->fast_path_io));
return (DDI_FAILURE);
}
+
/*
* Marks HBA as bad. This will be called either when an
* IO packet times out even after 3 FW resets
* or FW is found to be fault even after 3 continuous resets.
*/
! void
mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
{
! cmn_err(CE_NOTE, "TBOLT Kill adapter called");
if (instance->deadadapter == 1)
! return;
con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
"Writing to doorbell with MFI_STOP_ADP "));
mutex_enter(&instance->ocr_flags_mtx);
instance->deadadapter = 1;
mutex_exit(&instance->ocr_flags_mtx);
instance->func_ptr->disable_intr(instance);
WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
/* Flush */
! (void) RD_RESERVED0_REGISTER(instance);
(void) mrsas_print_pending_cmds(instance);
! (void) mrsas_complete_pending_cmds(instance);
}
!
! void
! mrsas_reset_reply_desc(struct mrsas_instance *instance)
{
int i;
MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
! instance->reply_read_index = 0;
/* initializing reply address to 0xFFFFFFFF */
reply_desc = instance->reply_frame_pool;
for (i = 0; i < instance->reply_q_depth; i++) {
*** 3256,3276 ****
}
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
! uint32_t status=0x00;
uint32_t retry = 0;
- uint32_t seq_num;
uint32_t cur_abs_reg_val;
uint32_t fw_state;
- union mrsas_evt_class_locale class_locale;
uint32_t abs_state;
uint32_t i;
con_log(CL_ANN, (CE_NOTE,
! "mrsas_tbolt_reset_ppc entered\n "));
if (instance->deadadapter == 1) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"no more resets as HBA has been marked dead ");
return (DDI_FAILURE);
--- 3268,3286 ----
}
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
! uint32_t status = 0x00;
uint32_t retry = 0;
uint32_t cur_abs_reg_val;
uint32_t fw_state;
uint32_t abs_state;
uint32_t i;
con_log(CL_ANN, (CE_NOTE,
! "mrsas_tbolt_reset_ppc entered"));
if (instance->deadadapter == 1) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"no more resets as HBA has been marked dead ");
return (DDI_FAILURE);
*** 3282,3297 ****
"adpterresetinprogress flag set, time %llx", gethrtime()));
mutex_exit(&instance->ocr_flags_mtx);
instance->func_ptr->disable_intr(instance);
! /*Add delay inorder to complete the ioctl & io cmds in-flight */
! for (i = 0; i<3000; i++) {
drv_usecwait(MILLISEC); /* wait for 1000 usecs */
}
! instance->reply_read_index= 0;
retry_reset:
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
":Resetting TBOLT "));
--- 3292,3307 ----
"adpterresetinprogress flag set, time %llx", gethrtime()));
mutex_exit(&instance->ocr_flags_mtx);
instance->func_ptr->disable_intr(instance);
! /* Add delay in order to complete the ioctl & io cmds in-flight */
! for (i = 0; i < 3000; i++) {
drv_usecwait(MILLISEC); /* wait for 1000 usecs */
}
! instance->reply_read_index = 0;
retry_reset:
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
":Resetting TBOLT "));
*** 3301,3351 ****
WR_TBOLT_IB_WRITE_SEQ(2, instance);
WR_TBOLT_IB_WRITE_SEQ(7, instance);
WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
con_log(CL_ANN1, (CE_NOTE,
"mrsas_tbolt_reset_ppc: magic number written "
! "to write sequence register\n"));
delay(100 * drv_usectohz(MILLISEC));
status = RD_TBOLT_HOST_DIAG(instance);
con_log(CL_ANN1, (CE_NOTE,
"mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
! "to write sequence register\n"));
while (status & DIAG_TBOLT_RESET_ADAPTER) {
delay(100 * drv_usectohz(MILLISEC));
status = RD_TBOLT_HOST_DIAG(instance);
if (retry++ == 100) {
cmn_err(CE_WARN,
"mrsas_tbolt_reset_ppc:"
"resetadapter bit is set already "
! "check retry count %d\n", retry);
return (DDI_FAILURE);
}
}
WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
delay(100 * drv_usectohz(MILLISEC));
ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
(uint8_t *)((uintptr_t)(instance)->regmap +
! RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);
while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
delay(100 * drv_usectohz(MILLISEC));
ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
(uint8_t *)((uintptr_t)(instance)->regmap +
! RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);
if (retry++ == 100) {
/* Dont call kill adapter here */
/* RESET BIT ADAPTER is cleared by firmare */
! //mrsas_tbolt_kill_adapter(instance);
! cmn_err(CE_WARN, "mr_sas %d: %s(): RESET FAILED; return failure!!!", instance->instance, __func__);
return (DDI_FAILURE);
}
}
! con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mfi_state_transition_to_ready"));
abs_state = instance->func_ptr->read_fw_status_reg(instance);
retry = 0;
--- 3311,3364 ----
WR_TBOLT_IB_WRITE_SEQ(2, instance);
WR_TBOLT_IB_WRITE_SEQ(7, instance);
WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
con_log(CL_ANN1, (CE_NOTE,
"mrsas_tbolt_reset_ppc: magic number written "
! "to write sequence register"));
delay(100 * drv_usectohz(MILLISEC));
status = RD_TBOLT_HOST_DIAG(instance);
con_log(CL_ANN1, (CE_NOTE,
"mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
! "to write sequence register"));
while (status & DIAG_TBOLT_RESET_ADAPTER) {
delay(100 * drv_usectohz(MILLISEC));
status = RD_TBOLT_HOST_DIAG(instance);
if (retry++ == 100) {
cmn_err(CE_WARN,
"mrsas_tbolt_reset_ppc:"
"resetadapter bit is set already "
! "check retry count %d", retry);
return (DDI_FAILURE);
}
}
WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
delay(100 * drv_usectohz(MILLISEC));
ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
(uint8_t *)((uintptr_t)(instance)->regmap +
! RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
delay(100 * drv_usectohz(MILLISEC));
ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
(uint8_t *)((uintptr_t)(instance)->regmap +
! RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
if (retry++ == 100) {
/* Don't call kill adapter here */
/* RESET BIT ADAPTER is cleared by firmware */
! /* mrsas_tbolt_kill_adapter(instance); */
! cmn_err(CE_WARN,
! "mr_sas %d: %s(): RESET FAILED; return failure!!!",
! instance->instance, __func__);
return (DDI_FAILURE);
}
}
! con_log(CL_ANN,
! (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mfi_state_transition_to_ready"));
abs_state = instance->func_ptr->read_fw_status_reg(instance);
retry = 0;
*** 3352,3363 ****
while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
delay(100 * drv_usectohz(MILLISEC));
abs_state = instance->func_ptr->read_fw_status_reg(instance);
}
if (abs_state <= MFI_STATE_FW_INIT) {
! cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
! "state = 0x%x, RETRY RESET.\n", abs_state);
goto retry_reset;
}
/* Mark HBA as bad, if FW is fault after 3 continuous resets */
if (mfi_state_transition_to_ready(instance) ||
--- 3365,3377 ----
while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
delay(100 * drv_usectohz(MILLISEC));
abs_state = instance->func_ptr->read_fw_status_reg(instance);
}
if (abs_state <= MFI_STATE_FW_INIT) {
! cmn_err(CE_WARN,
! "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
! "state = 0x%x, RETRY RESET.", abs_state);
goto retry_reset;
}
/* Mark HBA as bad, if FW is fault after 3 continuous resets */
if (mfi_state_transition_to_ready(instance) ||
*** 3370,3384 ****
"mrsas_tbolt_reset_ppc :before fake: FW is not ready "
"FW state = 0x%x", fw_state));
if (debug_tbolt_fw_faults_after_ocr_g == 1)
fw_state = MFI_STATE_FAULT;
! con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
"FW state = 0x%x", fw_state));
if (fw_state == MFI_STATE_FAULT) {
! // increment the count
instance->fw_fault_count_after_ocr++;
if (instance->fw_fault_count_after_ocr
< MAX_FW_RESET_COUNT) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"FW is in fault after OCR count %d "
--- 3384,3399 ----
"mrsas_tbolt_reset_ppc :before fake: FW is not ready "
"FW state = 0x%x", fw_state));
if (debug_tbolt_fw_faults_after_ocr_g == 1)
fw_state = MFI_STATE_FAULT;
! con_log(CL_ANN,
! (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
"FW state = 0x%x", fw_state));
if (fw_state == MFI_STATE_FAULT) {
! /* increment the count */
instance->fw_fault_count_after_ocr++;
if (instance->fw_fault_count_after_ocr
< MAX_FW_RESET_COUNT) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"FW is in fault after OCR count %d "
*** 3388,3453 ****
} else {
cmn_err(CE_WARN, "mrsas %d: %s:"
"Max Reset Count exceeded >%d"
"Mark HBA as bad, KILL adapter",
! instance->instance, __func__, MAX_FW_RESET_COUNT);
mrsas_tbolt_kill_adapter(instance);
return (DDI_FAILURE);
}
}
}
! // reset the counter as FW is up after OCR
instance->fw_fault_count_after_ocr = 0;
mrsas_reset_reply_desc(instance);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mrsas_issue_init_mpi2"));
abs_state = mrsas_issue_init_mpi2(instance);
! if(abs_state == DDI_FAILURE) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"INIT failed Retrying Reset");
goto retry_reset;
}
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"mrsas_issue_init_mpi2 Done"));
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
! "Calling mrsas_print_pending_cmd\n"));
! mrsas_print_pending_cmds(instance);
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
! "mrsas_print_pending_cmd done\n"));
instance->func_ptr->enable_intr(instance);
instance->fw_outstanding = 0;
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mrsas_issue_pending_cmds"));
! mrsas_issue_pending_cmds(instance);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
! "issue_pending_cmds done.\n"));
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling aen registration"));
instance->aen_cmd->retry_count_for_ocr = 0;
instance->aen_cmd->drv_pkt_time = 0;
instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
! con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
mutex_enter(&instance->ocr_flags_mtx);
instance->adapterresetinprogress = 0;
mutex_exit(&instance->ocr_flags_mtx);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"adpterresetinprogress flag unset"));
! con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
return (DDI_SUCCESS);
}
--- 3403,3469 ----
} else {
cmn_err(CE_WARN, "mrsas %d: %s:"
"Max Reset Count exceeded >%d"
"Mark HBA as bad, KILL adapter",
! instance->instance, __func__,
! MAX_FW_RESET_COUNT);
mrsas_tbolt_kill_adapter(instance);
return (DDI_FAILURE);
}
}
}
! /* reset the counter as FW is up after OCR */
instance->fw_fault_count_after_ocr = 0;
mrsas_reset_reply_desc(instance);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mrsas_issue_init_mpi2"));
abs_state = mrsas_issue_init_mpi2(instance);
! if (abs_state == (uint32_t)DDI_FAILURE) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"INIT failed Retrying Reset");
goto retry_reset;
}
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"mrsas_issue_init_mpi2 Done"));
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
! "Calling mrsas_print_pending_cmd"));
! (void) mrsas_print_pending_cmds(instance);
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
! "mrsas_print_pending_cmd done"));
instance->func_ptr->enable_intr(instance);
instance->fw_outstanding = 0;
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mrsas_issue_pending_cmds"));
! (void) mrsas_issue_pending_cmds(instance);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
! "issue_pending_cmds done."));
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling aen registration"));
instance->aen_cmd->retry_count_for_ocr = 0;
instance->aen_cmd->drv_pkt_time = 0;
instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
! con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
mutex_enter(&instance->ocr_flags_mtx);
instance->adapterresetinprogress = 0;
mutex_exit(&instance->ocr_flags_mtx);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"adpterresetinprogress flag unset"));
! con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
return (DDI_SUCCESS);
}
*** 3458,3468 ****
* Issues an internal command (DCMD) to get the FW's controller PD
* list structure. This information is mainly used to find out SYSTEM
* supported by the FW.
*/
! int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
int ret = 0, i;
struct mrsas_cmd *cmd = NULL;
struct mrsas_dcmd_frame *dcmd;
--- 3474,3484 ----
* Issues an internal command (DCMD) to get the FW's controller PD
* list structure. This information is mainly used to find out SYSTEM
* supported by the FW.
*/
! static int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
int ret = 0, i;
struct mrsas_cmd *cmd = NULL;
struct mrsas_dcmd_frame *dcmd;
*** 3475,3491 ****
uint32_t size_map_info;
cmd = get_raid_msg_pkt(instance);
if (cmd == NULL) {
! cmn_err(CE_WARN,
! "Failed to get a cmd from free-pool in mrsas_tbolt_sync_map_info(). ");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
! (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
--- 3491,3507 ----
uint32_t size_map_info;
cmd = get_raid_msg_pkt(instance);
if (cmd == NULL) {
! cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
! "mrsas_tbolt_sync_map_info(). ");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
! bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
*** 3495,3543 ****
dcmd = &cmd->frame->dcmd;
size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
! con_log(CL_ANN, (CE_NOTE,
! "size_sync_info =0x%x ; ld count = 0x%x \n ",
size_sync_info, num_lds));
ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
! memset(ci, 0, sizeof(MR_FW_RAID_MAP_ALL));
ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
! (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
ld_sync = (LD_TARGET_SYNC *)ci;
for (i = 0; i < num_lds; i++, ld_sync++) {
raid = MR_LdRaidGet(i, map);
! con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
i, raid->seqNum, raid->flags.ldSyncRequired));
ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
! con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
i, ld_sync->ldTargetId));
ld_sync->seqNum = raid->seqNum;
}
! size_map_info = sizeof(MR_FW_RAID_MAP) +
! (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
dcmd->cmd = MFI_CMD_OP_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = size_map_info;
! dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = 1; /* Pend */
dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = ci_h;
dcmd->sgl.sge32[0].length = size_map_info;
--- 3511,3560 ----
dcmd = &cmd->frame->dcmd;
size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
! con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
size_sync_info, num_lds));
ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
! bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
! bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
ld_sync = (LD_TARGET_SYNC *)ci;
for (i = 0; i < num_lds; i++, ld_sync++) {
raid = MR_LdRaidGet(i, map);
! con_log(CL_ANN1,
! (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
i, raid->seqNum, raid->flags.ldSyncRequired));
ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
! con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
i, ld_sync->ldTargetId));
ld_sync->seqNum = raid->seqNum;
}
! size_map_info = sizeof (MR_FW_RAID_MAP) +
! (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
dcmd->cmd = MFI_CMD_OP_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = size_map_info;
! ASSERT(num_lds <= 255);
! dcmd->mbox.b[0] = (U8)num_lds;
dcmd->mbox.b[1] = 1; /* Pend */
dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = ci_h;
dcmd->sgl.sge32[0].length = size_map_info;
*** 3546,3556 ****
mr_sas_tbolt_build_mfi_cmd(instance, cmd);
instance->func_ptr->issue_cmd(cmd, instance);
instance->unroll.syncCmd = 1;
! con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x",cmd->SMID));
return (ret);
}
/*
--- 3563,3573 ----
mr_sas_tbolt_build_mfi_cmd(instance, cmd);
instance->func_ptr->issue_cmd(cmd, instance);
instance->unroll.syncCmd = 1;
! con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
return (ret);
}
/*
*** 3573,3583 ****
cmn_err(CE_WARN,
"Failed to get a cmd from free-pool abort_syncmap_cmd().");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
! (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
abort_fr = &cmd->frame->abort;
--- 3590,3600 ----
cmn_err(CE_WARN,
"Failed to get a cmd from free-pool abort_syncmap_cmd().");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
! bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
abort_fr = &cmd->frame->abort;
*** 3621,3633 ****
{
struct scsi_device *sd;
dev_info_t *child;
int rval, dtype;
struct mrsas_tbolt_pd_info *pds = NULL;
- uint64_t *wwn;
-
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
tgt, lun));
if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
if (ldip) {
--- 3638,3648 ----
*** 3636,3646 ****
if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
rval = mrsas_service_evt(instance, tgt, 1,
MRSAS_EVT_UNCONFIG_TGT, NULL);
con_log(CL_ANN1, (CE_WARN,
"mr_sas:DELETING STALE ENTRY rval = %d "
! "tgt id = %d ", rval, tgt));
return (NDI_FAILURE);
}
return (NDI_SUCCESS);
}
--- 3651,3661 ----
if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
rval = mrsas_service_evt(instance, tgt, 1,
MRSAS_EVT_UNCONFIG_TGT, NULL);
con_log(CL_ANN1, (CE_WARN,
"mr_sas:DELETING STALE ENTRY rval = %d "
! "tgt id = %d", rval, tgt));
return (NDI_FAILURE);
}
return (NDI_SUCCESS);
}
*** 3647,3660 ****
pds = (struct mrsas_tbolt_pd_info *)
kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
mrsas_tbolt_get_pd_info(instance, pds, tgt);
dtype = pds->scsiDevType;
! /* Check for Disk*/
if ((dtype == DTYPE_DIRECT)) {
if ((dtype == DTYPE_DIRECT) &&
(LE_16(pds->fwState) != PD_SYSTEM)) {
return (NDI_FAILURE);
}
sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
sd->sd_address.a_hba_tran = instance->tran;
sd->sd_address.a_target = (uint16_t)tgt;
--- 3662,3676 ----
pds = (struct mrsas_tbolt_pd_info *)
kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
mrsas_tbolt_get_pd_info(instance, pds, tgt);
dtype = pds->scsiDevType;
! /* Check for Disk */
if ((dtype == DTYPE_DIRECT)) {
if ((dtype == DTYPE_DIRECT) &&
(LE_16(pds->fwState) != PD_SYSTEM)) {
+ kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
return (NDI_FAILURE);
}
sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
sd->sd_address.a_hba_tran = instance->tran;
sd->sd_address.a_target = (uint16_t)tgt;
*** 3665,3676 ****
con_log(CL_DLEVEL1, (CE_NOTE,
"Phys. device found: tgt %d dtype %d: %s",
tgt, dtype, sd->sd_inq->inq_vid));
} else {
rval = NDI_FAILURE;
! con_log(CL_DLEVEL1, (CE_NOTE,
! "Phys. device Not found scsi_hba_probe Failed: tgt %d dtype %d: %s",
tgt, dtype, sd->sd_inq->inq_vid));
}
/* sd_unprobe is blank now. Free buffer manually */
if (sd->sd_inq) {
--- 3681,3692 ----
con_log(CL_DLEVEL1, (CE_NOTE,
"Phys. device found: tgt %d dtype %d: %s",
tgt, dtype, sd->sd_inq->inq_vid));
} else {
rval = NDI_FAILURE;
! con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
! "scsi_hba_probe Failed: tgt %d dtype %d: %s",
tgt, dtype, sd->sd_inq->inq_vid));
}
/* sd_unprobe is blank now. Free buffer manually */
if (sd->sd_inq) {
*** 3689,3715 ****
kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
rval));
return (rval);
}
static void
! mrsas_tbolt_get_pd_info(struct mrsas_instance *instance, struct mrsas_tbolt_pd_info *pds,
! int tgt)
{
struct mrsas_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
dma_obj_t dcmd_dma_obj;
cmd = get_raid_msg_pkt(instance);
if (!cmd) {
! con_log(CL_ANN1, (CE_WARN, "Failed to get a cmd for get pd info"));
return;
}
/* Clear the frame buffer and assign back the context id */
! memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
dcmd = &cmd->frame->dcmd;
--- 3705,3733 ----
kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
rval));
return (rval);
}
+
static void
! mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
! struct mrsas_tbolt_pd_info *pds, int tgt)
{
struct mrsas_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
dma_obj_t dcmd_dma_obj;
cmd = get_raid_msg_pkt(instance);
if (!cmd) {
! con_log(CL_ANN1,
! (CE_WARN, "Failed to get a cmd for get pd info"));
return;
}
/* Clear the frame buffer and assign back the context id */
! bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
dcmd = &cmd->frame->dcmd;
*** 3720,3735 ****
dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
dcmd_dma_obj.dma_attr.dma_attr_align = 1;
(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
DDI_STRUCTURE_LE_ACC);
! (void) memset(dcmd_dma_obj.buffer, 0, sizeof (struct mrsas_tbolt_pd_info));
! (void) memset(dcmd->mbox.b, 0, 12);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
! ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, MFI_FRAME_DIR_READ);
ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
sizeof (struct mrsas_tbolt_pd_info));
ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
MR_DCMD_PD_GET_INFO);
--- 3738,3754 ----
dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
dcmd_dma_obj.dma_attr.dma_attr_align = 1;
(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
DDI_STRUCTURE_LE_ACC);
! bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
! bzero(dcmd->mbox.b, 12);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
! ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
! MFI_FRAME_DIR_READ);
ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
sizeof (struct mrsas_tbolt_pd_info));
ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
MR_DCMD_PD_GET_INFO);