@@ -14,47 +14,66 @@
* Rasheed
* Shakeel Bukhari
*/
-#include <stddef.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"
+/*
+ * FMA header files
+ */
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
-// Pre-TB command size and TB command size.
-#define MR_COMMAND_SIZE (64*20) // 1280 bytes
+
+/* Pre-TB command size and TB command size. */
+#define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
-U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *in_info);
+U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int debug_level_g;
extern unsigned int enable_fp;
volatile int dump_io_wait_time = 90;
extern void
io_timeout_checker(void *arg);
-extern int
-mfi_state_transition_to_ready(struct mrsas_instance *instance);
extern volatile int debug_timeout_g;
extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
-static volatile int debug_tbolt_fw_faults_after_ocr_g = 0;
+/* Local static prototypes. */
+static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
+ struct scsi_address *, struct scsi_pkt *, uchar_t *);
+static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
+ U64 start_blk, U32 num_blocks);
+static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
+static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
+static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
+static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
+#ifdef PDSUPPORT
+static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
+ struct mrsas_tbolt_pd_info *, int);
+#endif /* PDSUPPORT */
+static int debug_tbolt_fw_faults_after_ocr_g = 0;
+
/*
* destroy_mfi_mpi_frame_pool
*/
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
@@ -64,13 +83,14 @@
struct mrsas_cmd *cmd;
/* return all mfi frames to pool */
for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
cmd = instance->cmd_list[i];
- if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
+ if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
(void) mrsas_free_dma_obj(instance,
cmd->frame_dma_obj);
+ }
cmd->frame_dma_obj_status = DMA_OBJ_FREED;
}
}
/*
@@ -93,10 +113,11 @@
*/
void
mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
{
int i;
+
if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
(void) mrsas_free_dma_obj(instance,
instance->mfi_internal_dma_obj);
instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
}
@@ -143,11 +164,10 @@
*/
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
int i = 0;
- int cookie_cnt;
uint16_t max_cmd;
uint32_t sgl_sz;
uint32_t raid_msg_size;
uint32_t total_size;
uint32_t offset;
@@ -158,16 +178,16 @@
max_cmd = instance->max_fw_cmds;
sgl_sz = 1024;
raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
- // Allocating additional 256 bytes to accomodate SMID 0.
+ /* Allocating additional 256 bytes to accommodate SMID 0. */
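+ /*
+  * Pool layout (one contiguous DMA allocation): one message frame
+  * reserved for SMID 0, then max_cmd message frames, max_cmd SGL
+  * chain buffers, and max_cmd sense buffers; the per-command
+  * pointers below are carved out of this single buffer.
+  */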
total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
(max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
- "max_cmd %x ", max_cmd));
+ "max_cmd %x", max_cmd));
con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
"request message frame pool size %x", total_size));
/*
@@ -201,16 +221,14 @@
(uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
instance->io_request_frames_phy =
(uint32_t)
instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
- con_log(CL_DLEVEL3, (CE_NOTE,
- "io_request_frames 0x%x",
- instance->io_request_frames));
+ con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
+ (void *)instance->io_request_frames));
- con_log(CL_DLEVEL3, (CE_NOTE,
- "io_request_frames_phy 0x%x",
+ con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
instance->io_request_frames_phy));
io_req_base = (uint8_t *)instance->io_request_frames +
MRSAS_THUNDERBOLT_MSG_SIZE;
io_req_base_phys = instance->io_request_frames_phy +
@@ -226,54 +244,43 @@
cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
((uint8_t *)io_req_base + offset);
cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
- cmd->sgl = (Mpi2SGEIOUnion_t *)
- ((uint8_t *)io_req_base +
+ cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
(max_cmd * raid_msg_size) + i * sgl_sz);
- cmd->sgl_phys_addr =
- (io_req_base_phys +
+ cmd->sgl_phys_addr = (io_req_base_phys +
(max_cmd * raid_msg_size) + i * sgl_sz);
- cmd->sense1 = (uint8_t *)
- ((uint8_t *)io_req_base +
+ cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
(max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
(i * SENSE_LENGTH));
- cmd->sense_phys_addr1 =
- (io_req_base_phys +
+ cmd->sense_phys_addr1 = (io_req_base_phys +
(max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
(i * SENSE_LENGTH));
- cmd->SMID = i+1;
+ cmd->SMID = i + 1;
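+ /* SMID 0 is reserved for IOC INIT; command SMIDs start at 1. */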
- con_log(CL_DLEVEL3, (CE_NOTE,
- "Frame Pool Addr [%x]0x%x",
- cmd->index, cmd->scsi_io_request));
+ con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
+ cmd->index, (void *)cmd->scsi_io_request));
- con_log(CL_DLEVEL3, (CE_NOTE,
- "Frame Pool Phys Addr [%x]0x%x",
+ con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
cmd->index, cmd->scsi_io_request_phys_addr));
- con_log(CL_DLEVEL3, (CE_NOTE,
- "Sense Addr [%x]0x%x",
- cmd->index, cmd->sense1));
+ con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
+ cmd->index, (void *)cmd->sense1));
- con_log(CL_DLEVEL3, (CE_NOTE,
- "Sense Addr Phys [%x]0x%x",
+ con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
cmd->index, cmd->sense_phys_addr1));
+ con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
+ cmd->index, (void *)cmd->sgl));
- con_log(CL_DLEVEL3, (CE_NOTE,
- "Sgl bufffers [%x]0x%x",
- cmd->index, cmd->sgl));
-
- con_log(CL_DLEVEL3, (CE_NOTE,
- "Sgl bufffers phys [%x]0x%x",
+ con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
cmd->index, cmd->sgl_phys_addr));
}
return (DDI_SUCCESS);
@@ -297,11 +304,12 @@
instance->mfi_internal_dma_obj.size = internal_buf_size;
instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
- instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
+ 0xFFFFFFFFU;
instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN,
@@ -310,12 +318,12 @@
}
bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
- instance->internal_buf = (caddr_t)(((unsigned long)
- instance->mfi_internal_dma_obj.buffer));
+ instance->internal_buf =
+ (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
instance->internal_buf_dmac_add =
instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
instance->internal_buf_size = internal_buf_size;
/* allocate evt_detail */
@@ -326,12 +334,11 @@
instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
- cmn_err(CE_WARN,
- "mrsas_tbolt_alloc_additional_dma_buffer: "
+ cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
"could not allocate data transfer buffer.");
goto fail_tbolt_additional_buff;
}
bzero(instance->mfi_evt_detail_obj.buffer,
@@ -345,11 +352,12 @@
for (i = 0; i < 2; i++) {
/* allocate the data transfer buffer */
instance->ld_map_obj[i].size = instance->size_map_info;
instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
- instance->ld_map_obj[i].dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
+ 0xFFFFFFFFU;
instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
@@ -358,23 +366,22 @@
goto fail_tbolt_additional_buff;
}
instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
- (void) memset(instance->ld_map_obj[i].buffer, 0,
- instance->size_map_info);
+ bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
- instance->ld_map[i] = (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
- instance->ld_map_phy[i] =
- (uint32_t)instance->ld_map_obj[i].dma_cookie[0].dmac_address;
+ instance->ld_map[i] =
+ (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
+ instance->ld_map_phy[i] = (uint32_t)instance->
+ ld_map_obj[i].dma_cookie[0].dmac_address;
con_log(CL_DLEVEL3, (CE_NOTE,
"ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
con_log(CL_DLEVEL3, (CE_NOTE,
"size_map_info 0x%x", instance->size_map_info));
-
}
return (DDI_SUCCESS);
fail_tbolt_additional_buff:
@@ -382,32 +389,31 @@
return (DDI_FAILURE);
}
MRSAS_REQUEST_DESCRIPTOR_UNION *
-mr_sas_get_request_descriptor(struct mrsas_instance *instance,
- uint16_t index, struct mrsas_cmd *cmd)
+mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
{
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
if (index > instance->max_fw_cmds) {
con_log(CL_ANN1, (CE_NOTE,
"Invalid SMID 0x%x request for descriptor", index));
con_log(CL_ANN1, (CE_NOTE,
- "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
+ "max_fw_cmds : 0x%x", instance->max_fw_cmds));
return (NULL);
}
req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
((char *)instance->request_message_pool +
(sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
con_log(CL_ANN1, (CE_NOTE,
- "request descriptor : 0x%08lx\n", (unsigned long)req_desc));
+ "request descriptor : 0x%08lx", (unsigned long)req_desc));
con_log(CL_ANN1, (CE_NOTE,
- "request descriptor base phy : 0x%08lx\n",
+ "request descriptor base phy : 0x%08lx",
(unsigned long)instance->request_message_pool_phy));
return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}
@@ -417,34 +423,32 @@
*/
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
uint32_t request_q_sz, reply_q_sz;
- int i, max_request_q_sz, max_reply_q_sz;
- uint64_t request_desc;
+ int i, max_reply_q_sz;
MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
- uint64_t *reply_ptr;
/*
* ThunderBolt (TB): there is no longer a producer/consumer mechanism.
* Once we get an interrupt we scan through the list of reply
* descriptors and process them accordingly, so we need to allocate
* memory for 1024 reply descriptors.
*/
/* Allocate Reply Descriptors */
- con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
- sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
+ con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
+ (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
- // reply queue size should be multiple of 16
+ /* reply queue size should be multiple of 16 */
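+ /*
+  * ((n + 1 + 15) / 16) * 16 rounds (max_fw_cmds + 1) up to the next
+  * multiple of 16; each reply descriptor is 8 bytes.
+  */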
max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
reply_q_sz = 8 * max_reply_q_sz;
- con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
- sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
+ con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
+ (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
instance->reply_desc_dma_obj.size = reply_q_sz;
instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
@@ -459,21 +463,21 @@
}
bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
- // virtual address of reply queue
+ /* virtual address of reply queue */
instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
instance->reply_desc_dma_obj.buffer);
instance->reply_q_depth = max_reply_q_sz;
con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
instance->reply_q_depth));
- con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%x",
- instance->reply_frame_pool));
+ con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
+ (void *)instance->reply_frame_pool));
/* initializing reply address to 0xFFFFFFFF */
reply_desc = instance->reply_frame_pool;
for (i = 0; i < instance->reply_q_depth; i++) {
@@ -494,24 +498,25 @@
con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
instance->reply_pool_limit_phy));
- con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
- sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
+ con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
+ (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
/* Allocate Request Descriptors */
- con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
- sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
+ con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
+ (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
request_q_sz = 8 *
(instance->max_fw_cmds);
instance->request_desc_dma_obj.size = request_q_sz;
instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
- instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
+ 0xFFFFFFFFU;
instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
@@ -528,12 +533,10 @@
(instance->request_desc_dma_obj.buffer);
instance->request_message_pool_phy =
(uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
- max_request_q_sz = instance->max_fw_cmds;
-
return (DDI_SUCCESS);
fail_undo_reply_queue:
if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
(void) mrsas_free_dma_obj(instance,
@@ -544,11 +547,13 @@
return (DDI_FAILURE);
}
/*
* mrsas_alloc_cmd_pool_tbolt
- * TODO: merge tbolt-specific codee into mrsas_alloc_cmd_pool() to have single routine
+ *
+ * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
+ * routine
*/
int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
int i;
@@ -570,36 +575,27 @@
* instance->cmd_list is an array of struct mrsas_cmd pointers.
* Allocate the dynamic array first and then allocate individual
* commands.
*/
instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
- if (instance->cmd_list == NULL) {
- con_log(CL_NONE, (CE_WARN,
- "Failed to allocate memory for cmd_list"));
- return (DDI_FAILURE);
- }
/* create a frame pool and assign one frame to each cmd */
for (count = 0; count < max_cmd; count++) {
- instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
- KM_SLEEP);
- if (instance->cmd_list[count] == NULL) {
- con_log(CL_NONE, (CE_WARN,
- "Failed to allocate memory for mrsas_cmd"));
- goto mrsas_undo_cmds;
+ instance->cmd_list[count] =
+ kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
}
- }
/* add all the commands to command pool */
INIT_LIST_HEAD(&instance->cmd_pool_list);
INIT_LIST_HEAD(&instance->cmd_pend_list);
INIT_LIST_HEAD(&instance->cmd_app_pool_list);
reserve_cmd = MRSAS_APP_RESERVED_CMDS;
- for (i = 1; i < reserve_cmd; i++) { //cmd index 0 reservered for IOC INIT
+ /* cmd index 0 reserved for IOC INIT */
+ for (i = 1; i < reserve_cmd; i++) {
cmd = instance->cmd_list[i];
cmd->index = i;
mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
}
@@ -614,19 +610,21 @@
mrsas_undo_cmds:
if (count > 0) {
/* free each cmd */
for (i = 0; i < count; i++) {
- if (instance->cmd_list[i] != NULL)
- kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
+ if (instance->cmd_list[i] != NULL) {
+ kmem_free(instance->cmd_list[i],
+ sizeof (struct mrsas_cmd));
+ }
instance->cmd_list[i] = NULL;
}
}
mrsas_undo_cmd_list:
if (instance->cmd_list != NULL)
- kmem_free(instance->cmd_list,sz);
+ kmem_free(instance->cmd_list, sz);
instance->cmd_list = NULL;
return (DDI_FAILURE);
}
@@ -664,11 +662,11 @@
* ThunderBolt(TB) memory allocations for commands/messages/frames.
*/
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
- /* Allocate command pool ( memory for cmd_list & individual commands )*/
+ /* Allocate command pool (memory for cmd_list & individual commands) */
if (mrsas_alloc_cmd_pool_tbolt(instance)) {
cmn_err(CE_WARN, "Error creating cmd pool");
return (DDI_FAILURE);
}
@@ -764,11 +762,11 @@
/*
* mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
*/
int
-mrsas_init_adapter_tbolt (struct mrsas_instance *instance)
+mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
{
/*
* Reduce the max supported cmds by 1. This is to ensure that the
* reply_q_sz (1 more than the max cmd that driver may send)
@@ -783,35 +781,34 @@
con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
" instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
/* create a pool of commands */
- if ( alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
+ if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
cmn_err(CE_WARN,
" alloc_space_for_mpi2() failed.");
return (DDI_FAILURE);
}
/* Send ioc init message */
- if ( mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
+ /* NOTE: the issue_init call does FMA checking already. */
+ if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
cmn_err(CE_WARN,
" mrsas_issue_init_mpi2() failed.");
goto fail_init_fusion;
}
instance->unroll.alloc_space_mpi2 = 1;
con_log(CL_ANN, (CE_NOTE,
- "mrsas_init_adapter_tbolt: SUCCESSFULL\n"));
+ "mrsas_init_adapter_tbolt: SUCCESSFUL"));
return (DDI_SUCCESS);
fail_init_fusion:
-
-fail_undo_alloc_mpi2:
free_space_for_mpi2(instance);
return (DDI_FAILURE);
}
@@ -838,82 +835,77 @@
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
"could not allocate data transfer buffer.");
return (DDI_FAILURE);
}
- (void) memset(init2_dma_obj.buffer, 2,
- sizeof (Mpi2IOCInitRequest_t));
+ (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
con_log(CL_ANN1, (CE_NOTE,
- "mrsas_issue_init_mpi2 _phys adr: %x \n",
+ "mrsas_issue_init_mpi2 _phys adr: %x",
init2_dma_obj.dma_cookie[0].dmac_address));
/* Initialize and send ioc init message */
- ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj,
- init2_dma_obj.acc_handle);
+ ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
if (ret_val == DDI_FAILURE) {
con_log(CL_ANN1, (CE_WARN,
- "mrsas_issue_init_mpi2: Failed\n"));
+ "mrsas_issue_init_mpi2: Failed"));
goto fail_init_mpi2;
}
/* free IOC init DMA buffer */
if (mrsas_free_dma_obj(instance, init2_dma_obj)
!= DDI_SUCCESS) {
con_log(CL_ANN1, (CE_WARN,
- "mrsas_issue_init_mpi2: Free Failed\n"));
+ "mrsas_issue_init_mpi2: Free Failed"));
return (DDI_FAILURE);
}
-
/* Get/Check and sync ld_map info */
instance->map_id = 0;
- if( mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS )
- mrsas_tbolt_sync_map_info(instance);
+ if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
+ (void) mrsas_tbolt_sync_map_info(instance);
+
+ /* No mrsas_cmd to send, so send NULL. */
+ if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
+ goto fail_init_mpi2;
+
con_log(CL_ANN, (CE_NOTE,
- "mrsas_issue_init_mpi2: SUCCESSFULL\n"));
+ "mrsas_issue_init_mpi2: SUCCESSFUL"));
return (DDI_SUCCESS);
fail_init_mpi2:
- mrsas_free_dma_obj(instance, init2_dma_obj);
+ (void) mrsas_free_dma_obj(instance, init2_dma_obj);
return (DDI_FAILURE);
}
-int
-mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj,
- ddi_acc_handle_t accessp)
+static int
+mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
- int numbytes, i;
- int ret = DDI_SUCCESS;
+ int numbytes;
uint16_t flags;
- int status;
- timespec_t time;
- uint64_t mSec;
- uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
struct mrsas_init_frame2 *mfiFrameInit2;
struct mrsas_header *frame_hdr;
Mpi2IOCInitRequest_t *init;
struct mrsas_cmd *cmd = NULL;
struct mrsas_drv_ver drv_ver_info;
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-
con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
#ifdef DEBUG
con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
- sizeof (*mfiFrameInit2)));
- con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", sizeof (*init)));
+ (int)sizeof (*mfiFrameInit2)));
+ con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
- sizeof (struct mrsas_init_frame2)));
+ (int)sizeof (struct mrsas_init_frame2)));
con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
- sizeof (Mpi2IOCInitRequest_t)));
+ (int)sizeof (Mpi2IOCInitRequest_t)));
#endif
init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
numbytes = sizeof (*init);
bzero(init, numbytes);
@@ -967,11 +959,11 @@
cmd->retry_count_for_ocr = 0;
cmd->pkt = NULL;
cmd->drv_pkt_time = 0;
mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
- con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%x", mfiFrameInit2));
+ con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
frame_hdr = &cmd->frame->hdr;
ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
MFI_CMD_STATUS_POLL_MODE);
@@ -983,11 +975,11 @@
ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
con_log(CL_ANN, (CE_CONT,
"mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
- // Init the MFI Header
+ /* Init the MFI Header */
ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
@@ -1004,17 +996,17 @@
ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->data_xfer_len,
sizeof (Mpi2IOCInitRequest_t));
con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
- init->ReplyDescriptorPostQueueAddress));
+ (int)init->ReplyDescriptorPostQueueAddress));
- /* fill driver version information*/
+ /* fill driver version information */
fill_up_drv_ver(&drv_ver_info);
/* allocate the driver version data transfer buffer */
- instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
+ instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
@@ -1023,29 +1015,30 @@
(uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
cmn_err(CE_WARN,
"fusion init: Could not allocate driver version buffer.");
return (DDI_FAILURE);
}
- /* copy driver version to dma buffer*/
- (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
+ /* copy driver version to dma buffer */
+ bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
(uint8_t *)drv_ver_info.drv_ver,
(uint8_t *)instance->drv_ver_dma_obj.buffer,
- sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
+ sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
- /*send driver version physical address to firmware*/
- ddi_put64(cmd->frame_dma_obj.acc_handle,
- &mfiFrameInit2->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
+ /* send driver version physical address to firmware */
+ ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
+ instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
mfiFrameInit2->queue_info_new_phys_addr_lo,
- sizeof (Mpi2IOCInitRequest_t)));
+ (int)sizeof (Mpi2IOCInitRequest_t)));
con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
- cmd->scsi_io_request_phys_addr, sizeof (struct mrsas_init_frame2)));
+ cmd->scsi_io_request_phys_addr,
+ (int)sizeof (struct mrsas_init_frame2)));
/* disable interrupts before sending INIT2 frame */
instance->func_ptr->disable_intr(instance);
req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
@@ -1064,11 +1057,10 @@
frame_hdr->cmd_status));
if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
&mfiFrameInit2->cmd_status) == 0) {
con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
- ret = DDI_SUCCESS;
} else {
con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
mrsas_dump_reply_desc(instance);
goto fail_ioc_init;
}
@@ -1075,25 +1067,24 @@
mrsas_dump_reply_desc(instance);
instance->unroll.verBuff = 1;
- con_log(CL_ANN, (CE_NOTE,
- "mrsas_tbolt_ioc_init: SUCCESSFULL\n"));
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
-
return (DDI_SUCCESS);
fail_ioc_init:
- mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
+ (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
return (DDI_FAILURE);
}
-int wait_for_outstanding_poll_io(struct mrsas_instance *instance)
+int
+wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
int i;
uint32_t wait_time = dump_io_wait_time;
for (i = 0; i < wait_time; i++) {
/*
@@ -1122,14 +1113,11 @@
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
struct mrsas_instance *instance = ADDR2MR(ap);
struct scsa_cmd *acmd = PKT2CMD(pkt);
struct mrsas_cmd *cmd = NULL;
- int rval, i;
uchar_t cmd_done = 0;
- Mpi2RaidSCSIIORequest_t *scsi_raid_io;
- uint32_t msecs = 120 * MILLISEC;
con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
if (instance->deadadapter == 1) {
cmn_err(CE_WARN,
"mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
@@ -1143,11 +1131,11 @@
if (instance->adapterresetinprogress) {
con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
"returning mfi_pkt and setting TRAN_BUSY\n"));
return (TRAN_BUSY);
}
- rval = mrsas_tbolt_prepare_pkt(acmd);
+ (void) mrsas_tbolt_prepare_pkt(acmd);
cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
/*
* Check if the command is already completed by the mrsas_build_cmd()
@@ -1172,39 +1160,39 @@
if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
if (instance->fw_outstanding > instance->max_fw_cmds) {
cmn_err(CE_WARN,
- "Command Queue Full... Returning BUSY \n");
+ "Command Queue Full... Returning BUSY");
return_raid_msg_pkt(instance, cmd);
return (TRAN_BUSY);
}
/* Synchronize the Cmd frame for the controller */
(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
DDI_DMA_SYNC_FORDEV);
con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
- "cmd->index:0x%x SMID %0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
+ "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
+ cmd->index, cmd->SMID));
instance->func_ptr->issue_cmd(cmd, instance);
-
- return (TRAN_ACCEPT);
-
} else {
instance->func_ptr->issue_cmd(cmd, instance);
(void) wait_for_outstanding_poll_io(instance);
- return (TRAN_ACCEPT);
+ (void) mrsas_common_check(instance, cmd);
}
+
+ return (TRAN_ACCEPT);
}
/*
* prepare the pkt:
* the pkt may have been resubmitted or just reused so
* initialize some fields and do some checks.
*/
-int
+static int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
struct scsi_pkt *pkt = CMD2PKT(acmd);
@@ -1233,39 +1221,36 @@
Mpi2RaidSCSIIORequest_t *scsi_raid_io,
uint32_t *datalen)
{
uint32_t MaxSGEs;
int sg_to_process;
- uint32_t i, j, SGEdwords = 0;
+ uint32_t i, j;
uint32_t numElements, endElement;
Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
- uint32_t SGLFlags = 0;
+ ddi_acc_handle_t acc_handle =
+ instance->mpi2_frame_pool_dma_obj.acc_handle;
con_log(CL_ANN1, (CE_NOTE,
"chkpnt: Building Chained SGL :%d", __LINE__));
/* Calculate SGE size in number of Words (32bit) */
/* Clear the datalen before updating it. */
*datalen = 0;
- SGEdwords = sizeof (Mpi25IeeeSgeChain64_t) / 4;
-
MaxSGEs = instance->max_sge_in_main_msg;
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->SGLFlags,
+ ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
- // set data transfer flag.
+ /* set data transfer flag. */
if (acmd->cmd_flags & CFLAG_DMASEND) {
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Control,
+ ddi_put32(acc_handle, &scsi_raid_io->Control,
MPI2_SCSIIO_CONTROL_WRITE);
} else {
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Control, MPI2_SCSIIO_CONTROL_READ);
+ ddi_put32(acc_handle, &scsi_raid_io->Control,
+ MPI2_SCSIIO_CONTROL_READ);
}
numElements = acmd->cmd_cookiecnt;
@@ -1275,12 +1260,12 @@
con_log(CL_ANN, (CE_NOTE,
"[Max SGE Count Exceeded]:%x", numElements));
return (numElements);
}
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.numSGE, (uint8_t)numElements);
+ ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
+ (uint8_t)numElements);
/* set end element in main message frame */
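+ /*
+  * If every cookie fits in the main frame, use them all; otherwise
+  * the last main-frame SGE slot is left for the chain element.
+  */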
endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
/* prepare the scatter-gather list for the firmware */
@@ -1289,90 +1274,86 @@
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
sgl_ptr_end += instance->max_sge_in_main_msg - 1;
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &sgl_ptr_end->Flags, 0);
+ ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
}
for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
- ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Address,
+ ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
acmd->cmd_dmacookies[i].dmac_laddress);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Length,
+ ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
acmd->cmd_dmacookies[i].dmac_size);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Flags, 0);
+ ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
- if (i == (numElements - 1))
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
+ if (i == (numElements - 1)) {
+ ddi_put8(acc_handle,
+ &scsi_raid_io_sgl_ieee->Flags,
+ IEEE_SGE_FLAGS_END_OF_LIST);
}
+ }
*datalen += acmd->cmd_dmacookies[i].dmac_size;
#ifdef DEBUG
- con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]:%llx",
+ con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
scsi_raid_io_sgl_ieee->Address));
con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
scsi_raid_io_sgl_ieee->Length));
con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
scsi_raid_io_sgl_ieee->Flags));
#endif
}
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->ChainOffset, 0);
+ ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
/* check if chained SGL required */
if (i < numElements) {
con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
- uint16_t ioFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->IoFlags);
+ uint16_t ioFlags =
+ ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
- if ((ioFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
- else
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ if ((ioFlags &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
+ ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
+ (U8)instance->chain_offset_io_req);
+ } else {
+ ddi_put8(acc_handle,
&scsi_raid_io->ChainOffset, 0);
}
- else {
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
+ } else {
+ ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
+ (U8)instance->chain_offset_io_req);
}
/* prepare physical chain element */
ieeeChainElement = scsi_raid_io_sgl_ieee;
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &ieeeChainElement->NextChainOffset, 0);
+ ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
- if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &ieeeChainElement->Flags, IEEE_SGE_FLAGS_CHAIN_ELEMENT );
- else
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &ieeeChainElement->Flags,
+ if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
+ ddi_put8(acc_handle, &ieeeChainElement->Flags,
+ IEEE_SGE_FLAGS_CHAIN_ELEMENT);
+ } else {
+ ddi_put8(acc_handle, &ieeeChainElement->Flags,
(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
+ }
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &ieeeChainElement->Length,
+ ddi_put32(acc_handle, &ieeeChainElement->Length,
(sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
- ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &ieeeChainElement->Address,
+ ddi_put64(acc_handle, &ieeeChainElement->Address,
(U64)cmd->sgl_phys_addr);
sg_to_process = numElements - i;
con_log(CL_ANN1, (CE_NOTE,
@@ -1383,32 +1364,31 @@
/* build rest of the SGL in chained buffer */
for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
- ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Address,
+ ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
acmd->cmd_dmacookies[i].dmac_laddress);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Length,
+ ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
acmd->cmd_dmacookies[i].dmac_size);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Flags, 0);
+ ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
- if (i == (numElements - 1))
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
+ if (i == (numElements - 1)) {
+ ddi_put8(acc_handle,
+ &scsi_raid_io_sgl_ieee->Flags,
+ IEEE_SGE_FLAGS_END_OF_LIST);
}
+ }
*datalen += acmd->cmd_dmacookies[i].dmac_size;
#if DEBUG
con_log(CL_DLEVEL1, (CE_NOTE,
- "[SGL Address]:%llx",
+ "[SGL Address]: %" PRIx64,
scsi_raid_io_sgl_ieee->Address));
con_log(CL_DLEVEL1, (CE_NOTE,
"[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
con_log(CL_DLEVEL1, (CE_NOTE,
"[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
@@ -1423,35 +1403,28 @@
/*
* build_cmd
*/
-struct mrsas_cmd *
+static struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
struct scsi_pkt *pkt, uchar_t *cmd_done)
{
uint8_t fp_possible = 0;
- uint16_t flags = 0;
- uint32_t i, index;
- uint32_t context;
- uint32_t sge_bytes;
- uint8_t ChainOffsetValue;
- uint32_t SGLFlags;
- uint32_t lba_count=0;
- uint32_t start_lba_hi=0;
- uint32_t start_lba_lo=0;
- ddi_acc_handle_t acc_handle;
+ uint32_t index;
+ uint32_t lba_count = 0;
+ uint32_t start_lba_hi = 0;
+ uint32_t start_lba_lo = 0;
+ ddi_acc_handle_t acc_handle =
+ instance->mpi2_frame_pool_dma_obj.acc_handle;
struct mrsas_cmd *cmd = NULL;
struct scsa_cmd *acmd = PKT2CMD(pkt);
MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
- Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
uint32_t datalen;
struct IO_REQUEST_INFO io_info;
MR_FW_RAID_MAP_ALL *local_map_ptr;
- MR_LD_RAID *raid;
- U32 ld;
uint16_t pd_cmd_cdblen;
con_log(CL_DLEVEL1, (CE_NOTE,
"chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
@@ -1465,11 +1438,11 @@
if (!(cmd = get_raid_msg_pkt(instance))) {
return (NULL);
}
index = cmd->index;
- ReqDescUnion = mr_sas_get_request_descriptor(instance, index, cmd);
+ ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
ReqDescUnion->Words = 0;
ReqDescUnion->SCSIIO.SMID = cmd->SMID;
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1491,38 +1464,34 @@
(void) ddi_dma_sync(acmd->cmd_dmahandle,
acmd->cmd_dma_offset, acmd->cmd_dma_len,
DDI_DMA_SYNC_FORCPU);
}
} else {
- con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
+ con_log(CL_ANN, (CE_NOTE, "NO DMA"));
}
- // get SCSI_IO raid message frame pointer
+ /* get SCSI_IO raid message frame pointer */
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
/* zero out SCSI_IO raid message frame */
- memset(scsi_raid_io, 0, sizeof(Mpi2RaidSCSIIORequest_t));
+ bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
- /*Set the ldTargetId set by BuildRaidContext() */
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.ldTargetId,
+ /* Set the ldTargetId set by BuildRaidContext() */
+ ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
acmd->device_id);
/* Copy CDB to scsi_io_request message frame */
- ddi_rep_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- (uint8_t *)pkt->pkt_cdbp,
- (uint8_t *)scsi_raid_io->CDB.CDB32,
+ ddi_rep_put8(acc_handle,
+ (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
/*
- * Just the CDB length,rest of the Flags are zero
+ * Just the CDB length, rest of the Flags are zero
* This will be modified later.
*/
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->IoFlags,
- acmd->cmd_cdblen);
+ ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
pd_cmd_cdblen = acmd->cmd_cdblen;
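+ /*
+  * For READ/WRITE commands, decode the start LBA and block count
+  * from the 6/10/12/16-byte CDB layouts below; these feed the RAID
+  * context and the fast-path decision.
+  */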
switch (pkt->pkt_cdbp[0]) {
case SCMD_READ:
@@ -1535,24 +1504,26 @@
case SCMD_WRITE_G5:
if (acmd->islogical) {
/* Initialize sense Information */
if (cmd->sense1 == NULL) {
- con_log(CL_ANN, (CE_NOTE,
- "tbolt_build_cmd: Sense buffer ptr NULL \n"));
+ con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
+ "Sense buffer ptr NULL "));
}
bzero(cmd->sense1, SENSE_LENGTH);
- con_log(CL_DLEVEL2, (CE_NOTE,
- "tbolt_build_cmd CDB[0] = %x\n", pkt->pkt_cdbp[0]));
+ con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
+ "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
- if (acmd->cmd_cdblen == CDB_GROUP0) { /* 6-byte cdb */
+ if (acmd->cmd_cdblen == CDB_GROUP0) {
+ /* 6-byte cdb */
lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
- start_lba_lo =
- ((uint32_t)(pkt->pkt_cdbp[3]) |
+ start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
- ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) << 16));
- } else if (acmd->cmd_cdblen == CDB_GROUP1) { /* 10-byte cdb */
+ ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
+ << 16));
+ } else if (acmd->cmd_cdblen == CDB_GROUP1) {
+ /* 10-byte cdb */
lba_count =
(((uint16_t)(pkt->pkt_cdbp[8])) |
((uint16_t)(pkt->pkt_cdbp[7]) << 8));
start_lba_lo =
@@ -1559,11 +1530,12 @@
(((uint32_t)(pkt->pkt_cdbp[5])) |
((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
- } else if (acmd->cmd_cdblen == CDB_GROUP5) { /* 12-byte cdb */
+ } else if (acmd->cmd_cdblen == CDB_GROUP5) {
+ /* 12-byte cdb */
lba_count = (
((uint32_t)(pkt->pkt_cdbp[9])) |
((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
((uint32_t)(pkt->pkt_cdbp[6]) << 24));
@@ -1572,11 +1544,12 @@
(((uint32_t)(pkt->pkt_cdbp[5])) |
((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
- } else if (acmd->cmd_cdblen == CDB_GROUP4) { /* 16-byte cdb */
+ } else if (acmd->cmd_cdblen == CDB_GROUP4) {
+ /* 16-byte cdb */
lba_count = (
((uint32_t)(pkt->pkt_cdbp[13])) |
((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
((uint32_t)(pkt->pkt_cdbp[10]) << 24));
@@ -1593,178 +1566,199 @@
((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
((uint32_t)(pkt->pkt_cdbp[2]) << 24));
}
if (instance->tbolt &&
- ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer) )
- cmn_err(CE_WARN," IO SECTOR COUNT exceeds controller limit 0x%x sectors\n", lba_count);
+ ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
+ cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
+ "controller limit 0x%x sectors",
+ lba_count);
+ }
- memset(&io_info, 0, sizeof (struct IO_REQUEST_INFO));
- io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
+ bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
+ start_lba_lo;
io_info.numBlocks = lba_count;
io_info.ldTgtId = acmd->device_id;
if (acmd->cmd_flags & CFLAG_DMASEND)
io_info.isRead = 0;
else
io_info.isRead = 1;
- /*Aquire SYNC MAP UPDATE lock */
+ /* Acquire SYNC MAP UPDATE lock */
mutex_enter(&instance->sync_map_mtx);
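+ /*
+  * Two copies of the FW RAID map are kept (ld_map[0]/ld_map[1]);
+  * the low bit of map_id selects the currently valid one.
+  */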
- local_map_ptr = instance->ld_map[(instance->map_id & 1)];
+ local_map_ptr =
+ instance->ld_map[(instance->map_id & 1)];
- if ( (MR_TargetIdToLdGet(acmd->device_id, local_map_ptr) >= MAX_LOGICAL_DRIVES) || !instance->fast_path_io ){
- cmn_err(CE_NOTE,
- "Fast Path NOT Possible, targetId >= MAX_LOGICAL_DRIVES || !instance->fast_path_io\n");
+ if ((MR_TargetIdToLdGet(
+ acmd->device_id, local_map_ptr) >=
+ MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
+ cmn_err(CE_NOTE, "Fast Path NOT Possible, "
+ "targetId >= MAX_LOGICAL_DRIVES || "
+ "!instance->fast_path_io");
fp_possible = 0;
- /* Set Regionlock flags to BYPASS
- io_request->RaidContext.regLockFlags = 0; */
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ /* Set Regionlock flags to BYPASS */
+ /* io_request->RaidContext.regLockFlags = 0; */
+ ddi_put8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags, 0);
} else {
if (MR_BuildRaidContext(instance, &io_info,
&scsi_raid_io->RaidContext, local_map_ptr))
fp_possible = io_info.fpOkForIo;
}
- if (!enable_fp) {
+ if (!enable_fp)
fp_possible = 0;
- }
- con_log(CL_ANN1, (CE_NOTE,
- "enable_fp %d instance->fast_path_io %d fp_possible %d \n",
+
+ con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
+ "instance->fast_path_io %d fp_possible %d",
enable_fp, instance->fast_path_io, fp_possible));
if (fp_possible) {
/* Check for DIF enabled LD */
if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
/* Prepare 32 Byte CDB for DIF capable Disk */
mrsas_tbolt_prepare_cdb(instance,
scsi_raid_io->CDB.CDB32,
- &io_info,
- scsi_raid_io,
- start_lba_lo);
+ &io_info, scsi_raid_io, start_lba_lo);
} else {
mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
- (uint8_t *)&pd_cmd_cdblen, io_info.pdBlock, io_info.numBlocks, 0);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->IoFlags,
- pd_cmd_cdblen);
+ (uint8_t *)&pd_cmd_cdblen,
+ io_info.pdBlock, io_info.numBlocks);
+ ddi_put16(acc_handle,
+ &scsi_raid_io->IoFlags, pd_cmd_cdblen);
}
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Function,
+ ddi_put8(acc_handle, &scsi_raid_io->Function,
MPI2_FUNCTION_SCSI_IO_REQUEST);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
- uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ uint8_t regLockFlags = ddi_get8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags);
- uint16_t IoFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ uint16_t IoFlags = ddi_get16(acc_handle,
&scsi_raid_io->IoFlags);
if (regLockFlags == REGION_TYPE_UNUSED)
ReqDescUnion->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
- regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ IoFlags |=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put8(acc_handle,
&scsi_raid_io->ChainOffset, 0);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put8(acc_handle,
+ &scsi_raid_io->RaidContext.nsegType,
+ ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
+ MPI2_TYPE_CUDA));
+ ddi_put8(acc_handle,
+ &scsi_raid_io->RaidContext.regLockFlags,
+ regLockFlags);
+ ddi_put16(acc_handle,
&scsi_raid_io->IoFlags, IoFlags);
}
- if ((instance->load_balance_info[acmd->device_id].loadBalanceFlag) && (io_info.isRead)) {
- io_info.devHandle = get_updated_dev_handle(&instance->load_balance_info[acmd->device_id], &io_info);
- cmd->load_balance_flag |= MEGASAS_LOAD_BALANCE_FLAG;
- } else
- cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
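+ /*
+  * For reads on a load-balanced LD, fetch an updated (possibly
+  * alternate-arm) device handle and flag the command as balanced.
+  */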
+ if ((instance->load_balance_info[
+ acmd->device_id].loadBalanceFlag) &&
+ (io_info.isRead)) {
+ io_info.devHandle =
+ get_updated_dev_handle(&instance->
+ load_balance_info[acmd->device_id],
+ &io_info);
+ cmd->load_balance_flag |=
+ MEGASAS_LOAD_BALANCE_FLAG;
+ } else {
+ cmd->load_balance_flag &=
+ ~MEGASAS_LOAD_BALANCE_FLAG;
+ }
ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->DevHandle,
+ ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
io_info.devHandle);
} else {
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Function,
+ ddi_put8(acc_handle, &scsi_raid_io->Function,
MPI2_FUNCTION_LD_IO_REQUEST);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put16(acc_handle,
&scsi_raid_io->DevHandle, acmd->device_id);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
+ ddi_put16(acc_handle,
+ &scsi_raid_io->RaidContext.timeoutValue,
+ local_map_ptr->raidMap.fpPdIoTimeoutSec);
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
- uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ uint8_t regLockFlags = ddi_get8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags);
- if (regLockFlags == REGION_TYPE_UNUSED)
+ if (regLockFlags == REGION_TYPE_UNUSED) {
ReqDescUnion->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
- regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
+ ddi_put8(acc_handle,
+ &scsi_raid_io->RaidContext.nsegType,
+ ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
+ MPI2_TYPE_CUDA));
+ ddi_put8(acc_handle,
+ &scsi_raid_io->RaidContext.regLockFlags,
+ regLockFlags);
}
-
} /* Not FP */
- /*Release SYNC MAP UPDATE lock */
+ /* Release SYNC MAP UPDATE lock */
mutex_exit(&instance->sync_map_mtx);
- /* Set sense buffer physical address/length in scsi_io_request.*/
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->SenseBufferLowAddress,
+ /*
+ * Set sense buffer physical address/length in scsi_io_request.
+ */
+ ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->SenseBufferLength,
+ ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
SENSE_LENGTH);
- /* Construct SGL*/
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->SGLOffset0,
+ /* Construct SGL */
+ ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
- mr_sas_tbolt_build_sgl(instance, acmd, cmd,
+ (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
scsi_raid_io, &datalen);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->DataLength, datalen);
+ ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
break;
-
- }
- else {
#ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
+ } else {
break;
#endif
}
/* fall through For all non-rd/wr cmds */
default:
switch (pkt->pkt_cdbp[0]) {
- case 0x35: { // SCMD_SYNCHRONIZE_CACHE
+ case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
return_raid_msg_pkt(instance, cmd);
*cmd_done = 1;
return (NULL);
}
@@ -1785,78 +1779,90 @@
}
break;
}
default: {
- /* Here we need to handle PASSTHRU for
- Logical Devices. Like Inquiry etc.*/
+ /*
+ * Here we need to handle PASSTHRU for
+ * Logical Devices. Like Inquiry etc.
+ */
- if(!(acmd->islogical)) {
+ if (!(acmd->islogical)) {
- /* Aquire SYNC MAP UPDATE lock */
+ /* Acquire SYNC MAP UPDATE lock */
mutex_enter(&instance->sync_map_mtx);
- local_map_ptr = instance->ld_map[(instance->map_id & 1)];
+ local_map_ptr =
+ instance->ld_map[(instance->map_id & 1)];
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Function, MPI2_FUNCTION_SCSI_IO_REQUEST);
+ ddi_put8(acc_handle, &scsi_raid_io->Function,
+ MPI2_FUNCTION_SCSI_IO_REQUEST);
ReqDescUnion->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->DevHandle,
- local_map_ptr->raidMap.devHndlInfo[acmd->device_id].curDevHdl);
+ ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
+ local_map_ptr->raidMap.
+ devHndlInfo[acmd->device_id].curDevHdl);
- /*Set regLockFlasgs to REGION_TYPE_BYPASS */
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ /* Set regLockFlags to REGION_TYPE_BYPASS */
+ ddi_put8(acc_handle,
&scsi_raid_io->RaidContext.regLockFlags, 0);
- ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.regLockRowLBA, 0);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.regLockLength, 0);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.RAIDFlags,
- MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.ldTargetId, acmd->device_id);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put64(acc_handle,
+ &scsi_raid_io->RaidContext.regLockRowLBA,
+ 0);
+ ddi_put32(acc_handle,
+ &scsi_raid_io->RaidContext.regLockLength,
+ 0);
+ ddi_put8(acc_handle,
+ &scsi_raid_io->RaidContext.RAIDFlags,
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
+ MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+ ddi_put16(acc_handle,
+ &scsi_raid_io->RaidContext.timeoutValue,
+ local_map_ptr->raidMap.fpPdIoTimeoutSec);
+ ddi_put16(acc_handle,
+ &scsi_raid_io->RaidContext.ldTargetId,
+ acmd->device_id);
+ ddi_put8(acc_handle,
&scsi_raid_io->LUN[1], acmd->lun);
/* Release SYNC MAP UPDATE lock */
mutex_exit(&instance->sync_map_mtx);
} else {
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Function, MPI2_FUNCTION_LD_IO_REQUEST);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put8(acc_handle, &scsi_raid_io->Function,
+ MPI2_FUNCTION_LD_IO_REQUEST);
+ ddi_put8(acc_handle,
&scsi_raid_io->LUN[1], acmd->lun);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put16(acc_handle,
&scsi_raid_io->DevHandle, acmd->device_id);
ReqDescUnion->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
- /* Set sense buffer physical address/length in scsi_io_request.*/
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ /*
+ * Set sense buffer physical address/length in
+ * scsi_io_request.
+ */
+ ddi_put32(acc_handle,
&scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put8(acc_handle,
&scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
- /* Construct SGL*/
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->SGLOffset0,
+ /* Construct SGL */
+ ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
- mr_sas_tbolt_build_sgl(instance, acmd, cmd,
+ (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
scsi_raid_io, &datalen);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
+ ddi_put32(acc_handle,
&scsi_raid_io->DataLength, datalen);
con_log(CL_ANN, (CE_CONT,
"tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
@@ -1870,13 +1876,10 @@
}
break;
}
}
-#ifdef lint
- context = context;
-#endif
return (cmd);
}
/*
@@ -1981,18 +1984,20 @@
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
atomic_add_16(&instance->fw_outstanding, 1);
struct scsi_pkt *pkt;
- con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
+ con_log(CL_ANN1,
+ (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
con_log(CL_DLEVEL1, (CE_CONT,
- " [req desc Words] %llx \n", req_desc->Words));
+ " [req desc Words] %" PRIx64 " \n", req_desc->Words));
con_log(CL_DLEVEL1, (CE_CONT,
- " [req desc low part] %x \n", req_desc->Words));
+ " [req desc low part] %x \n",
+ (uint_t)(req_desc->Words & 0xffffffff)));
con_log(CL_DLEVEL1, (CE_CONT,
- " [req desc high part] %x \n", (req_desc->Words >> 32)));
+ " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
pkt = cmd->pkt;
if (pkt) {
con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
"ISSUED CMD TO FW : called : cmd:"
@@ -2032,38 +2037,41 @@
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
struct mrsas_header *hdr;
hdr = (struct mrsas_header *)&cmd->frame->hdr;
- con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", cmd->SMID));
+ con_log(CL_ANN,
+ (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
+ cmd->SMID));
if (instance->adapterresetinprogress) {
cmd->drv_pkt_time = ddi_get16
(cmd->frame_dma_obj.acc_handle, &hdr->timeout);
if (cmd->drv_pkt_time < debug_timeout_g)
cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
- "RESET-IN-PROGRESS, issue cmd & return.\n"));
+ "RESET-IN-PROGRESS, issue cmd & return."));
mutex_enter(&instance->reg_write_mtx);
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
mutex_exit(&instance->reg_write_mtx);
return (DDI_SUCCESS);
} else {
- con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
+ con_log(CL_ANN1, (CE_NOTE,
+ "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
push_pending_mfi_pkt(instance, cmd);
}
con_log(CL_DLEVEL2, (CE_NOTE,
- "HighQport offset :%lx",
- (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
+ "HighQport offset :%p",
+ (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
con_log(CL_DLEVEL2, (CE_NOTE,
- "LowQport offset :%lx",
- (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
+ "LowQport offset :%p",
+ (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
cmd->sync_cmd = MRSAS_TRUE;
cmd->cmd_status = ENODATA;
@@ -2071,13 +2079,13 @@
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
mutex_exit(&instance->reg_write_mtx);
con_log(CL_ANN1, (CE_NOTE,
- " req desc high part %x \n", (req_desc->Words >> 32)));
- con_log(CL_ANN1, (CE_NOTE,
- " req desc low part %x \n", req_desc->Words));
+ " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
+ con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
+ (uint_t)(req_desc->Words & 0xffffffff)));
mutex_enter(&instance->int_cmd_mtx);
for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
}
@@ -2101,11 +2109,13 @@
int i;
uint16_t flags;
uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
struct mrsas_header *frame_hdr;
- con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));
+ con_log(CL_ANN,
+ (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
+ cmd->SMID));
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
@@ -2112,14 +2122,14 @@
MFI_CMD_STATUS_POLL_MODE);
flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
+ con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
+ (uint_t)(req_desc->Words & 0xffffffff)));
con_log(CL_ANN1, (CE_NOTE,
- " req desc low part %x \n", req_desc->Words));
- con_log(CL_ANN1, (CE_NOTE,
- " req desc high part %x \n", (req_desc->Words >> 32)));
+ " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
/* issue the frame using inbound queue port */
mutex_enter(&instance->reg_write_mtx);
WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
@@ -2133,45 +2143,41 @@
}
if (ddi_get8(cmd->frame_dma_obj.acc_handle,
&frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
con_log(CL_ANN1, (CE_NOTE,
- " cmd failed %x \n", (req_desc->Words)));
+ " cmd failed %" PRIx64, (req_desc->Words)));
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
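
The poll-mode path above relies on a sentinel status: the driver pre-writes MFI_CMD_STATUS_POLL_MODE into the frame header, sets MFI_FRAME_DONT_POST_IN_REPLY_QUEUE so the completion never lands on the reply queue, and spins until firmware overwrites the status. A condensed sketch, with the DDI pieces stubbed out and POLL_SENTINEL standing in for MFI_CMD_STATUS_POLL_MODE:

#include <stdint.h>

#define POLL_SENTINEL 0xFF			/* stand-in value */

static volatile uint8_t cmd_status;		/* stub for frame_hdr->cmd_status */
static void wait_1ms(void) {}			/* stub for drv_usecwait(MILLISEC) */

static int
poll_for_completion(uint32_t msecs)
{
	uint32_t i;

	cmd_status = POLL_SENTINEL;		/* firmware overwrites this */
	/* ... issue the frame to the inbound queue here ... */
	for (i = 0; i < msecs && cmd_status == POLL_SENTINEL; i++)
		wait_1ms();
	return (cmd_status == POLL_SENTINEL ? -1 : 0);	/* -1 on timeout */
}
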
void
tbolt_enable_intr(struct mrsas_instance *instance)
{
- uint32_t mask;
-
/* TODO: For Thunderbolt/Invader also clear intr on enable */
- //writel(~0, &regs->outbound_intr_status);
- //readl(&regs->outbound_intr_status);
+ /* writel(~0, &regs->outbound_intr_status); */
+ /* readl(&regs->outbound_intr_status); */
WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
/* dummy read to force PCI flush */
- mask = RD_OB_INTR_MASK(instance);
+ (void) RD_OB_INTR_MASK(instance);
}
void
tbolt_disable_intr(struct mrsas_instance *instance)
{
uint32_t mask = 0xFFFFFFFF;
- uint32_t status;
-
WR_OB_INTR_MASK(mask, instance);
/* Dummy readl to force pci flush */
- status = RD_OB_INTR_MASK(instance);
+ (void) RD_OB_INTR_MASK(instance);
}
int
tbolt_intr_ack(struct mrsas_instance *instance)
@@ -2179,21 +2185,26 @@
uint32_t status;
/* check if it is our interrupt */
status = RD_OB_INTR_STATUS(instance);
con_log(CL_ANN1, (CE_NOTE,
- "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
+ "chkpnt: Entered tbolt_intr_ack status = %d", status));
if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
return (DDI_INTR_UNCLAIMED);
}
+ if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
/* clear the interrupt by writing back the same value */
WR_OB_INTR_STATUS(status, instance);
/* dummy READ */
- RD_OB_INTR_STATUS(instance);
+ (void) RD_OB_INTR_STATUS(instance);
}
return (DDI_INTR_CLAIMED);
}
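
tbolt_intr_ack shows the standard claim/clear/flush idiom: read the outbound status, bail if the enable mask does not match, write the same value back to clear, then do a throwaway read so the posted PCI write reaches the device. A compact sketch with the register access stubbed (INTR_MASK is an illustrative value, not the real MFI_FUSION_ENABLE_INTERRUPT_MASK):

#include <stdint.h>

#define INTR_MASK 0x00000009			/* illustrative value only */

static volatile uint32_t ob_intr_status;	/* stub for the OB status register */

static int
intr_ack_sketch(void)
{
	uint32_t status = ob_intr_status;	/* RD_OB_INTR_STATUS */

	if (!(status & INTR_MASK))
		return (0);			/* not ours: DDI_INTR_UNCLAIMED */
	ob_intr_status = status;		/* write back to clear */
	(void) ob_intr_status;			/* dummy read flushes the posted write */
	return (1);				/* DDI_INTR_CLAIMED */
}
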
/*
@@ -2298,23 +2309,24 @@
{
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
uint32_t index;
+ ddi_acc_handle_t acc_handle =
+ instance->mpi2_frame_pool_dma_obj.acc_handle;
if (!instance->tbolt) {
- con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
+ con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
return;
}
index = cmd->index;
- ReqDescUnion =
- mr_sas_get_request_descriptor(instance, index, cmd);
+ ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
if (!ReqDescUnion) {
- con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]%x"));
+ con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
return;
}
con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
@@ -2326,53 +2338,47 @@
ReqDescUnion->SCSIIO.SMID = cmd->SMID;
cmd->request_desc = ReqDescUnion;
- // get raid message frame pointer
+ /* get raid message frame pointer */
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
- Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
+ Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
+ &scsi_raid_io->SGL.IeeeChain;
sgl_ptr_end += instance->max_sge_in_main_msg - 1;
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &sgl_ptr_end->Flags, 0);
+ ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
}
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Function,
+ ddi_put8(acc_handle, &scsi_raid_io->Function,
MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->SGLOffset0,
+ ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->ChainOffset,
+ ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
(U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->SenseBufferLowAddress,
+ ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
cmd->sense_phys_addr1);
scsi_raid_io_sgl_ieee =
(Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
- ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Address,
+ ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
(U64)cmd->frame_phys_addr);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Flags,
- (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ ddi_put8(acc_handle,
+ &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io_sgl_ieee->Length, 1024); //MEGASAS_MAX_SZ_CHAIN_FRAME
+ /* LSI hardcoded 1024 here instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
+ ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
con_log(CL_ANN1, (CE_NOTE,
- "[MFI CMD PHY ADDRESS]:%x",
+ "[MFI CMD PHY ADDRESS]:%" PRIx64,
scsi_raid_io_sgl_ieee->Address));
con_log(CL_ANN1, (CE_NOTE,
"[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
scsi_raid_io_sgl_ieee->Flags));
@@ -2389,18 +2395,17 @@
struct scsa_cmd *acmd;
struct scsi_pkt *pkt;
struct scsi_arq_status *arqstat;
Mpi2RaidSCSIIORequest_t *scsi_raid_io;
LD_LOAD_BALANCE_INFO *lbinfo;
- int i;
+ ddi_acc_handle_t acc_handle =
+ instance->mpi2_frame_pool_dma_obj.acc_handle;
scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
- status = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.status);
- extStatus = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.extStatus);
+ status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
+ extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
if (status != MFI_STAT_OK) {
@@ -2411,48 +2416,46 @@
"IO Cmd Success SMID %x", cmd->SMID));
}
/* regular commands */
- switch (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->Function)) {
+ switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
acmd = (struct scsa_cmd *)cmd->cmd;
lbinfo = &instance->load_balance_info[acmd->device_id];
if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
- arm = lbinfo->raid1DevHandle[0] == scsi_raid_io->DevHandle ? 0 : 1;
+ arm = lbinfo->raid1DevHandle[0] ==
+ scsi_raid_io->DevHandle ? 0 : 1;
lbinfo->scsi_pending_cmds[arm]--;
cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
con_log(CL_DLEVEL3, (CE_NOTE,
"FastPath IO Completion Success "));
+ /* FALLTHRU */
- case MPI2_FUNCTION_LD_IO_REQUEST : {// Regular Path IO.
+ case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
acmd = (struct scsa_cmd *)cmd->cmd;
pkt = (struct scsi_pkt *)CMD2PKT(acmd);
if (acmd->cmd_flags & CFLAG_DMAVALID) {
if (acmd->cmd_flags & CFLAG_CONSISTENT) {
(void) ddi_dma_sync(acmd->cmd_dmahandle,
- acmd->cmd_dma_offset,
- acmd->cmd_dma_len,
+ acmd->cmd_dma_offset, acmd->cmd_dma_len,
DDI_DMA_SYNC_FORCPU);
}
}
pkt->pkt_reason = CMD_CMPLT;
pkt->pkt_statistics = 0;
- pkt->pkt_state = STATE_GOT_BUS
- | STATE_GOT_TARGET | STATE_SENT_CMD
- | STATE_XFERRED_DATA | STATE_GOT_STATUS;
+ pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
- con_log(CL_ANN, (CE_CONT,
- " CDB[0] = %x completed for %s: size %lx SMID %x cmd_status %x",
- pkt->pkt_cdbp[0],
+ con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
+ "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
((acmd->islogical) ? "LD" : "PD"),
acmd->cmd_dmacount, cmd->SMID, status));
if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
struct scsi_inquiry *inq;
@@ -2463,26 +2466,19 @@
acmd->cmd_buf->b_un.b_addr;
/* don't expose physical drives to OS */
if (acmd->islogical &&
(status == MFI_STAT_OK)) {
- display_scsi_inquiry(
- (caddr_t)inq);
- }
+ display_scsi_inquiry((caddr_t)inq);
#ifdef PDSUPPORT
- else if ((status ==
- MFI_STAT_OK) && inq->inq_dtype ==
- DTYPE_DIRECT) {
-
- display_scsi_inquiry(
- (caddr_t)inq);
- }
+ } else if ((status == MFI_STAT_OK) &&
+ inq->inq_dtype == DTYPE_DIRECT) {
+ display_scsi_inquiry((caddr_t)inq);
#endif
- else {
+ } else {
/* for physical disk */
- status =
- MFI_STAT_DEVICE_NOT_FOUND;
+ status = MFI_STAT_DEVICE_NOT_FOUND;
}
}
}
switch (status) {
@@ -2503,15 +2499,15 @@
case MFI_STAT_SCSI_DONE_WITH_ERROR:
con_log(CL_ANN, (CE_WARN,
"tbolt_complete_cmd: scsi_done with error"));
pkt->pkt_reason = CMD_CMPLT;
- ((struct scsi_status *)
- pkt->pkt_scbp)->sts_chk = 1;
+ ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
- con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));
+ con_log(CL_ANN,
+ (CE_WARN, "TEST_UNIT_READY fail"));
} else {
pkt->pkt_state |= STATE_ARQ_DONE;
arqstat = (void *)(pkt->pkt_scbp);
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
@@ -2519,32 +2515,34 @@
STATE_GOT_BUS | STATE_GOT_TARGET
| STATE_SENT_CMD
| STATE_XFERRED_DATA;
*(uint8_t *)&arqstat->sts_rqpkt_status =
STATUS_GOOD;
- con_log(CL_ANN1, (CE_NOTE,
- "Copying Sense data %x",
+ con_log(CL_ANN1,
+ (CE_NOTE, "Copying Sense data %x",
cmd->SMID));
- ddi_rep_get8(
- instance->
- mpi2_frame_pool_dma_obj.acc_handle,
- (uint8_t *)
- &(arqstat->sts_sensedata),
+ ddi_rep_get8(acc_handle,
+ (uint8_t *)&(arqstat->sts_sensedata),
cmd->sense1,
sizeof (struct scsi_extended_sense),
DDI_DEV_AUTOINCR);
}
break;
case MFI_STAT_LD_OFFLINE:
cmn_err(CE_WARN,
"tbolt_complete_cmd: ld offline "
- "CDB[0]=0x%x targetId=0x%x devhandle=0x%x\n", //UNDO:
- ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->CDB.CDB32[0]),
- ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.ldTargetId),
- ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->DevHandle) );
+ "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
+ /* UNDO: */
+ ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
+
+ ddi_get16(acc_handle,
+ &scsi_raid_io->RaidContext.ldTargetId),
+
+ ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
+
pkt->pkt_reason = CMD_DEV_GONE;
pkt->pkt_statistics = STAT_DISCON;
break;
case MFI_STAT_DEVICE_NOT_FOUND:
con_log(CL_ANN, (CE_CONT,
@@ -2554,27 +2552,23 @@
break;
case MFI_STAT_LD_LBA_OUT_OF_RANGE:
pkt->pkt_state |= STATE_ARQ_DONE;
pkt->pkt_reason = CMD_CMPLT;
- ((struct scsi_status *)
- pkt->pkt_scbp)->sts_chk = 1;
+ ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
arqstat = (void *)(pkt->pkt_scbp);
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_state |= STATE_GOT_BUS
| STATE_GOT_TARGET | STATE_SENT_CMD
| STATE_XFERRED_DATA;
- *(uint8_t *)&arqstat->sts_rqpkt_status =
- STATUS_GOOD;
+ *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
arqstat->sts_sensedata.es_valid = 1;
- arqstat->sts_sensedata.es_key =
- KEY_ILLEGAL_REQUEST;
- arqstat->sts_sensedata.es_class =
- CLASS_EXTENDED_SENSE;
+ arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
+ arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
/*
* LOGICAL BLOCK ADDRESS OUT OF RANGE:
* ASC: 0x21h; ASCQ: 0x00h;
*/
@@ -2592,66 +2586,76 @@
break;
}
atomic_add_16(&instance->fw_outstanding, (-1));
+ (void) mrsas_common_check(instance, cmd);
+ if (acmd->cmd_dmahandle) {
+ if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
+ DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip,
+ DDI_SERVICE_UNAFFECTED);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ pkt->pkt_statistics = 0;
+ }
+ }
+
/* Call the callback routine */
- if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
- pkt->pkt_comp) {
+ if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
(*pkt->pkt_comp)(pkt);
- }
con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.status, 0);
+ ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
- ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_raid_io->RaidContext.extStatus, 0);
+ ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
return_raid_msg_pkt(instance, cmd);
break;
}
- case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: // MFA command.
+ case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
- if (cmd->frame->dcmd.opcode
- == MR_DCMD_LD_MAP_GET_INFO &&
- cmd->frame->dcmd.mbox.b[1]
- == 1) {
+ if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
+ cmd->frame->dcmd.mbox.b[1] == 1) {
mutex_enter(&instance->sync_map_mtx);
con_log(CL_ANN, (CE_NOTE,
"LDMAP sync command SMID RECEIVED 0x%X",
cmd->SMID));
if (cmd->frame->hdr.cmd_status != 0) {
cmn_err(CE_WARN,
- "map sync failed, status = 0x%x.\n",cmd->frame->hdr.cmd_status);
- }
- else {
+ "map sync failed, status = 0x%x.",
+ cmd->frame->hdr.cmd_status);
+ } else {
instance->map_id++;
cmn_err(CE_NOTE,
- "map sync received, switched map_id to %ld \n",instance->map_id);
+ "map sync received, switched map_id to %"
+ PRIu64 " \n", instance->map_id);
}
- if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info))
+ if (MR_ValidateMapInfo(instance->ld_map[
+ (instance->map_id & 1)],
+ instance->load_balance_info)) {
instance->fast_path_io = 1;
- else
+ } else {
instance->fast_path_io = 0;
+ }
con_log(CL_ANN, (CE_NOTE,
- "instance->fast_path_io %d \n",instance->fast_path_io));
+ "instance->fast_path_io %d",
+ instance->fast_path_io));
instance->unroll.syncCmd = 0;
- if(instance->map_update_cmd == cmd) {
+ if (instance->map_update_cmd == cmd) {
return_raid_msg_pkt(instance, cmd);
atomic_add_16(&instance->fw_outstanding, (-1));
- mrsas_tbolt_sync_map_info(instance);
+ (void) mrsas_tbolt_sync_map_info(instance);
}
- cmn_err(CE_NOTE, "LDMAP sync completed.\n");
+ cmn_err(CE_NOTE, "LDMAP sync completed.");
mutex_exit(&instance->sync_map_mtx);
break;
}
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
@@ -2658,38 +2662,37 @@
con_log(CL_ANN1, (CE_CONT,
"AEN command SMID RECEIVED 0x%X",
cmd->SMID));
if ((instance->aen_cmd == cmd) &&
(instance->aen_cmd->abort_aen)) {
- con_log(CL_ANN, (CE_WARN,
- "mrsas_softintr: "
+ con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
"aborted_aen returned"));
- }
- else
- {
+ } else {
atomic_add_16(&instance->fw_outstanding, (-1));
service_mfi_aen(instance, cmd);
}
}
- if (cmd->sync_cmd == MRSAS_TRUE ) {
+ if (cmd->sync_cmd == MRSAS_TRUE) {
con_log(CL_ANN1, (CE_CONT,
"Sync-mode Command Response SMID RECEIVED 0x%X",
cmd->SMID));
tbolt_complete_cmd_in_sync_mode(instance, cmd);
- }
- else
- {
+ } else {
con_log(CL_ANN, (CE_CONT,
"tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
cmd->SMID));
}
break;
default:
+ mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+
/* free message */
- con_log(CL_ANN, (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
+ con_log(CL_ANN,
+ (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
break;
}
}
uint_t
@@ -2699,13 +2702,10 @@
Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
Mpi2ReplyDescriptorsUnion_t *desc;
uint16_t smid;
union desc_value d_val;
struct mrsas_cmd *cmd;
- uint32_t i;
- Mpi2RaidSCSIIORequest_t *scsi_raid_io;
- uint8_t status;
struct mrsas_header *hdr;
struct scsi_pkt *pkt;
(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
@@ -2722,13 +2722,23 @@
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
return (DDI_INTR_UNCLAIMED);
- con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %llx Words = %llx \n",
- desc, desc->Words));
+ if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
+ != DDI_SUCCESS) {
+ mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ con_log(CL_ANN1,
+ (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
+ "FMA check, returning DDI_INTR_UNCLAIMED"));
+ return (DDI_INTR_CLAIMED);
+ }
+ con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
+ (void *)desc, desc->Words));
+
d_val.word = desc->Words;
/* Read Reply descriptor */
while ((d_val.u1.low != 0xffffffff) &&
@@ -2739,47 +2749,46 @@
smid = replyDesc->SMID;
if (!smid || smid > instance->max_fw_cmds + 1) {
con_log(CL_ANN1, (CE_NOTE,
- "Reply Desc at Break = %llx Words = %llx \n",
- desc, desc->Words));
+ "Reply Desc at Break = %p Words = %" PRIx64,
+ (void *)desc, desc->Words));
break;
}
cmd = instance->cmd_list[smid - 1];
- if(!cmd ) {
- con_log(CL_ANN1, (CE_NOTE,
- "mr_sas_tbolt_process_outstanding_cmd: Invalid command "
- " or Poll commad Received in completion path\n"));
- }
- else {
+ if (!cmd) {
+ con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
+ "outstanding_cmd: Invalid command "
+ " or Poll commad Received in completion path"));
+ } else {
mutex_enter(&instance->cmd_pend_mtx);
if (cmd->sync_cmd == MRSAS_TRUE) {
hdr = (struct mrsas_header *)&cmd->frame->hdr;
if (hdr) {
- con_log(CL_ANN1, (CE_NOTE,
- "mr_sas_tbolt_process_outstanding_cmd:"
- " mlist_del_init(&cmd->list).\n"));
+ con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
+ "tbolt_process_outstanding_cmd:"
+ " mlist_del_init(&cmd->list)."));
mlist_del_init(&cmd->list);
}
} else {
pkt = cmd->pkt;
if (pkt) {
- con_log(CL_ANN1, (CE_NOTE,
- "mr_sas_tbolt_process_outstanding_cmd:"
- "mlist_del_init(&cmd->list).\n"));
+ con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
+ "tbolt_process_outstanding_cmd:"
+ "mlist_del_init(&cmd->list)."));
mlist_del_init(&cmd->list);
}
}
mutex_exit(&instance->cmd_pend_mtx);
tbolt_complete_cmd(instance, cmd);
}
- // set it back to all 0xfffffffff.
- desc->Words = (uint64_t)~0;
+ /* set it back to all 1s. */
+ desc->Words = -1LL;
instance->reply_read_index++;
if (instance->reply_read_index >= (instance->reply_q_depth)) {
con_log(CL_ANN1, (CE_NOTE, "wrap around"));
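
The sentinel reset and index wrap above are the whole consumption protocol for the reply ring: a slot holding all 1s is unused, anything else is a completed descriptor, and the reader restores the sentinel before advancing. A runnable toy version (QDEPTH and the seeded values are illustrative; the driver sizes the ring by reply_q_depth):

#include <stdint.h>
#include <stdio.h>

#define QDEPTH 4				/* illustrative depth */

static uint64_t ring[QDEPTH] = { 0x1234, 0x5678, ~0ULL, ~0ULL };
static unsigned read_idx;

int
main(void)
{
	while (ring[read_idx] != ~0ULL) {
		printf("consume desc %llx\n",
		    (unsigned long long)ring[read_idx]);
		ring[read_idx] = ~0ULL;		/* back to all 1s */
		if (++read_idx >= QDEPTH)
			read_idx = 0;		/* wrap around */
	}
	return (0);
}
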
@@ -2795,12 +2804,12 @@
replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
d_val.word = desc->Words;
con_log(CL_ANN1, (CE_NOTE,
- "Next Reply Desc = %llx Words = %llx\n",
- desc, desc->Words));
+ "Next Reply Desc = %p Words = %" PRIx64,
+ (void *)desc, desc->Words));
replyType = replyDesc->ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
@@ -2888,18 +2897,17 @@
ci = instance->ld_map[(instance->map_id & 1)];
ci_h = instance->ld_map_phy[(instance->map_id & 1)];
if (!ci) {
- cmn_err(CE_WARN,
- "Failed to alloc mem for ld_map_info");
+ cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
return_raid_msg_pkt(instance, cmd);
return (-1);
}
- memset(ci, 0, sizeof (*ci));
- memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
+ bzero(ci, sizeof (*ci));
+ bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
dcmd->cmd = MFI_CMD_OP_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
@@ -2913,15 +2921,13 @@
mr_sas_tbolt_build_mfi_cmd(instance, cmd);
if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
ret = 0;
- con_log(CL_ANN1, (CE_NOTE,
- "Get LD Map Info success\n"));
+ con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
} else {
- cmn_err(CE_WARN,
- "Get LD Map Info failed\n");
+ cmn_err(CE_WARN, "Get LD Map Info failed");
ret = -1;
}
return_raid_msg_pkt(instance, cmd);
@@ -2943,40 +2949,44 @@
"i=%d, %x:%x",
i, d_val.u1.high, d_val.u1.low));
}
}
-/**
+/*
* mrsas_tbolt_prepare_cdb - Prepare a 32-byte CDB for fast path.
* @io_info: MegaRAID IO request packet pointer.
* @ref_tag: Reference tag for RD/WRPROTECT
*
* Prepare the DIF-protected CDB for the fast path.
*/
void
-mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],struct IO_REQUEST_INFO *io_info,Mpi2RaidSCSIIORequest_t *scsi_io_request, U32 ref_tag)
+mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
+ struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
+ U32 ref_tag)
{
uint16_t EEDPFlags;
uint32_t Control;
- // Prepare 32-byte CDB if DIF is supported on this device
- con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
+ ddi_acc_handle_t acc_handle =
+ instance->mpi2_frame_pool_dma_obj.acc_handle;
- memset(cdb, 0, 32);
+ /* Prepare 32-byte CDB if DIF is supported on this device */
+ con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
+ bzero(cdb, 32);
+
cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
- if (io_info->isRead) {
+ if (io_info->isRead)
cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
- }
- else {
+ else
cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
- }
- cdb[10] = MRSAS_RD_WR_PROTECT; // Verify with in linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL
+ /* Verify against the Linux driver, which sets MEGASAS_RD_WR_PROTECT_CHECK_ALL */
+ cdb[10] = MRSAS_RD_WR_PROTECT;
/* LOGICAL BLOCK ADDRESS */
cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
@@ -2985,64 +2995,59 @@
cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
cdb[19] = (U8)((io_info->pdBlock) & 0xff);
/* Logical block reference tag */
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
- BIG_ENDIAN(ref_tag));
+ ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
+ BE_32(ref_tag));
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask,
- 0xffff);
+ ddi_put16(acc_handle,
+ &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->DataLength,
+ ddi_put32(acc_handle, &scsi_io_request->DataLength,
((io_info->numBlocks)*512));
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->IoFlags,32); /* Specify 32-byte cdb */
+ /* Specify 32-byte cdb */
+ ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
/* Transfer length */
cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
cdb[31] = (U8)((io_info->numBlocks) & 0xff);
/* set SCSI IO EEDPFlags */
- EEDPFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->EEDPFlags);
- Control = ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->Control);
+ EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
+ Control = ddi_get32(acc_handle, &scsi_io_request->Control);
- // set SCSI IO EEDPFlags bits
+ /* set SCSI IO EEDPFlags bits */
if (io_info->isRead) {
- // For READ commands, the EEDPFlags shall be set to specify to
- // Increment the Primary Reference Tag, to Check the Reference
- // Tag, and to Check and Remove the Protection Information
- // fields.
+ /*
+ * For READ commands, the EEDPFlags shall be set to specify to
+ * Increment the Primary Reference Tag, to Check the Reference
+ * Tag, and to Check and Remove the Protection Information
+ * fields.
+ */
EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- }
- else {
- // For WRITE commands, the EEDPFlags shall be set to specify to
- // Increment the Primary Reference Tag, and to Insert
- // Protection Information fields.
+ } else {
+ /*
+ * For WRITE commands, the EEDPFlags shall be set to specify to
+ * Increment the Primary Reference Tag, and to Insert
+ * Protection Information fields.
+ */
EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
Control |= (0x4 << 26);
- ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->EEDPFlags, EEDPFlags);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->Control, Control);
- ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
- &scsi_io_request->EEDPBlockSize,
- MRSAS_EEDPBLOCKSIZE);
+ ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
+ ddi_put32(acc_handle, &scsi_io_request->Control, Control);
+ ddi_put32(acc_handle,
+ &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
}
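
The shift-and-mask runs above byte-pack two big-endian fields into the 32-byte variable-length CDB: the LBA into bytes 12-19 and the transfer length into bytes 28-31. The same packing as a loop, assuming only that the buffer is the 32-byte CDB32 area (pack_cdb32 is an illustrative name, not a driver function):

#include <stdint.h>

static void
pack_cdb32(uint8_t cdb[32], uint64_t lba, uint32_t nblks)
{
	int i;

	for (i = 0; i < 8; i++)		/* cdb[12]..cdb[19], MSB first */
		cdb[12 + i] = (uint8_t)(lba >> (56 - 8 * i));
	for (i = 0; i < 4; i++)		/* cdb[28]..cdb[31], MSB first */
		cdb[28 + i] = (uint8_t)(nblks >> (24 - 8 * i));
}
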
/*
* mrsas_tbolt_set_pd_lba - Sets PD LBA
@@ -3050,34 +3055,37 @@
* @cdb_len: cdb length
* @start_blk: Start block of IO
*
* Used to set the PD LBA in CDB for FP IOs
*/
-void
-mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk, U32 num_blocks, U8 DifCapable)
+static void
+mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
+ U32 num_blocks)
{
U8 cdb_len = *cdb_len_ptr;
U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
(start_blk <= 0xffffffff)) {
if (cdb_len == 16) {
- con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(16) to READ10\n"));
+ con_log(CL_ANN,
+ (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
flagvals = cdb[1];
groupnum = cdb[14];
control = cdb[15];
} else {
- con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(12) to READ10\n"));
+ con_log(CL_ANN,
+ (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
flagvals = cdb[1];
groupnum = cdb[10];
control = cdb[11];
}
- memset(cdb, 0, sizeof(cdb));
+ bzero(cdb, 32); /* cdb is a pointer here; CDB.CDB32 is 32 bytes */
cdb[0] = opcode;
cdb[1] = flagvals;
cdb[6] = groupnum;
cdb[9] = control;
@@ -3085,11 +3093,12 @@
cdb[8] = (U8)(num_blocks & 0xff);
cdb[7] = (U8)((num_blocks >> 8) & 0xff);
cdb_len = 10;
} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
/* Convert to 16 byte CDB for large LBA's */
- con_log(CL_ANN, (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB\n"));
+ con_log(CL_ANN,
+ (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
switch (cdb_len) {
case 6:
opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
control = cdb[5];
break;
@@ -3105,11 +3114,11 @@
groupnum = cdb[10];
control = cdb[11];
break;
}
- memset(cdb, 0, sizeof(cdb));
+ bzero(cdb, 32);
cdb[0] = opcode;
cdb[1] = flagvals;
cdb[14] = groupnum;
cdb[15] = control;
@@ -3125,11 +3134,11 @@
} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
/* convert to 10 byte CDB */
opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
control = cdb[5];
- memset(cdb, 0, sizeof(cdb));
+ bzero(cdb, sizeof (cdb));
cdb[0] = opcode;
cdb[9] = control;
/* Set transfer length */
cdb[8] = (U8)(num_blocks & 0xff);
@@ -3177,76 +3186,79 @@
*cdb_len_ptr = cdb_len;
}
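
Stripped of the byte shuffling, mrsas_tbolt_set_pd_lba applies three conversion rules; pick_cdb_len below restates them in isolation (an illustrative helper, not a driver function):

#include <stdint.h>

static int
pick_cdb_len(int cur_len, uint64_t start_blk)
{
	if ((cur_len == 12 || cur_len == 16) && start_blk <= 0xffffffff)
		return (10);	/* some drives only take READ/WRITE(10) */
	if (cur_len < 16 && start_blk > 0xffffffff)
		return (16);	/* the LBA needs more than 32 bits */
	if (cur_len == 6 && start_blk > 0x1fffff)
		return (10);	/* a 6-byte CDB holds only a 21-bit LBA */
	return (cur_len);
}
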
-U8
+static int
mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
{
MR_FW_RAID_MAP_ALL *ld_map;
if (!mrsas_tbolt_get_ld_map_info(instance)) {
ld_map = instance->ld_map[(instance->map_id & 1)];
- con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
+ con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
- if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info)) {
- con_log(CL_ANN, (CE_CONT,
- "MR_ValidateMapInfo success"));
+ if (MR_ValidateMapInfo(instance->ld_map[
+ (instance->map_id & 1)], instance->load_balance_info)) {
+ con_log(CL_ANN,
+ (CE_CONT, "MR_ValidateMapInfo success"));
instance->fast_path_io = 1;
- con_log(CL_ANN, (CE_NOTE,
- "instance->fast_path_io %d \n",instance->fast_path_io));
+ con_log(CL_ANN,
+ (CE_NOTE, "instance->fast_path_io %d",
+ instance->fast_path_io));
return (DDI_SUCCESS);
}
}
instance->fast_path_io = 0;
cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
con_log(CL_ANN, (CE_NOTE,
- "instance->fast_path_io %d \n",instance->fast_path_io));
+ "instance->fast_path_io %d", instance->fast_path_io));
-
return (DDI_FAILURE);
}
+
/*
* Marks HBA as bad. This will be called either when an
* IO packet times out even after 3 FW resets
* or FW is found to be fault even after 3 continuous resets.
*/
-int
+void
mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
{
- cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
+ cmn_err(CE_NOTE, "TBOLT Kill adapter called");
if (instance->deadadapter == 1)
- return (DDI_FAILURE);
+ return;
con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
"Writing to doorbell with MFI_STOP_ADP "));
mutex_enter(&instance->ocr_flags_mtx);
instance->deadadapter = 1;
mutex_exit(&instance->ocr_flags_mtx);
instance->func_ptr->disable_intr(instance);
WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
/* Flush */
- RD_RESERVED0_REGISTER(instance);
+ (void) RD_RESERVED0_REGISTER(instance);
(void) mrsas_print_pending_cmds(instance);
- mrsas_complete_pending_cmds(instance);
- return (DDI_SUCCESS);
+ (void) mrsas_complete_pending_cmds(instance);
}
-void mrsas_reset_reply_desc(struct mrsas_instance *instance)
+
+void
+mrsas_reset_reply_desc(struct mrsas_instance *instance)
{
int i;
MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
- instance->reply_read_index= 0;
+ instance->reply_read_index = 0;
/* initializing reply address to 0xFFFFFFFF */
reply_desc = instance->reply_frame_pool;
for (i = 0; i < instance->reply_q_depth; i++) {
@@ -3256,21 +3268,19 @@
}
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
- uint32_t status=0x00;
+ uint32_t status = 0x00;
uint32_t retry = 0;
- uint32_t seq_num;
uint32_t cur_abs_reg_val;
uint32_t fw_state;
- union mrsas_evt_class_locale class_locale;
uint32_t abs_state;
uint32_t i;
con_log(CL_ANN, (CE_NOTE,
- "mrsas_tbolt_reset_ppc entered\n "));
+ "mrsas_tbolt_reset_ppc entered"));
if (instance->deadadapter == 1) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"no more resets as HBA has been marked dead ");
return (DDI_FAILURE);
@@ -3282,16 +3292,16 @@
"adpterresetinprogress flag set, time %llx", gethrtime()));
mutex_exit(&instance->ocr_flags_mtx);
instance->func_ptr->disable_intr(instance);
- /*Add delay inorder to complete the ioctl & io cmds in-flight */
- for (i = 0; i<3000; i++) {
+ /* Add delay in order to complete the ioctl & io cmds in-flight */
+ for (i = 0; i < 3000; i++) {
drv_usecwait(MILLISEC); /* wait for 1000 usecs */
}
- instance->reply_read_index= 0;
+ instance->reply_read_index = 0;
retry_reset:
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
":Resetting TBOLT "));
@@ -3301,51 +3311,54 @@
WR_TBOLT_IB_WRITE_SEQ(2, instance);
WR_TBOLT_IB_WRITE_SEQ(7, instance);
WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
con_log(CL_ANN1, (CE_NOTE,
"mrsas_tbolt_reset_ppc: magic number written "
- "to write sequence register\n"));
+ "to write sequence register"));
delay(100 * drv_usectohz(MILLISEC));
status = RD_TBOLT_HOST_DIAG(instance);
con_log(CL_ANN1, (CE_NOTE,
"mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
- "to write sequence register\n"));
+ "to write sequence register"));
while (status & DIAG_TBOLT_RESET_ADAPTER) {
delay(100 * drv_usectohz(MILLISEC));
status = RD_TBOLT_HOST_DIAG(instance);
if (retry++ == 100) {
cmn_err(CE_WARN,
"mrsas_tbolt_reset_ppc:"
"resetadapter bit is set already "
- "check retry count %d\n", retry);
+ "check retry count %d", retry);
return (DDI_FAILURE);
}
}
WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
delay(100 * drv_usectohz(MILLISEC));
ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
(uint8_t *)((uintptr_t)(instance)->regmap +
- RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);
+ RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
delay(100 * drv_usectohz(MILLISEC));
ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
(uint8_t *)((uintptr_t)(instance)->regmap +
- RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);
+ RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
if (retry++ == 100) {
/* Don't call kill adapter here */
/* The ADAPTER RESET bit is cleared by firmware */
- //mrsas_tbolt_kill_adapter(instance);
- cmn_err(CE_WARN, "mr_sas %d: %s(): RESET FAILED; return failure!!!", instance->instance, __func__);
+ /* mrsas_tbolt_kill_adapter(instance); */
+ cmn_err(CE_WARN,
+ "mr_sas %d: %s(): RESET FAILED; return failure!!!",
+ instance->instance, __func__);
return (DDI_FAILURE);
}
}
- con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
+ con_log(CL_ANN,
+ (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mfi_state_transition_to_ready"));
abs_state = instance->func_ptr->read_fw_status_reg(instance);
retry = 0;
@@ -3352,12 +3365,13 @@
while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
delay(100 * drv_usectohz(MILLISEC));
abs_state = instance->func_ptr->read_fw_status_reg(instance);
}
if (abs_state <= MFI_STATE_FW_INIT) {
- cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
- "state = 0x%x, RETRY RESET.\n", abs_state);
+ cmn_err(CE_WARN,
+ "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
+ "state = 0x%x, RETRY RESET.", abs_state);
goto retry_reset;
}
/* Mark HBA as bad, if FW is fault after 3 continuous resets */
if (mfi_state_transition_to_ready(instance) ||
@@ -3370,15 +3384,16 @@
"mrsas_tbolt_reset_ppc :before fake: FW is not ready "
"FW state = 0x%x", fw_state));
if (debug_tbolt_fw_faults_after_ocr_g == 1)
fw_state = MFI_STATE_FAULT;
- con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
+ con_log(CL_ANN,
+ (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
"FW state = 0x%x", fw_state));
if (fw_state == MFI_STATE_FAULT) {
- // increment the count
+ /* increment the count */
instance->fw_fault_count_after_ocr++;
if (instance->fw_fault_count_after_ocr
< MAX_FW_RESET_COUNT) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"FW is in fault after OCR count %d "
@@ -3388,66 +3403,67 @@
} else {
cmn_err(CE_WARN, "mrsas %d: %s:"
"Max Reset Count exceeded >%d"
"Mark HBA as bad, KILL adapter",
- instance->instance, __func__, MAX_FW_RESET_COUNT);
+ instance->instance, __func__,
+ MAX_FW_RESET_COUNT);
mrsas_tbolt_kill_adapter(instance);
return (DDI_FAILURE);
}
}
}
- // reset the counter as FW is up after OCR
+ /* reset the counter as FW is up after OCR */
instance->fw_fault_count_after_ocr = 0;
mrsas_reset_reply_desc(instance);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mrsas_issue_init_mpi2"));
abs_state = mrsas_issue_init_mpi2(instance);
- if(abs_state == DDI_FAILURE) {
+ if (abs_state == (uint32_t)DDI_FAILURE) {
cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
"INIT failed Retrying Reset");
goto retry_reset;
}
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"mrsas_issue_init_mpi2 Done"));
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
- "Calling mrsas_print_pending_cmd\n"));
- mrsas_print_pending_cmds(instance);
+ "Calling mrsas_print_pending_cmd"));
+ (void) mrsas_print_pending_cmds(instance);
con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
- "mrsas_print_pending_cmd done\n"));
+ "mrsas_print_pending_cmd done"));
instance->func_ptr->enable_intr(instance);
instance->fw_outstanding = 0;
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling mrsas_issue_pending_cmds"));
- mrsas_issue_pending_cmds(instance);
+ (void) mrsas_issue_pending_cmds(instance);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
- "issue_pending_cmds done.\n"));
+ "issue_pending_cmds done."));
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"Calling aen registration"));
instance->aen_cmd->retry_count_for_ocr = 0;
instance->aen_cmd->drv_pkt_time = 0;
instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
- con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
+ con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
mutex_enter(&instance->ocr_flags_mtx);
instance->adapterresetinprogress = 0;
mutex_exit(&instance->ocr_flags_mtx);
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
"adpterresetinprogress flag unset"));
- con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
return (DDI_SUCCESS);
}
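
Both waits in mrsas_tbolt_reset_ppc follow the same bounded-poll shape: re-read the diag register on a fixed delay and give up after 100 tries instead of spinning forever. A sketch with the register and delay stubbed (RESET_BIT stands in for DIAG_TBOLT_RESET_ADAPTER):

#include <stdint.h>

#define RESET_BIT 0x04			/* stand-in for DIAG_TBOLT_RESET_ADAPTER */

static volatile uint32_t host_diag;	/* stub for the host-diag register */
static void delay_100ms(void) {}	/* stub for delay(100 * drv_usectohz(MILLISEC)) */

static int
wait_reset_clear(void)
{
	uint32_t retry = 0;

	while (host_diag & RESET_BIT) {
		delay_100ms();
		if (retry++ == 100)
			return (-1);	/* caller fails or retries the OCR */
	}
	return (0);
}
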
@@ -3458,11 +3474,11 @@
* Issues an internal command (DCMD) to get the FW's controller PD
* list structure. This information is mainly used to find out SYSTEM
* supported by the FW.
*/
-int
+static int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
int ret = 0, i;
struct mrsas_cmd *cmd = NULL;
struct mrsas_dcmd_frame *dcmd;
@@ -3475,17 +3491,17 @@
uint32_t size_map_info;
cmd = get_raid_msg_pkt(instance);
if (cmd == NULL) {
- cmn_err(CE_WARN,
- "Failed to get a cmd from free-pool in mrsas_tbolt_sync_map_info(). ");
+ cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
+ "mrsas_tbolt_sync_map_info(). ");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
- (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
+ bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
@@ -3495,49 +3511,50 @@
dcmd = &cmd->frame->dcmd;
size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
- con_log(CL_ANN, (CE_NOTE,
- "size_sync_info =0x%x ; ld count = 0x%x \n ",
+ con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x; ld count = 0x%x",
size_sync_info, num_lds));
ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
- memset(ci, 0, sizeof(MR_FW_RAID_MAP_ALL));
+ bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
- (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
+ bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
ld_sync = (LD_TARGET_SYNC *)ci;
for (i = 0; i < num_lds; i++, ld_sync++) {
raid = MR_LdRaidGet(i, map);
- con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
+ con_log(CL_ANN1,
+ (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
i, raid->seqNum, raid->flags.ldSyncRequired));
ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
- con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
+ con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
i, ld_sync->ldTargetId));
ld_sync->seqNum = raid->seqNum;
}
- size_map_info = sizeof(MR_FW_RAID_MAP) +
- (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
+ size_map_info = sizeof (MR_FW_RAID_MAP) +
+ (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
dcmd->cmd = MFI_CMD_OP_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = size_map_info;
- dcmd->mbox.b[0] = num_lds;
+ ASSERT(num_lds <= 255);
+ dcmd->mbox.b[0] = (U8)num_lds;
dcmd->mbox.b[1] = 1; /* Pend */
dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = ci_h;
dcmd->sgl.sge32[0].length = size_map_info;
@@ -3546,11 +3563,11 @@
mr_sas_tbolt_build_mfi_cmd(instance, cmd);
instance->func_ptr->issue_cmd(cmd, instance);
instance->unroll.syncCmd = 1;
- con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x",cmd->SMID));
+ con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
return (ret);
}
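
The (map_id & 1) and (map_id - 1) & 1 indexing used here and in the LDMAP-sync completion path is a two-slot double buffer: one RAID map copy is active, the other is staging, and bumping map_id after a successful sync flips them. The scheme in isolation (names are illustrative):

#include <stdint.h>

static void *ld_map[2];		/* two map copies, as in the driver */
static uint64_t map_id;

static void *
active_map(void)
{
	return (ld_map[map_id & 1]);
}

static void *
staging_map(void)
{
	return (ld_map[(map_id - 1) & 1]);
}

static void
map_sync_completed(void)
{
	map_id++;		/* staging becomes active */
}
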
/*
@@ -3573,11 +3590,11 @@
cmn_err(CE_WARN,
"Failed to get a cmd from free-pool abort_syncmap_cmd().");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
- (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
+ bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
abort_fr = &cmd->frame->abort;
@@ -3621,13 +3638,11 @@
{
struct scsi_device *sd;
dev_info_t *child;
int rval, dtype;
struct mrsas_tbolt_pd_info *pds = NULL;
- uint64_t *wwn;
-
con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
tgt, lun));
if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
if (ldip) {
@@ -3636,11 +3651,11 @@
if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
rval = mrsas_service_evt(instance, tgt, 1,
MRSAS_EVT_UNCONFIG_TGT, NULL);
con_log(CL_ANN1, (CE_WARN,
"mr_sas:DELETING STALE ENTRY rval = %d "
- "tgt id = %d ", rval, tgt));
+ "tgt id = %d", rval, tgt));
return (NDI_FAILURE);
}
return (NDI_SUCCESS);
}
@@ -3647,14 +3662,15 @@
pds = (struct mrsas_tbolt_pd_info *)
kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
mrsas_tbolt_get_pd_info(instance, pds, tgt);
dtype = pds->scsiDevType;
- /* Check for Disk*/
+ /* Check for Disk */
if ((dtype == DTYPE_DIRECT)) {
if ((dtype == DTYPE_DIRECT) &&
(LE_16(pds->fwState) != PD_SYSTEM)) {
+ kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
return (NDI_FAILURE);
}
sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
sd->sd_address.a_hba_tran = instance->tran;
sd->sd_address.a_target = (uint16_t)tgt;
@@ -3665,12 +3681,12 @@
con_log(CL_DLEVEL1, (CE_NOTE,
"Phys. device found: tgt %d dtype %d: %s",
tgt, dtype, sd->sd_inq->inq_vid));
} else {
rval = NDI_FAILURE;
- con_log(CL_DLEVEL1, (CE_NOTE,
- "Phys. device Not found scsi_hba_probe Failed: tgt %d dtype %d: %s",
+ con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
+ "scsi_hba_probe Failed: tgt %d dtype %d: %s",
tgt, dtype, sd->sd_inq->inq_vid));
}
/* sd_unprobe is blank now. Free buffer manually */
if (sd->sd_inq) {
@@ -3689,27 +3705,29 @@
kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
rval));
return (rval);
}
+
static void
-mrsas_tbolt_get_pd_info(struct mrsas_instance *instance, struct mrsas_tbolt_pd_info *pds,
- int tgt)
+mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
+ struct mrsas_tbolt_pd_info *pds, int tgt)
{
struct mrsas_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
dma_obj_t dcmd_dma_obj;
cmd = get_raid_msg_pkt(instance);
if (!cmd) {
- con_log(CL_ANN1, (CE_WARN, "Failed to get a cmd for get pd info"));
+ con_log(CL_ANN1,
+ (CE_WARN, "Failed to get a cmd for get pd info"));
return;
}
/* Clear the frame buffer and assign back the context id */
- memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
+ bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
cmd->index);
dcmd = &cmd->frame->dcmd;
@@ -3720,16 +3738,17 @@
dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
dcmd_dma_obj.dma_attr.dma_attr_align = 1;
(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
DDI_STRUCTURE_LE_ACC);
- (void) memset(dcmd_dma_obj.buffer, 0, sizeof (struct mrsas_tbolt_pd_info));
- (void) memset(dcmd->mbox.b, 0, 12);
+ bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
+ bzero(dcmd->mbox.b, 12);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
- ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, MFI_FRAME_DIR_READ);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
+ MFI_FRAME_DIR_READ);
ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
sizeof (struct mrsas_tbolt_pd_info));
ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
MR_DCMD_PD_GET_INFO);