1 /*
2 * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
3 * controllers, i.e. Thunderbolt and Invader
4 *
5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 * All rights reserved.
8 *
9 * Version:
10 * Author:
11 * Swaminathan K S
12 * Arun Chandrashekhar
13 * Manju R
14 * Rasheed
15 * Shakeel Bukhari
16 */
17
18
20 #include <sys/types.h>
21 #include <sys/file.h>
22 #include <sys/atomic.h>
23 #include <sys/scsi/scsi.h>
24 #include <sys/byteorder.h>
25 #include "ld_pd_map.h"
26 #include "mr_sas.h"
27 #include "fusion.h"
28
29
30 // Pre-TB command size and TB command size.
31 #define MR_COMMAND_SIZE (64*20) // 1280 bytes
32 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
33 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
34 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
35 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *in_info);
36 extern ddi_dma_attr_t mrsas_generic_dma_attr;
37 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
38 extern struct ddi_device_acc_attr endian_attr;
39 extern int debug_level_g;
187 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
188 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
189
190 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
191 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
192 cmn_err(CE_WARN,
193 "mr_sas: could not alloc mpi2 frame pool");
194 return (DDI_FAILURE);
195 }
196
197 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
198 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
199
200 instance->io_request_frames =
201 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
202 instance->io_request_frames_phy =
203 (uint32_t)
204 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
205
206 con_log(CL_DLEVEL3, (CE_NOTE,
207 "io_request_frames 0x%x",
208 instance->io_request_frames));
209
210 con_log(CL_DLEVEL3, (CE_NOTE,
211 "io_request_frames_phy 0x%x",
212 instance->io_request_frames_phy));
213
214 io_req_base = (uint8_t *)instance->io_request_frames +
215 MRSAS_THUNDERBOLT_MSG_SIZE;
216 io_req_base_phys = instance->io_request_frames_phy +
217 MRSAS_THUNDERBOLT_MSG_SIZE;
218
219 con_log(CL_DLEVEL3, (CE_NOTE,
220 "io req_base_phys 0x%x", io_req_base_phys));
221
222 for (i = 0; i < max_cmd; i++) {
223 cmd = instance->cmd_list[i];
224
225 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
226
227 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
233 (max_cmd * raid_msg_size) + i * sgl_sz);
234
235 cmd->sgl_phys_addr =
236 (io_req_base_phys +
237 (max_cmd * raid_msg_size) + i * sgl_sz);
238
239 cmd->sense1 = (uint8_t *)
240 ((uint8_t *)io_req_base +
241 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
242 (i * SENSE_LENGTH));
243
244 cmd->sense_phys_addr1 =
245 (io_req_base_phys +
246 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
247 (i * SENSE_LENGTH));
248
249
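/* SMIDs are 1-based; SMID 0 is reserved by the MPI2 spec, hence i+1. */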
250 cmd->SMID = i+1;
251
252 con_log(CL_DLEVEL3, (CE_NOTE,
253 "Frame Pool Addr [%x]0x%x",
254 cmd->index, cmd->scsi_io_request));
255
256 con_log(CL_DLEVEL3, (CE_NOTE,
257 "Frame Pool Phys Addr [%x]0x%x",
258 cmd->index, cmd->scsi_io_request_phys_addr));
259
260 con_log(CL_DLEVEL3, (CE_NOTE,
261 "Sense Addr [%x]0x%x",
262 cmd->index, cmd->sense1));
263
264 con_log(CL_DLEVEL3, (CE_NOTE,
265 "Sense Addr Phys [%x]0x%x",
266 cmd->index, cmd->sense_phys_addr1));
267
268
269 con_log(CL_DLEVEL3, (CE_NOTE,
270 "Sgl bufffers [%x]0x%x",
271 cmd->index, cmd->sgl));
272
273 con_log(CL_DLEVEL3, (CE_NOTE,
274 "Sgl bufffers phys [%x]0x%x",
275 cmd->index, cmd->sgl_phys_addr));
276 }
277
278 return (DDI_SUCCESS);
279
280 }
281
282
283 /*
284 * alloc_additional_dma_buffer for AEN
285 */
286 int
287 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
288 {
289 uint32_t internal_buf_size = PAGESIZE*2;
290 int i;
416 * Allocate Request and Reply Queue Descriptors.
417 */
418 int
419 alloc_req_rep_desc(struct mrsas_instance *instance)
420 {
421 uint32_t request_q_sz, reply_q_sz;
422 int i, max_request_q_sz, max_reply_q_sz;
423 uint64_t request_desc;
424 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
425 uint64_t *reply_ptr;
426
427 /*
428 * ThunderBolt (TB) no longer uses a producer/consumer mechanism.
429 * On an interrupt we scan the list of reply descriptors and process
430 * each one. We therefore need to allocate memory for 1024 reply
431 * descriptors.
432 */
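/* Each MPI2 reply descriptor is 8 bytes, so the pool below is sized as 8 * queue depth. */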
433
434 /* Allocate Reply Descriptors */
435 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
436 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
437
438 // reply queue size should be multiple of 16
439 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
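// e.g. max_fw_cmds = 1008 gives ((1008 + 1 + 15) / 16) * 16 = 1024 descriptors.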
440
441 reply_q_sz = 8 * max_reply_q_sz;
442
443
444 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
445 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
446
447 instance->reply_desc_dma_obj.size = reply_q_sz;
448 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
449 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
450 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
451 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
452 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
453
454 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
455 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
456 cmn_err(CE_WARN,
457 "mr_sas: could not alloc reply queue");
458 return (DDI_FAILURE);
459 }
460
461 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
462 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
463
464 // virtual address of reply queue
465 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
466 instance->reply_desc_dma_obj.buffer);
467
468 instance->reply_q_depth = max_reply_q_sz;
469
470 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
471 instance->reply_q_depth));
472
473 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%x",
474 instance->reply_frame_pool));
475
476 /* initializing reply address to 0xFFFFFFFF */
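/* (an all-ones descriptor reads back as type UNUSED, which the completion path treats as "no new reply") */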
477 reply_desc = instance->reply_frame_pool;
478
479 for (i = 0; i < instance->reply_q_depth; i++) {
480 reply_desc->Words = (uint64_t)~0;
481 reply_desc++;
482 }
483
484
485 instance->reply_frame_pool_phy =
486 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
487
488 con_log(CL_ANN1, (CE_NOTE,
489 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
490
491
492 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
493 reply_q_sz);
494
495 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
496 instance->reply_pool_limit_phy));
497
498
499 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
500 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
501
502 /* Allocate Request Descriptors */
503 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
504 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
505
506 request_q_sz = 8 *
507 (instance->max_fw_cmds);
508
509 instance->request_desc_dma_obj.size = request_q_sz;
510 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
511 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
512 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
513 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
514 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
515
516 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
517 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
518 cmn_err(CE_WARN,
519 "mr_sas: could not alloc request queue desc");
520 goto fail_undo_reply_queue;
521 }
522
523 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
524 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
889 int numbytes, i;
890 int ret = DDI_SUCCESS;
891 uint16_t flags;
892 int status;
893 timespec_t time;
894 uint64_t mSec;
895 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
896 struct mrsas_init_frame2 *mfiFrameInit2;
897 struct mrsas_header *frame_hdr;
898 Mpi2IOCInitRequest_t *init;
899 struct mrsas_cmd *cmd = NULL;
900 struct mrsas_drv_ver drv_ver_info;
901 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
902
903
904 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
905
906
907 #ifdef DEBUG
908 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
909 (int)sizeof (*mfiFrameInit2)));
910 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
911 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
912 (int)sizeof (struct mrsas_init_frame2)));
913 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
914 (int)sizeof (Mpi2IOCInitRequest_t)));
915 #endif
916
917 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
918 numbytes = sizeof (*init);
919 bzero(init, numbytes);
920
921 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
922 MPI2_FUNCTION_IOC_INIT);
923
924 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
925 MPI2_WHOINIT_HOST_DRIVER);
926
927 /* set MsgVersion and HeaderVersion host driver was built with */
928 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
929 MPI2_VERSION);
930
931 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
932 MPI2_HEADER_VERSION);
933
934 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
952 ddi_put64(mpi2_dma_obj->acc_handle,
953 (uint64_t *)&init->SystemRequestFrameBaseAddress,
954 instance->io_request_frames_phy);
955
956 ddi_put64(mpi2_dma_obj->acc_handle,
957 &init->ReplyDescriptorPostQueueAddress,
958 instance->reply_frame_pool_phy);
959
960 ddi_put64(mpi2_dma_obj->acc_handle,
961 &init->ReplyFreeQueueAddress, 0);
962
963 cmd = instance->cmd_list[0];
964 if (cmd == NULL) {
965 return (DDI_FAILURE);
966 }
967 cmd->retry_count_for_ocr = 0;
968 cmd->pkt = NULL;
969 cmd->drv_pkt_time = 0;
970
971 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
972 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%x", mfiFrameInit2));
973
974 frame_hdr = &cmd->frame->hdr;
975
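/* The INIT frame is polled: preset cmd_status and tell the firmware not to post a reply descriptor. */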
976 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
977 MFI_CMD_STATUS_POLL_MODE);
978
979 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
980
981 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
982
983 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
984
985 con_log(CL_ANN, (CE_CONT,
986 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
987
988 // Init the MFI Header
989 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
990 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
991
992 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
993
994 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
995 &mfiFrameInit2->cmd_status,
996 MFI_STAT_INVALID_STATUS);
997
998 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
999
1000 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1001 &mfiFrameInit2->queue_info_new_phys_addr_lo,
1002 mpi2_dma_obj->dma_cookie[0].dmac_address);
1003
1004 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1005 &mfiFrameInit2->data_xfer_len,
1006 sizeof (Mpi2IOCInitRequest_t));
1007
1008 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1009 (int)init->ReplyDescriptorPostQueueAddress));
1010
1011 /* fill driver version information*/
1012 fill_up_drv_ver(&drv_ver_info);
1013
1014 /* allocate the driver version data transfer buffer */
1015 instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
1016 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1017 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1018 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1019 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1020 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1021
1022 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1023 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1024 cmn_err(CE_WARN,
1025 "fusion init: Could not allocate driver version buffer.");
1026 return (DDI_FAILURE);
1027 }
1028 /* copy driver version to dma buffer*/
1029 (void) memset(instance->drv_ver_dma_obj.buffer, 0, sizeof(drv_ver_info.drv_ver));
1030 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1031 (uint8_t *)drv_ver_info.drv_ver,
1032 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1033 sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1034
1035 /*send driver version physical address to firmware*/
1036 ddi_put64(cmd->frame_dma_obj.acc_handle,
1037 &mfiFrameInit2->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1038
1039 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1040 mfiFrameInit2->queue_info_new_phys_addr_lo,
1041 (int)sizeof (Mpi2IOCInitRequest_t)));
1042
1043 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1044
1045 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1046 cmd->scsi_io_request_phys_addr, (int)sizeof (struct mrsas_init_frame2)));
1047
1048 /* disable interrupts before sending INIT2 frame */
1049 instance->func_ptr->disable_intr(instance);
1050
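/* Wrap the MFI INIT frame in an MFA-type request descriptor; its Words field carries the frame's physical address. */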
1051 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1052 instance->request_message_pool;
1053 req_desc->Words = cmd->scsi_io_request_phys_addr;
1054 req_desc->MFAIo.RequestFlags =
1055 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1056
1057 cmd->request_desc = req_desc;
1058
1059 /* issue the init frame */
1060 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1061
1062 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1063 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1064 frame_hdr->cmd_status));
1065
1066 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1167 }
1168
1169 if (cmd == NULL) {
1170 return (TRAN_BUSY);
1171 }
1172
1173
1174 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1175 if (instance->fw_outstanding > instance->max_fw_cmds) {
1176 cmn_err(CE_WARN,
1177 "Command Queue Full... Returning BUSY \n");
1178 return_raid_msg_pkt(instance, cmd);
1179 return (TRAN_BUSY);
1180 }
1181
1182 /* Synchronize the Cmd frame for the controller */
1183 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1184 DDI_DMA_SYNC_FORDEV);
1185
1186 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1187 "cmd->index:0x%x SMID %0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
1188
1189 instance->func_ptr->issue_cmd(cmd, instance);
1190
1191 return (TRAN_ACCEPT);
1192
1193 } else {
1194 instance->func_ptr->issue_cmd(cmd, instance);
1195 (void) wait_for_outstanding_poll_io(instance);
1196 return (TRAN_ACCEPT);
1197 }
1198 }
1199
1200 /*
1201 * prepare the pkt:
1202 * the pkt may have been resubmitted or just reused so
1203 * initialize some fields and do some checks.
1204 */
1205 int
1206 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1207 {
1299 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1300 &scsi_raid_io_sgl_ieee->Address,
1301 acmd->cmd_dmacookies[i].dmac_laddress);
1302
1303 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1304 &scsi_raid_io_sgl_ieee->Length,
1305 acmd->cmd_dmacookies[i].dmac_size);
1306
1307 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1308 &scsi_raid_io_sgl_ieee->Flags, 0);
1309
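/* Invader requires the final SGE to carry an explicit end-of-list flag. */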
1310 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1311 if (i == (numElements - 1))
1312 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1313 &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1314 }
1315
1316 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1317
1318 #ifdef DEBUG
1319 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]:%llx",
1320 scsi_raid_io_sgl_ieee->Address));
1321 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1322 scsi_raid_io_sgl_ieee->Length));
1323 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1324 scsi_raid_io_sgl_ieee->Flags));
1325 #endif
1326
1327 }
1328
1329 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1330 &scsi_raid_io->ChainOffset, 0);
1331
1332 /* check if chained SGL required */
1333 if (i < numElements) {
1334
1335 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1336
1337 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1338 uint16_t ioFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1339 &scsi_raid_io->IoFlags);
1389 &scsi_raid_io_sgl_ieee->Address,
1390 acmd->cmd_dmacookies[i].dmac_laddress);
1391
1392 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1393 &scsi_raid_io_sgl_ieee->Length,
1394 acmd->cmd_dmacookies[i].dmac_size);
1395
1396 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1397 &scsi_raid_io_sgl_ieee->Flags, 0);
1398
1399 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1400 if (i == (numElements - 1))
1401 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1402 &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1403 }
1404
1405 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1406
1407 #ifdef DEBUG
1408 con_log(CL_DLEVEL1, (CE_NOTE,
1409 "[SGL Address]: %" PRIx64,
1410 scsi_raid_io_sgl_ieee->Address));
1411 con_log(CL_DLEVEL1, (CE_NOTE,
1412 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1413 con_log(CL_DLEVEL1, (CE_NOTE,
1414 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1415 #endif
1416
1417 i++;
1418 }
1419 }
1420
1421 return (0);
1422 } /*end of BuildScatterGather */
1423
1424
1425 /*
1426 * build_cmd
1427 */
1428 struct mrsas_cmd *
1429 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1969 }
1970
1971
1972 uint32_t
1973 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1974 {
1975 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1976 }
1977
1978 void
1979 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1980 {
1981 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1982 atomic_add_16(&instance->fw_outstanding, 1);
1983
1984 struct scsi_pkt *pkt;
1985
1986 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1987
1988 con_log(CL_DLEVEL1, (CE_CONT,
1989 " [req desc Words] %llx \n", req_desc->Words));
1990 con_log(CL_DLEVEL1, (CE_CONT,
1991 " [req desc low part] %x \n", req_desc->Words));
1992 con_log(CL_DLEVEL1, (CE_CONT,
1993 " [req desc high part] %x \n", (req_desc->Words >> 32)));
1994 pkt = cmd->pkt;
1995
1996 if (pkt) {
1997 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1998 "ISSUED CMD TO FW : called : cmd:"
1999 ": %p instance : %p pkt : %p pkt_time : %x\n",
2000 gethrtime(), (void *)cmd, (void *)instance,
2001 (void *)pkt, cmd->drv_pkt_time));
2002 if (instance->adapterresetinprogress) {
2003 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2004 con_log(CL_ANN, (CE_NOTE,
2005 "TBOLT Reset the scsi_pkt timer"));
2006 } else {
2007 push_pending_mfi_pkt(instance, cmd);
2008 }
2009
2010 } else {
2011 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2012 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2013 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2040 if (instance->adapterresetinprogress) {
2041 cmd->drv_pkt_time = ddi_get16
2042 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2043 if (cmd->drv_pkt_time < debug_timeout_g)
2044 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2045 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2046 "RESET-IN-PROGRESS, issue cmd & return.\n"));
2047
2048 mutex_enter(&instance->reg_write_mtx);
2049 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2050 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2051 mutex_exit(&instance->reg_write_mtx);
2052
2053 return (DDI_SUCCESS);
2054 } else {
2055 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
2056 push_pending_mfi_pkt(instance, cmd);
2057 }
2058
2059 con_log(CL_DLEVEL2, (CE_NOTE,
2060 "HighQport offset :%lx",
2061 (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2062 con_log(CL_DLEVEL2, (CE_NOTE,
2063 "LowQport offset :%lx",
2064 (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2065
2066 cmd->sync_cmd = MRSAS_TRUE;
2067 cmd->cmd_status = ENODATA;
2068
2069
2070 mutex_enter(&instance->reg_write_mtx);
2071 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2072 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2073 mutex_exit(&instance->reg_write_mtx);
2074
2075 con_log(CL_ANN1, (CE_NOTE,
2076 " req desc high part %x \n", (req_desc->Words >> 32)));
2077 con_log(CL_ANN1, (CE_NOTE,
2078 " req desc low part %x \n", req_desc->Words));
2079
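/* Wait for the completion interrupt to update cmd_status and signal int_cmd_cv; give up after msecs wakeups. */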
2080 mutex_enter(&instance->int_cmd_mtx);
2081 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2082 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2083 }
2084 mutex_exit(&instance->int_cmd_mtx);
2085
2086
2087 if (i < (msecs - 1)) {
2088 return (DDI_SUCCESS);
2089 } else {
2090 return (DDI_FAILURE);
2091 }
2092 }
2093
2094 /*
2095 * issue_cmd_in_poll_mode
2096 */
2097 int
2098 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2099 struct mrsas_cmd *cmd)
2100 {
2101 int i;
2102 uint16_t flags;
2103 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2104 struct mrsas_header *frame_hdr;
2105
2106 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));
2107
2108 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2109
2110 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2111 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2112 MFI_CMD_STATUS_POLL_MODE);
2113 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2114 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2115 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2116
2117 con_log(CL_ANN1, (CE_NOTE,
2118 " req desc low part %x \n", req_desc->Words));
2119 con_log(CL_ANN1, (CE_NOTE,
2120 " req desc high part %x \n", (req_desc->Words >> 32)));
2121
2122 /* issue the frame using inbound queue port */
2123 mutex_enter(&instance->reg_write_mtx);
2124 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2125 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2126 mutex_exit(&instance->reg_write_mtx);
2127
2128 for (i = 0; i < msecs && (
2129 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2130 == MFI_CMD_STATUS_POLL_MODE); i++) {
2131 /* wait for cmd_status to change from 0xFF */
2132 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2133 }
2134
2135 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2136 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2137 con_log(CL_ANN1, (CE_NOTE,
2138 " cmd failed %x \n", (req_desc->Words)));
2139 return (DDI_FAILURE);
2140 }
2141
2142 return (DDI_SUCCESS);
2143 }
2144
2145 void
2146 tbolt_enable_intr(struct mrsas_instance *instance)
2147 {
2148 uint32_t mask;
2149
2150 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2151 // writel(~0, &regs->outbound_intr_status);
2152 // readl(&regs->outbound_intr_status);
2153
2154 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2155
2156 /* dummy read to force PCI flush */
2157 mask = RD_OB_INTR_MASK(instance);
2158
2295 void
2296 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2297 struct mrsas_cmd *cmd)
2298 {
2299 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2300 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2301 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2302 uint32_t index;
2303
2304 if (!instance->tbolt) {
2305 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
2306 return;
2307 }
2308
2309 index = cmd->index;
2310
2311 ReqDescUnion =
2312 mr_sas_get_request_descriptor(instance, index, cmd);
2313
2314 if (!ReqDescUnion) {
2315 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]%x"));
2316 return;
2317 }
2318
2319 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2320
2321 ReqDescUnion->Words = 0;
2322
2323 ReqDescUnion->SCSIIO.RequestFlags =
2324 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2325 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2326
2327 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2328
2329 cmd->request_desc = ReqDescUnion;
2330
2331 // get raid message frame pointer
2332 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2333
2334 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2335 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2353 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2354 &scsi_raid_io->SenseBufferLowAddress,
2355 cmd->sense_phys_addr1);
2356
2357
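/* Point a single IEEE chain element at the legacy MFI frame so the firmware can fetch it. */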
2358 scsi_raid_io_sgl_ieee =
2359 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2360
2361 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
2362 &scsi_raid_io_sgl_ieee->Address,
2363 (U64)cmd->frame_phys_addr);
2364
2365 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2366 &scsi_raid_io_sgl_ieee->Flags,
2367 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2368 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2369 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2370 &scsi_raid_io_sgl_ieee->Length, 1024); //MEGASAS_MAX_SZ_CHAIN_FRAME
2371
2372 con_log(CL_ANN1, (CE_NOTE,
2373 "[MFI CMD PHY ADDRESS]:%x",
2374 scsi_raid_io_sgl_ieee->Address));
2375 con_log(CL_ANN1, (CE_NOTE,
2376 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2377 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2378 scsi_raid_io_sgl_ieee->Flags));
2379 }
2380
2381
2382 void
2383 tbolt_complete_cmd(struct mrsas_instance *instance,
2384 struct mrsas_cmd *cmd)
2385 {
2386 uint8_t status;
2387 uint8_t extStatus;
2388 uint8_t arm;
2389 struct scsa_cmd *acmd;
2390 struct scsi_pkt *pkt;
2391 struct scsi_arq_status *arqstat;
2392 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2393 LD_LOAD_BALANCE_INFO *lbinfo;
2613 }
2614 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: // MFA command.
2615
2616 if (cmd->frame->dcmd.opcode
2617 == MR_DCMD_LD_MAP_GET_INFO &&
2618 cmd->frame->dcmd.mbox.b[1]
2619 == 1) {
2620
2621 mutex_enter(&instance->sync_map_mtx);
2622
2623 con_log(CL_ANN, (CE_NOTE,
2624 "LDMAP sync command SMID RECEIVED 0x%X",
2625 cmd->SMID));
2626 if (cmd->frame->hdr.cmd_status != 0) {
2627 cmn_err(CE_WARN,
2628 "map sync failed, status = 0x%x.\n",cmd->frame->hdr.cmd_status);
2629 }
2630 else {
2631 instance->map_id++;
2632 cmn_err(CE_NOTE,
2633 "map sync received, switched map_id to %ld \n",instance->map_id);
2634 }
2635
2636 if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info))
2637 instance->fast_path_io = 1;
2638 else
2639 instance->fast_path_io = 0;
2640
2641 con_log(CL_ANN, (CE_NOTE,
2642 "instance->fast_path_io %d \n",instance->fast_path_io));
2643
2644 instance->unroll.syncCmd = 0;
2645
2646 if (instance->map_update_cmd == cmd) {
2647 return_raid_msg_pkt(instance, cmd);
2648 atomic_add_16(&instance->fw_outstanding, (-1));
2649 mrsas_tbolt_sync_map_info(instance);
2650 }
2651
2652 cmn_err(CE_NOTE, "LDMAP sync completed.\n");
2653 mutex_exit(&instance->sync_map_mtx);
2707
2708 struct mrsas_header *hdr;
2709 struct scsi_pkt *pkt;
2710
2711 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2712 0, 0, DDI_DMA_SYNC_FORDEV);
2713
2714 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2715 0, 0, DDI_DMA_SYNC_FORCPU);
2716
2717 desc = instance->reply_frame_pool;
2718 desc += instance->reply_read_index;
2719
2720 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 replyType = replyDesc->ReplyFlags &
2722 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2723
2724 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2725 return (DDI_INTR_UNCLAIMED);
2726
2727 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %llx Words = %llx \n",
2728 desc, desc->Words));
2729
2730 d_val.word = desc->Words;
2731
2732
2733 /* Read Reply descriptor */
2734 while ((d_val.u1.low != 0xffffffff) &&
2735 (d_val.u1.high != 0xffffffff)) {
2736
2737 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2738 0, 0, DDI_DMA_SYNC_FORCPU);
2739
2740 smid = replyDesc->SMID;
2741
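/* Valid SMIDs run from 1 to max_fw_cmds + 1; anything outside that range means a corrupt descriptor, so stop scanning. */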
2742 if (!smid || smid > instance->max_fw_cmds + 1) {
2743 con_log(CL_ANN1, (CE_NOTE,
2744 "Reply Desc at Break = %llx Words = %llx \n",
2745 desc, desc->Words));
2746 break;
2747 }
2748
2749 cmd = instance->cmd_list[smid - 1];
2750 if (!cmd) {
2751 con_log(CL_ANN1, (CE_NOTE,
2752 "mr_sas_tbolt_process_outstanding_cmd: Invalid command "
2753 " or Poll command received in completion path\n"));
2754 }
2755 else {
2756 mutex_enter(&instance->cmd_pend_mtx);
2757 if (cmd->sync_cmd == MRSAS_TRUE) {
2758 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2759 if (hdr) {
2760 con_log(CL_ANN1, (CE_NOTE,
2761 "mr_sas_tbolt_process_outstanding_cmd:"
2762 " mlist_del_init(&cmd->list).\n"));
2763 mlist_del_init(&cmd->list);
2764 }
2780 desc->Words = (uint64_t)~0;
2781
2782 instance->reply_read_index++;
2783
2784 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2785 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2786 instance->reply_read_index = 0;
2787 }
2788
2789 /* Get the next reply descriptor */
2790 if (!instance->reply_read_index)
2791 desc = instance->reply_frame_pool;
2792 else
2793 desc++;
2794
2795 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2796
2797 d_val.word = desc->Words;
2798
2799 con_log(CL_ANN1, (CE_NOTE,
2800 "Next Reply Desc = %llx Words = %llx\n",
2801 desc, desc->Words));
2802
2803 replyType = replyDesc->ReplyFlags &
2804 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2805
2806 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2807 break;
2808
2809 } /* End of while loop. */
2810
2811 /* update replyIndex to FW; writing the host index also returns the consumed descriptors to the firmware */
2812 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2813
2814
2815 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2816 0, 0, DDI_DMA_SYNC_FORDEV);
2817
2818 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2819 0, 0, DDI_DMA_SYNC_FORCPU);
2820 return (DDI_INTR_CLAIMED);