719 if (strncmp(data, "no", 3) == 0) {
720 ctio_enable = 0;
721 con_log(CL_ANN1, (CE_WARN,
722 "ctio_enable = %d disabled", ctio_enable));
723 }
724 ddi_prop_free(data);
725 }
726
727 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
728
729 /* setup the mfi based low level driver */
730 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
731 cmn_err(CE_WARN, "mr_sas: "
732 "could not initialize the low level driver");
733
734 goto fail_attach;
735 }
736
737 /* Initialize all Mutex */
738 INIT_LIST_HEAD(&instance->completed_pool_list);
739 mutex_init(&instance->completed_pool_mtx,
740 "completed_pool_mtx", MUTEX_DRIVER,
741 DDI_INTR_PRI(instance->intr_pri));
742
743 mutex_init(&instance->sync_map_mtx,
744 "sync_map_mtx", MUTEX_DRIVER,
745 DDI_INTR_PRI(instance->intr_pri));
746
747 mutex_init(&instance->app_cmd_pool_mtx,
748 "app_cmd_pool_mtx", MUTEX_DRIVER,
749 DDI_INTR_PRI(instance->intr_pri));
750
751 mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
752 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
753
754 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
755 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
756
757 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
758 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
759
760 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
761 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
762 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
763
764 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
765 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
766
767 mutex_init(&instance->reg_write_mtx, "reg_write_mtx",
768 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
769
770 if (instance->tbolt) {
771 mutex_init(&instance->cmd_app_pool_mtx,
772 "cmd_app_pool_mtx", MUTEX_DRIVER,
773 DDI_INTR_PRI(instance->intr_pri));
774
775 mutex_init(&instance->chip_mtx,
776 "chip_mtx", MUTEX_DRIVER,
777 DDI_INTR_PRI(instance->intr_pri));
778
779 }
780
781 instance->unroll.mutexs = 1;
782
783 instance->timeout_id = (timeout_id_t)-1;
784
785 /* Register our soft-isr for highlevel interrupts. */
786 instance->isr_level = instance->intr_pri;
787 if (!(instance->tbolt)) {
788 if (instance->isr_level == HIGH_LEVEL_INTR) {
789 if (ddi_add_softintr(dip,
790 DDI_SOFTINT_HIGH,
791 &instance->soft_intr_id, NULL, NULL,
792 mrsas_softintr, (caddr_t)instance) !=
793 DDI_SUCCESS) {
794 cmn_err(CE_WARN,
795 "Software ISR did not register");
796
797 goto fail_attach;
925 instance->unroll.aenPend = 1;
926 con_log(CL_ANN1,
927 (CE_CONT, "AEN started for instance %d.", instance_no));
928
929 /* Finally! We are on the air. */
930 ddi_report_dev(dip);
931
932 /* FMA handle checking. */
933 if (mrsas_check_acc_handle(instance->regmap_handle) !=
934 DDI_SUCCESS) {
935 goto fail_attach;
936 }
937 if (mrsas_check_acc_handle(instance->pci_handle) !=
938 DDI_SUCCESS) {
939 goto fail_attach;
940 }
941
942 instance->mr_ld_list =
943 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
944 KM_SLEEP);
945 if (instance->mr_ld_list == NULL) {
946 cmn_err(CE_WARN, "mr_sas attach(): "
947 "failed to allocate ld_list array");
948 goto fail_attach;
949 }
950 instance->unroll.ldlist_buff = 1;
951
952 #ifdef PDSUPPORT
953 if (instance->tbolt) {
954 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
955 instance->mr_tbolt_pd_list =
956 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
957 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
958 ASSERT(instance->mr_tbolt_pd_list);
959 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
960 instance->mr_tbolt_pd_list[i].lun_type =
961 MRSAS_TBOLT_PD_LUN;
962 instance->mr_tbolt_pd_list[i].dev_id =
963 (uint8_t)i;
964 }
965
966 instance->unroll.pdlist_buff = 1;
967 }
968 #endif
969 break;
1428 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1429 int *rvalp)
1430 {
1431 int rval = 0;
1432
1433 struct mrsas_instance *instance;
1434 struct mrsas_ioctl *ioctl;
1435 struct mrsas_aen aen;
1436 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1437
1438 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1439
1440 if (instance == NULL) {
1441 /* invalid minor number */
1442 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1443 return (ENXIO);
1444 }
1445
1446 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1447 KM_SLEEP);
1448 if (ioctl == NULL) {
1449 /* Failed to allocate memory for ioctl */
1450 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: "
1451 "failed to allocate memory for ioctl"));
1452 return (ENOMEM);
1453 }
1454
1455 switch ((uint_t)cmd) {
1456 case MRSAS_IOCTL_FIRMWARE:
1457 if (ddi_copyin((void *)arg, ioctl,
1458 sizeof (struct mrsas_ioctl), mode)) {
1459 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1460 "ERROR IOCTL copyin"));
1461 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1462 return (EFAULT);
1463 }
1464
1465 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1466 rval = handle_drv_ioctl(instance, ioctl, mode);
1467 } else {
1468 rval = handle_mfi_ioctl(instance, ioctl, mode);
1469 }
1470
1471 if (ddi_copyout((void *)ioctl, (void *)arg,
1472 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1473 con_log(CL_ANN, (CE_WARN,
1474 "mrsas_ioctl: copy_to_user failed"));
1475 rval = 1;
1476 }
1477
1478 break;
1479 case MRSAS_IOCTL_AEN:
1480 con_log(CL_ANN,
1481 (CE_NOTE, "mrsas_ioctl: IOCTL Register AEN.\n"));
1482
1483 if (ddi_copyin((void *) arg, &aen,
1484 sizeof (struct mrsas_aen), mode)) {
1485 con_log(CL_ANN, (CE_WARN,
1486 "mrsas_ioctl: ERROR AEN copyin"));
1487 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1488 return (EFAULT);
1489 }
1490
1491 rval = handle_mfi_aen(instance, &aen);
1492
1493 if (ddi_copyout((void *) &aen, (void *)arg,
1494 sizeof (struct mrsas_aen), mode)) {
1495 con_log(CL_ANN, (CE_WARN,
1496 "mrsas_ioctl: copy_to_user failed"));
1497 rval = 1;
1498 }
1499
1500 break;
1501 default:
1502 rval = scsi_hba_ioctl(dev, cmd, arg,
3182 int count;
3183 uint32_t max_cmd;
3184 uint32_t reserve_cmd;
3185 size_t sz;
3186
3187 struct mrsas_cmd *cmd;
3188
3189 max_cmd = instance->max_fw_cmds;
3190 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3191 "max_cmd %x", max_cmd));
3192
3193
3194 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3195
3196 /*
3197 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3198 * Allocate the dynamic array first and then allocate individual
3199 * commands.
3200 */
3201 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3202 if (instance->cmd_list == NULL) {
3203 con_log(CL_NONE, (CE_WARN,
3204 "Failed to allocate memory for cmd_list"));
3205 return (DDI_FAILURE);
3206 }
3207
3208 /* create a frame pool and assign one frame to each cmd */
3209 for (count = 0; count < max_cmd; count++) {
3210 instance->cmd_list[count] =
3211 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3212 if (instance->cmd_list[count] == NULL) {
3213 con_log(CL_NONE, (CE_WARN,
3214 "Failed to allocate memory for mrsas_cmd"));
3215 goto mrsas_undo_cmds;
3216 }
3217 }
3218
3219 /* add all the commands to command pool */
3220
3221 INIT_LIST_HEAD(&instance->cmd_pool_list);
3222 INIT_LIST_HEAD(&instance->cmd_pend_list);
3223 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3224
3225 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3226
3227 for (i = 0; i < reserve_cmd; i++) {
3228 cmd = instance->cmd_list[i];
3229 cmd->index = i;
3230 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3231 }
3232
3233
3234 for (i = reserve_cmd; i < max_cmd; i++) {
3235 cmd = instance->cmd_list[i];
3236 cmd->index = i;
3237 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3713 /* Initialize adapter */
3714 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3715 con_log(CL_ANN,
3716 (CE_WARN, "mr_sas: could not initialize adapter"));
3717 return (DDI_FAILURE);
3718 }
3719
3720 /* gather misc FW related information */
3721 instance->disable_online_ctrl_reset = 0;
3722
3723 if (!get_ctrl_info(instance, &ctrl_info)) {
3724 instance->max_sectors_per_req = ctrl_info.max_request_size;
3725 con_log(CL_ANN1, (CE_NOTE,
3726 "product name %s ld present %d",
3727 ctrl_info.product_name, ctrl_info.ld_present_count));
3728 } else {
3729 instance->max_sectors_per_req = instance->max_num_sge *
3730 PAGESIZE / 512;
3731 }
3732
3733 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3734 instance->disable_online_ctrl_reset = 1;
3735 con_log(CL_ANN1,
3736 (CE_NOTE, "Disable online control Flag is set\n"));
3737 } else {
3738 con_log(CL_ANN1,
3739 (CE_NOTE, "Disable online control Flag is not set\n"));
3740 }
3741
3742 return (DDI_SUCCESS);
3743
3744 }
3745
3746
3747
3748 static int
3749 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3750 {
3751 struct mrsas_cmd *cmd;
3752 struct mrsas_init_frame *init_frame;
3753 struct mrsas_init_queue_info *initq_info;
3754
3755 /*
3756 * Prepare a init frame. Note the init frame points to queue info
3757 * structure. Each frame has SGL allocated after first 64 bytes. For
3758 * this frame - since we don't need any SGL - we use SGL's space as
3759 * queue info structure
3760 */
4003 }
4004 };
4005
4006 if (!instance->tbolt) {
4007 fw_ctrl = RD_IB_DOORBELL(instance);
4008 con_log(CL_ANN1, (CE_CONT,
4009 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
4010
4011 /*
4012 * Write 0xF to the doorbell register to do the following.
4013 * - Abort all outstanding commands (bit 0).
4014 * - Transition from OPERATIONAL to READY state (bit 1).
4015 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
4016 * - Set to release FW to continue running (i.e. BIOS handshake
4017 * (bit 3).
4018 */
4019 WR_IB_DOORBELL(0xF, instance);
4020 }
4021
4022 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4023 return (ENODEV);
4024 }
4025
4026 return (DDI_SUCCESS);
4027 }
4028
4029 /*
4030 * get_seq_num
4031 */
4032 static int
4033 get_seq_num(struct mrsas_instance *instance,
4034 struct mrsas_evt_log_info *eli)
4035 {
4036 int ret = DDI_SUCCESS;
4037
4038 dma_obj_t dcmd_dma_obj;
4039 struct mrsas_cmd *cmd;
4040 struct mrsas_dcmd_frame *dcmd;
4041 struct mrsas_evt_log_info *eli_tmp;
4042 if (instance->tbolt) {
4043 cmd = get_raid_msg_mfi_pkt(instance);
4861
4862 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4863 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4864 return (-1);
4865 }
4866
4867 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4868 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4869 return (-1);
4870 }
4871
4872 return (cookie_cnt);
4873 }
4874
4875 /*
4876 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4877 *
4878  * De-allocate the memory and other resources for a dma object, which must
4879  * have been allocated by a previous call to mrsas_alloc_dma_obj()
4880 */
4881 /* ARGSUSED */
4882 int
4883 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4884 {
4885
4886 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4887 return (DDI_SUCCESS);
4888 }
4889
4890 /*
4891 * NOTE: These check-handle functions fail if *_handle == NULL, but
4892 * this function succeeds because of the previous check.
4893 */
4894 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4895 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4896 return (DDI_FAILURE);
4897 }
4898
4899 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4900 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4901 return (DDI_FAILURE);
5575 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5576 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5577
5578 if (kpthru->sense_len) {
5579 uint_t sense_len = SENSE_LENGTH;
5580 void *sense_ubuf =
5581 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5582 if (kpthru->sense_len <= SENSE_LENGTH) {
5583 sense_len = kpthru->sense_len;
5584 }
5585
5586 for (i = 0; i < sense_len; i++) {
5587 if (ddi_copyout(
5588 (uint8_t *)cmd->sense+i,
5589 (uint8_t *)sense_ubuf+i, 1, mode)) {
5590 con_log(CL_ANN, (CE_WARN,
5591 "issue_mfi_pthru : "
5592 "copy to user space failed"));
5593 }
5594 con_log(CL_DLEVEL1, (CE_WARN,
5595 "Copying Sense info sense_buff[%d] = 0x%X\n",
5596 i, *((uint8_t *)cmd->sense + i)));
5597 }
5598 }
5599 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5600 DDI_DMA_SYNC_FORDEV);
5601
5602 if (xferlen) {
5603 /* free kernel buffer */
5604 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5605 return (DDI_FAILURE);
5606 }
5607
5608 return (DDI_SUCCESS);
5609 }
5610
5611 /*
5612 * issue_mfi_dcmd
5613 */
5614 static int
5615 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5616 struct mrsas_cmd *cmd, int mode)
5617 {
5618 void *ubuf;
5619 uint32_t kphys_addr = 0;
5620 uint32_t xferlen = 0;
5621 uint32_t new_xfer_length = 0;
5622 uint32_t model;
5623 dma_obj_t dcmd_dma_obj;
5624 struct mrsas_dcmd_frame *kdcmd;
5625 struct mrsas_dcmd_frame *dcmd;
5626 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5627 int i;
5628 dcmd = &cmd->frame->dcmd;
5629 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5630
5631 if (instance->adapterresetinprogress) {
5632 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5633 "returning mfi_pkt and setting TRAN_BUSY\n"));
5634 return (DDI_FAILURE);
5635 }
5636 model = ddi_model_convert_from(mode & FMODELS);
5637 if (model == DDI_MODEL_ILP32) {
5638 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5639
5640 xferlen = kdcmd->sgl.sge32[0].length;
5641
5642 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5643 } else {
5644 #ifdef _ILP32
5645 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5646 xferlen = kdcmd->sgl.sge32[0].length;
5647 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5648 #else
5649 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5650 xferlen = kdcmd->sgl.sge64[0].length;
5651 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5652 #endif
5653 }
7024 "flag set, time %llx", gethrtime()));
7025
7026 instance->func_ptr->disable_intr(instance);
7027 retry_reset:
7028 WR_IB_WRITE_SEQ(0, instance);
7029 WR_IB_WRITE_SEQ(4, instance);
7030 WR_IB_WRITE_SEQ(0xb, instance);
7031 WR_IB_WRITE_SEQ(2, instance);
7032 WR_IB_WRITE_SEQ(7, instance);
7033 WR_IB_WRITE_SEQ(0xd, instance);
7034 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
7035 "to write sequence register\n"));
7036 delay(100 * drv_usectohz(MILLISEC));
7037 status = RD_OB_DRWE(instance);
7038
7039 while (!(status & DIAG_WRITE_ENABLE)) {
7040 delay(100 * drv_usectohz(MILLISEC));
7041 status = RD_OB_DRWE(instance);
7042 if (retry++ == 100) {
7043 cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
7044 "check retry count %d\n", retry);
7045 return (DDI_FAILURE);
7046 }
7047 }
7048 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
7049 delay(100 * drv_usectohz(MILLISEC));
7050 status = RD_OB_DRWE(instance);
7051 while (status & DIAG_RESET_ADAPTER) {
7052 delay(100 * drv_usectohz(MILLISEC));
7053 status = RD_OB_DRWE(instance);
7054 if (retry++ == 100) {
7055 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7056 "RESET FAILED. KILL adapter called\n.");
7057
7058 (void) mrsas_kill_adapter(instance);
7059 return (DDI_FAILURE);
7060 }
7061 }
7062 con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
7063 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7064 "Calling mfi_state_transition_to_ready"));
7065
7066 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
7067 if (mfi_state_transition_to_ready(instance) ||
7068 debug_fw_faults_after_ocr_g == 1) {
7069 cur_abs_reg_val =
7070 instance->func_ptr->read_fw_status_reg(instance);
7071 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
7072
7073 #ifdef OCRDEBUG
7074 con_log(CL_ANN1, (CE_NOTE,
7075 "mrsas_reset_ppc :before fake: FW is not ready "
7076 "FW state = 0x%x", fw_state));
7363 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7364 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7365 "ret %d avail %d", ret, avail));
7366
7367 return (DDI_FAILURE);
7368 }
7369 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7370
7371 /* Only one interrupt routine. So limit the count to 1 */
7372 if (count > 1) {
7373 count = 1;
7374 }
7375
7376 /*
7377 * Allocate an array of interrupt handlers. Currently we support
7378 * only one interrupt. The framework can be extended later.
7379 */
7380 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7381 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7382 KM_SLEEP);
7383 if (instance->intr_htable == NULL) {
7384 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7385 "failed to allocate memory for intr-handle table"));
7386 instance->intr_htable_size = 0;
7387 return (DDI_FAILURE);
7388 }
7389
7390 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7391 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7392 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7393
7394 /* Allocate interrupt */
7395 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7396 count, &actual, flag);
7397
7398 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7399 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7400 "avail = %d", avail));
7401 goto mrsas_free_htable;
7402 }
7403
7404 if (actual < count) {
7405 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7406 "Requested = %d Received = %d", count, actual));
7407 }
7408 instance->intr_cnt = actual;
7667
7668 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7669 tgt, lun));
7670
7671 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7672 if (ldip) {
7673 *ldip = child;
7674 }
7675 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7676 rval = mrsas_service_evt(instance, tgt, 0,
7677 MRSAS_EVT_UNCONFIG_TGT, NULL);
7678 con_log(CL_ANN1, (CE_WARN,
7679 "mr_sas: DELETING STALE ENTRY rval = %d "
7680 "tgt id = %d ", rval, tgt));
7681 return (NDI_FAILURE);
7682 }
7683 return (NDI_SUCCESS);
7684 }
7685
7686 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7687 if (sd == NULL) {
7688 con_log(CL_ANN1, (CE_WARN, "mrsas_config_ld: "
7689 "failed to allocate mem for scsi_device"));
7690 return (NDI_FAILURE);
7691 }
7692 sd->sd_address.a_hba_tran = instance->tran;
7693 sd->sd_address.a_target = (uint16_t)tgt;
7694 sd->sd_address.a_lun = (uint8_t)lun;
7695
7696 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7697 rval = mrsas_config_scsi_device(instance, sd, ldip);
7698 else
7699 rval = NDI_FAILURE;
7700
7701 /* sd_unprobe is blank now. Free buffer manually */
7702 if (sd->sd_inq) {
7703 kmem_free(sd->sd_inq, SUN_INQSIZE);
7704 sd->sd_inq = (struct scsi_inquiry *)NULL;
7705 }
7706
7707 kmem_free(sd, sizeof (struct scsi_device));
7708 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7709 rval));
7710 return (rval);
7711 }
|
719 if (strncmp(data, "no", 3) == 0) {
720 ctio_enable = 0;
721 con_log(CL_ANN1, (CE_WARN,
722 "ctio_enable = %d disabled", ctio_enable));
723 }
724 ddi_prop_free(data);
725 }
726
727 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
728
729 /* setup the mfi based low level driver */
730 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
731 cmn_err(CE_WARN, "mr_sas: "
732 "could not initialize the low level driver");
733
734 goto fail_attach;
735 }
736
737 /* Initialize all Mutex */
738 INIT_LIST_HEAD(&instance->completed_pool_list);
739 mutex_init(&instance->completed_pool_mtx, NULL,
740 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
741
742 mutex_init(&instance->sync_map_mtx, NULL,
743 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
744
745 mutex_init(&instance->app_cmd_pool_mtx, NULL,
746 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
747
748 mutex_init(&instance->config_dev_mtx, NULL,
749 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
750
751 mutex_init(&instance->cmd_pend_mtx, NULL,
752 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
753
754 mutex_init(&instance->ocr_flags_mtx, NULL,
755 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
756
757 mutex_init(&instance->int_cmd_mtx, NULL,
758 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
759 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
760
761 mutex_init(&instance->cmd_pool_mtx, NULL,
762 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
763
764 mutex_init(&instance->reg_write_mtx, NULL,
765 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
766
767 if (instance->tbolt) {
768 mutex_init(&instance->cmd_app_pool_mtx, NULL,
769 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
770
771 mutex_init(&instance->chip_mtx, NULL,
772 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
773
774 }
775
776 instance->unroll.mutexs = 1;
777
778 instance->timeout_id = (timeout_id_t)-1;
779
780 /* Register our soft-isr for highlevel interrupts. */
781 instance->isr_level = instance->intr_pri;
782 if (!(instance->tbolt)) {
783 if (instance->isr_level == HIGH_LEVEL_INTR) {
784 if (ddi_add_softintr(dip,
785 DDI_SOFTINT_HIGH,
786 &instance->soft_intr_id, NULL, NULL,
787 mrsas_softintr, (caddr_t)instance) !=
788 DDI_SUCCESS) {
789 cmn_err(CE_WARN,
790 "Software ISR did not register");
791
792 goto fail_attach;
920 instance->unroll.aenPend = 1;
921 con_log(CL_ANN1,
922 (CE_CONT, "AEN started for instance %d.", instance_no));
923
924 /* Finally! We are on the air. */
925 ddi_report_dev(dip);
926
927 /* FMA handle checking. */
928 if (mrsas_check_acc_handle(instance->regmap_handle) !=
929 DDI_SUCCESS) {
930 goto fail_attach;
931 }
932 if (mrsas_check_acc_handle(instance->pci_handle) !=
933 DDI_SUCCESS) {
934 goto fail_attach;
935 }
936
937 instance->mr_ld_list =
938 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
939 KM_SLEEP);
940 instance->unroll.ldlist_buff = 1;
941
942 #ifdef PDSUPPORT
943 if (instance->tbolt) {
944 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
945 instance->mr_tbolt_pd_list =
946 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
947 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
948 ASSERT(instance->mr_tbolt_pd_list);
949 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
950 instance->mr_tbolt_pd_list[i].lun_type =
951 MRSAS_TBOLT_PD_LUN;
952 instance->mr_tbolt_pd_list[i].dev_id =
953 (uint8_t)i;
954 }
955
956 instance->unroll.pdlist_buff = 1;
957 }
958 #endif
959 break;
1418 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1419 int *rvalp)
1420 {
1421 int rval = 0;
1422
1423 struct mrsas_instance *instance;
1424 struct mrsas_ioctl *ioctl;
1425 struct mrsas_aen aen;
1426 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1427
1428 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1429
1430 if (instance == NULL) {
1431 /* invalid minor number */
1432 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1433 return (ENXIO);
1434 }
1435
1436 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1437 KM_SLEEP);
1438 ASSERT(ioctl);
1439
1440 switch ((uint_t)cmd) {
1441 case MRSAS_IOCTL_FIRMWARE:
1442 if (ddi_copyin((void *)arg, ioctl,
1443 sizeof (struct mrsas_ioctl), mode)) {
1444 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1445 "ERROR IOCTL copyin"));
1446 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1447 return (EFAULT);
1448 }
1449
1450 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1451 rval = handle_drv_ioctl(instance, ioctl, mode);
1452 } else {
1453 rval = handle_mfi_ioctl(instance, ioctl, mode);
1454 }
1455
1456 if (ddi_copyout((void *)ioctl, (void *)arg,
1457 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1458 con_log(CL_ANN, (CE_WARN,
1459 "mrsas_ioctl: copy_to_user failed"));
1460 rval = 1;
1461 }
1462
1463 break;
1464 case MRSAS_IOCTL_AEN:
1465 if (ddi_copyin((void *) arg, &aen,
1466 sizeof (struct mrsas_aen), mode)) {
1467 con_log(CL_ANN, (CE_WARN,
1468 "mrsas_ioctl: ERROR AEN copyin"));
1469 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1470 return (EFAULT);
1471 }
1472
1473 rval = handle_mfi_aen(instance, &aen);
1474
1475 if (ddi_copyout((void *) &aen, (void *)arg,
1476 sizeof (struct mrsas_aen), mode)) {
1477 con_log(CL_ANN, (CE_WARN,
1478 "mrsas_ioctl: copy_to_user failed"));
1479 rval = 1;
1480 }
1481
1482 break;
1483 default:
1484 rval = scsi_hba_ioctl(dev, cmd, arg,
3164 int count;
3165 uint32_t max_cmd;
3166 uint32_t reserve_cmd;
3167 size_t sz;
3168
3169 struct mrsas_cmd *cmd;
3170
3171 max_cmd = instance->max_fw_cmds;
3172 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3173 "max_cmd %x", max_cmd));
3174
3175
3176 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3177
3178 /*
3179 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3180 * Allocate the dynamic array first and then allocate individual
3181 * commands.
3182 */
3183 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3184 ASSERT(instance->cmd_list);
3185
3186 /* create a frame pool and assign one frame to each cmd */
3187 for (count = 0; count < max_cmd; count++) {
3188 instance->cmd_list[count] =
3189 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3190 ASSERT(instance->cmd_list[count]);
3191 }
3192
3193 /* add all the commands to command pool */
3194
3195 INIT_LIST_HEAD(&instance->cmd_pool_list);
3196 INIT_LIST_HEAD(&instance->cmd_pend_list);
3197 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3198
3199 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3200
3201 for (i = 0; i < reserve_cmd; i++) {
3202 cmd = instance->cmd_list[i];
3203 cmd->index = i;
3204 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3205 }
3206
3207
3208 for (i = reserve_cmd; i < max_cmd; i++) {
3209 cmd = instance->cmd_list[i];
3210 cmd->index = i;
3211 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3687 /* Initialize adapter */
3688 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3689 con_log(CL_ANN,
3690 (CE_WARN, "mr_sas: could not initialize adapter"));
3691 return (DDI_FAILURE);
3692 }
3693
3694 /* gather misc FW related information */
3695 instance->disable_online_ctrl_reset = 0;
3696
3697 if (!get_ctrl_info(instance, &ctrl_info)) {
3698 instance->max_sectors_per_req = ctrl_info.max_request_size;
3699 con_log(CL_ANN1, (CE_NOTE,
3700 "product name %s ld present %d",
3701 ctrl_info.product_name, ctrl_info.ld_present_count));
3702 } else {
3703 instance->max_sectors_per_req = instance->max_num_sge *
3704 PAGESIZE / 512;
3705 }
3706
3707 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG)
3708 instance->disable_online_ctrl_reset = 1;
3709
3710 return (DDI_SUCCESS);
3711
3712 }
3713
3714
3715
3716 static int
3717 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3718 {
3719 struct mrsas_cmd *cmd;
3720 struct mrsas_init_frame *init_frame;
3721 struct mrsas_init_queue_info *initq_info;
3722
3723 /*
3724 * Prepare a init frame. Note the init frame points to queue info
3725 * structure. Each frame has SGL allocated after first 64 bytes. For
3726 * this frame - since we don't need any SGL - we use SGL's space as
3727 * queue info structure
3728 */
3971 }
3972 };
3973
3974 if (!instance->tbolt) {
3975 fw_ctrl = RD_IB_DOORBELL(instance);
3976 con_log(CL_ANN1, (CE_CONT,
3977 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3978
3979 /*
3980 * Write 0xF to the doorbell register to do the following.
3981 * - Abort all outstanding commands (bit 0).
3982 * - Transition from OPERATIONAL to READY state (bit 1).
3983 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3984 * - Set to release FW to continue running (i.e. BIOS handshake
3985 * (bit 3).
3986 */
3987 WR_IB_DOORBELL(0xF, instance);
3988 }
3989
3990 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3991 return (EIO);
3992 }
3993
3994 return (DDI_SUCCESS);
3995 }
3996
3997 /*
3998 * get_seq_num
3999 */
4000 static int
4001 get_seq_num(struct mrsas_instance *instance,
4002 struct mrsas_evt_log_info *eli)
4003 {
4004 int ret = DDI_SUCCESS;
4005
4006 dma_obj_t dcmd_dma_obj;
4007 struct mrsas_cmd *cmd;
4008 struct mrsas_dcmd_frame *dcmd;
4009 struct mrsas_evt_log_info *eli_tmp;
4010 if (instance->tbolt) {
4011 cmd = get_raid_msg_mfi_pkt(instance);
4829
4830 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4831 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4832 return (-1);
4833 }
4834
4835 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4836 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4837 return (-1);
4838 }
4839
4840 return (cookie_cnt);
4841 }
4842
4843 /*
4844 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4845 *
4846  * De-allocate the memory and other resources for a dma object, which must
4847  * have been allocated by a previous call to mrsas_alloc_dma_obj()
4848 */
4849 int
4850 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4851 {
4852
4853 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4854 return (DDI_SUCCESS);
4855 }
4856
4857 /*
4858 * NOTE: These check-handle functions fail if *_handle == NULL, but
4859 * this function succeeds because of the previous check.
4860 */
4861 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4862 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4863 return (DDI_FAILURE);
4864 }
4865
4866 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4867 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4868 return (DDI_FAILURE);
5542 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5543 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5544
5545 if (kpthru->sense_len) {
5546 uint_t sense_len = SENSE_LENGTH;
5547 void *sense_ubuf =
5548 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5549 if (kpthru->sense_len <= SENSE_LENGTH) {
5550 sense_len = kpthru->sense_len;
5551 }
5552
5553 for (i = 0; i < sense_len; i++) {
5554 if (ddi_copyout(
5555 (uint8_t *)cmd->sense+i,
5556 (uint8_t *)sense_ubuf+i, 1, mode)) {
5557 con_log(CL_ANN, (CE_WARN,
5558 "issue_mfi_pthru : "
5559 "copy to user space failed"));
5560 }
5561 con_log(CL_DLEVEL1, (CE_WARN,
5562 "Copying Sense info sense_buff[%d] = 0x%X",
5563 i, *((uint8_t *)cmd->sense + i)));
5564 }
5565 }
5566 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5567 DDI_DMA_SYNC_FORDEV);
5568
5569 if (xferlen) {
5570 /* free kernel buffer */
5571 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5572 return (DDI_FAILURE);
5573 }
5574
5575 return (DDI_SUCCESS);
5576 }
5577
5578 /*
5579 * issue_mfi_dcmd
5580 */
5581 static int
5582 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5583 struct mrsas_cmd *cmd, int mode)
5584 {
5585 void *ubuf;
5586 uint32_t kphys_addr = 0;
5587 uint32_t xferlen = 0;
5588 uint32_t new_xfer_length = 0;
5589 uint32_t model;
5590 dma_obj_t dcmd_dma_obj;
5591 struct mrsas_dcmd_frame *kdcmd;
5592 struct mrsas_dcmd_frame *dcmd;
5593 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5594 int i;
5595 dcmd = &cmd->frame->dcmd;
5596 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5597
5598 if (instance->adapterresetinprogress) {
5599 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
5600 "returning mfi_pkt and setting TRAN_BUSY"));
5601 return (DDI_FAILURE);
5602 }
5603 model = ddi_model_convert_from(mode & FMODELS);
5604 if (model == DDI_MODEL_ILP32) {
5605 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5606
5607 xferlen = kdcmd->sgl.sge32[0].length;
5608
5609 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5610 } else {
5611 #ifdef _ILP32
5612 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5613 xferlen = kdcmd->sgl.sge32[0].length;
5614 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5615 #else
5616 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5617 xferlen = kdcmd->sgl.sge64[0].length;
5618 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5619 #endif
5620 }
/*
 * NOTE(review): mid-section of mrsas_reset_ppc() -- the function header
 * and tail are outside this view; 'retry', 'status', 'cur_abs_reg_val'
 * and 'fw_state' are declared (and 'retry' initialized) in the unseen
 * part.  Confirm against the full source.
 */
6991 "flag set, time %llx", gethrtime()));
6992
6993 instance->func_ptr->disable_intr(instance);
6994 retry_reset:
/*
 * Unlock the chip's diagnostic interface: the 0,4,0xb,2,7,0xd byte
 * sequence is the magic key the firmware expects in the inbound
 * write-sequence register before a diagnostic reset is honored.
 */
6995 WR_IB_WRITE_SEQ(0, instance);
6996 WR_IB_WRITE_SEQ(4, instance);
6997 WR_IB_WRITE_SEQ(0xb, instance);
6998 WR_IB_WRITE_SEQ(2, instance);
6999 WR_IB_WRITE_SEQ(7, instance);
7000 WR_IB_WRITE_SEQ(0xd, instance);
7001 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
7002 "to write sequence register\n"));
/* ~100ms settle time before the first status read. */
7003 delay(100 * drv_usectohz(MILLISEC));
7004 status = RD_OB_DRWE(instance);
7005
/*
 * Poll the outbound diag register until the firmware acknowledges the
 * unlock by asserting DIAG_WRITE_ENABLE; give up after ~100 polls of
 * ~100ms each.  NOTE(review): 'retry' is not visibly reset between
 * this loop and the one below -- the counter appears to be shared
 * across both polls; verify that is intentional in the full source.
 */
7006 while (!(status & DIAG_WRITE_ENABLE)) {
7007 delay(100 * drv_usectohz(MILLISEC));
7008 status = RD_OB_DRWE(instance);
7009 if (retry++ == 100) {
7010 cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
7011 "check retry count %d", retry);
7012 return (DDI_FAILURE);
7013 }
7014 }
/* Trigger the diagnostic reset, then wait for the bit to self-clear. */
7015 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
7016 delay(100 * drv_usectohz(MILLISEC));
7017 status = RD_OB_DRWE(instance);
7018 while (status & DIAG_RESET_ADAPTER) {
7019 delay(100 * drv_usectohz(MILLISEC));
7020 status = RD_OB_DRWE(instance);
7021 if (retry++ == 100) {
/* Reset never completed: take the adapter offline permanently. */
7022 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7023 "RESET FAILED. KILL adapter called.");
7024
7025 (void) mrsas_kill_adapter(instance);
7026 return (DDI_FAILURE);
7027 }
7028 }
7029 con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
7030 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7031 "Calling mfi_state_transition_to_ready"));
7032
7033 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
/*
 * Failure path entered when the firmware does not reach READY state
 * (or when the debug_fw_faults_after_ocr_g test hook forces it); the
 * handling continues past the end of this visible fragment.
 */
7034 if (mfi_state_transition_to_ready(instance) ||
7035 debug_fw_faults_after_ocr_g == 1) {
7036 cur_abs_reg_val =
7037 instance->func_ptr->read_fw_status_reg(instance);
7038 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
7039
7040 #ifdef OCRDEBUG
7041 con_log(CL_ANN1, (CE_NOTE,
7042 "mrsas_reset_ppc :before fake: FW is not ready "
7043 "FW state = 0x%x", fw_state));
/*
 * NOTE(review): mid-section of mrsas_add_intrs() -- the function header
 * (where dip, intr_type, ret, avail, count, actual and flag are set up)
 * and the mrsas_free_htable error path are outside this view.
 */
/* Bail out if the earlier ddi_intr_get_navail() reported nothing usable. */
7330 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7331 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7332 "ret %d avail %d", ret, avail));
7333
7334 return (DDI_FAILURE);
7335 }
7336 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7337
7338 /* Only one interrupt routine. So limit the count to 1 */
7339 if (count > 1) {
7340 count = 1;
7341 }
7342
7343 /*
7344  * Allocate an array of interrupt handlers. Currently we support
7345  * only one interrupt. The framework can be extended later.
7346  */
7347 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7348 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7349 KM_SLEEP);
7350 ASSERT(instance->intr_htable);
7351
/*
 * MSI/MSI-X allocations are all-or-nothing (STRICT); fixed interrupts
 * accept a partial allocation (NORMAL).
 */
7352 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7353 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7354 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7355
7356 /* Allocate interrupt */
7357 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7358 count, &actual, flag);
7359
/* On failure, free the handle table via the label outside this view. */
7360 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7361 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7362 "avail = %d", avail));
7363 goto mrsas_free_htable;
7364 }
7365
/* Partial grant is tolerated here (count was already clamped to 1). */
7366 if (actual < count) {
7367 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7368 "Requested = %d Received = %d", count, actual));
7369 }
7370 instance->intr_cnt = actual;
7629
/*
 * NOTE(review): tail of mrsas_config_ld() -- the signature and the code
 * that sets up 'instance', 'tgt', 'lun', 'ldip', 'rval', 'child' and
 * 'sd' lie outside this view.  Visible behavior: configure (attach a
 * devinfo node for) the logical drive at (tgt, lun), returning
 * NDI_SUCCESS/NDI_FAILURE and optionally handing the child dip back
 * through *ldip.
 */
7630 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7631 tgt, lun));
7632
/* Already configured?  Reuse the existing child node if it is valid. */
7633 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7634 if (ldip) {
7635 *ldip = child;
7636 }
/*
 * The node exists but the target is no longer valid: queue an
 * asynchronous unconfig of the stale entry and report failure.
 */
7637 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7638 rval = mrsas_service_evt(instance, tgt, 0,
7639 MRSAS_EVT_UNCONFIG_TGT, NULL);
7640 con_log(CL_ANN1, (CE_WARN,
7641 "mr_sas: DELETING STALE ENTRY rval = %d "
7642 "tgt id = %d ", rval, tgt));
7643 return (NDI_FAILURE);
7644 }
7645 return (NDI_SUCCESS);
7646 }
7647
/* Build a throwaway scsi_device so the target can be INQUIRY-probed. */
7648 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7649 sd->sd_address.a_hba_tran = instance->tran;
7650 sd->sd_address.a_target = (uint16_t)tgt;
7651 sd->sd_address.a_lun = (uint8_t)lun;
7652
7653 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7654 rval = mrsas_config_scsi_device(instance, sd, ldip);
7655 else
7656 rval = NDI_FAILURE;
7657
7658 /* sd_unprobe is blank now. Free buffer manually */
/* scsi_hba_probe() allocated sd_inq; release it before freeing sd. */
7659 if (sd->sd_inq) {
7660 kmem_free(sd->sd_inq, SUN_INQSIZE);
7661 sd->sd_inq = (struct scsi_inquiry *)NULL;
7662 }
7663
7664 kmem_free(sd, sizeof (struct scsi_device));
7665 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7666 rval));
7667 return (rval);
7668 }
|