27 * used to endorse or promote products derived from this software without
28 * specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 * DAMAGE.
42 */
43
44 /*
45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
48 */
49
50 #include <sys/types.h>
51 #include <sys/param.h>
52 #include <sys/file.h>
53 #include <sys/errno.h>
54 #include <sys/open.h>
55 #include <sys/cred.h>
56 #include <sys/modctl.h>
57 #include <sys/conf.h>
58 #include <sys/devops.h>
59 #include <sys/cmn_err.h>
60 #include <sys/kmem.h>
61 #include <sys/stat.h>
62 #include <sys/mkdev.h>
63 #include <sys/pci.h>
64 #include <sys/scsi/scsi.h>
65 #include <sys/ddi.h>
66 #include <sys/sunddi.h>
67 #include <sys/atomic.h>
68 #include <sys/signal.h>
69 #include <sys/byteorder.h>
70 #include <sys/sdt.h>
71 #include <sys/fs/dv_node.h> /* devfs_clean */
72
73 #include "mr_sas.h"
74
75 /*
76 * FMA header files
77 */
78 #include <sys/ddifm.h>
79 #include <sys/fm/protocol.h>
80 #include <sys/fm/util.h>
81 #include <sys/fm/io/ddi.h>
82
83 /*
84 * Local static data
85 */
86 static void *mrsas_state = NULL;
87 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
88 volatile int debug_level_g = CL_NONE;
89 static volatile int msi_enable = 1;
90 static volatile int ctio_enable = 1;
91
92 /* Default Timeout value to issue online controller reset */
93 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
94 /* Simulate consecutive firmware fault */
95 static volatile int debug_fw_faults_after_ocr_g = 0;
96 #ifdef OCRDEBUG
97 /* Simulate three consecutive timeout for an IO */
98 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
99 #endif
100
101 #pragma weak scsi_hba_open
102 #pragma weak scsi_hba_close
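/* Weak references: no hard module dependency on these SCSA symbols. */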
118 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
119 scsi_hba_tran_t *, struct scsi_device *);
120 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
121 struct scsi_pkt *, struct buf *, int, int, int, int,
122 int (*)(), caddr_t);
123 static int mrsas_tran_start(struct scsi_address *,
124 register struct scsi_pkt *);
125 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
126 static int mrsas_tran_reset(struct scsi_address *, int);
127 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
128 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
129 static void mrsas_tran_destroy_pkt(struct scsi_address *,
130 struct scsi_pkt *);
131 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
132 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
133 static int mrsas_tran_quiesce(dev_info_t *dip);
134 static int mrsas_tran_unquiesce(dev_info_t *dip);
135 static uint_t mrsas_isr();
136 static uint_t mrsas_softintr();
137 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
138 static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *);
139 static void return_mfi_pkt(struct mrsas_instance *,
140 struct mrsas_cmd *);
141
142 static void free_space_for_mfi(struct mrsas_instance *);
143 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
144 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
145 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
146 struct mrsas_cmd *);
147 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
148 struct mrsas_cmd *);
149 static void enable_intr_ppc(struct mrsas_instance *);
150 static void disable_intr_ppc(struct mrsas_instance *);
151 static int intr_ack_ppc(struct mrsas_instance *);
152 static void flush_cache(struct mrsas_instance *instance);
153 void display_scsi_inquiry(caddr_t);
154 static int start_mfi_aen(struct mrsas_instance *instance);
155 static int handle_drv_ioctl(struct mrsas_instance *instance,
156 struct mrsas_ioctl *ioctl, int mode);
157 static int handle_mfi_ioctl(struct mrsas_instance *instance,
158 struct mrsas_ioctl *ioctl, int mode);
159 static int handle_mfi_aen(struct mrsas_instance *instance,
160 struct mrsas_aen *aen);
558
559 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
560 "enable bus-mastering", instance_no));
561 } else {
562 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
563 "bus-mastering already set", instance_no));
564 }
565
566 /* initialize function pointers */
567 switch (device_id) {
568 case PCI_DEVICE_ID_LSI_TBOLT:
569 case PCI_DEVICE_ID_LSI_INVADER:
570 con_log(CL_ANN, (CE_NOTE,
571 "mr_sas: 2208 T.B. device detected"));
572
573 instance->func_ptr =
574 &mrsas_function_template_fusion;
575 instance->tbolt = 1;
576 break;
577
578 case PCI_DEVICE_ID_LSI_2108VDE:
579 case PCI_DEVICE_ID_LSI_2108V:
580 con_log(CL_ANN, (CE_NOTE,
581 "mr_sas: 2108 Liberator device detected"));
582
583 instance->func_ptr =
584 &mrsas_function_template_ppc;
585 break;
586
587 default:
588 cmn_err(CE_WARN,
589 "mr_sas: Invalid device detected");
590
591 pci_config_teardown(&instance->pci_handle);
592 ddi_soft_state_free(mrsas_state, instance_no);
593 return (DDI_FAILURE);
594 }
595
596 instance->baseaddress = pci_config_get32(
597 instance->pci_handle, PCI_CONF_BASE0);
798 }
799
800 instance->softint_running = 0;
801
802 /* Allocate a transport structure */
803 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
804
805 if (tran == NULL) {
806 cmn_err(CE_WARN,
807 "scsi_hba_tran_alloc failed");
808 goto fail_attach;
809 }
810
811 instance->tran = tran;
812 instance->unroll.tran = 1;
813
814 tran->tran_hba_private = instance;
815 tran->tran_tgt_init = mrsas_tran_tgt_init;
816 tran->tran_tgt_probe = scsi_hba_probe;
817 tran->tran_tgt_free = mrsas_tran_tgt_free;
818 if (instance->tbolt) {
819 tran->tran_init_pkt =
820 mrsas_tbolt_tran_init_pkt;
821 tran->tran_start =
822 mrsas_tbolt_tran_start;
823 } else {
824 tran->tran_init_pkt = mrsas_tran_init_pkt;
825 tran->tran_start = mrsas_tran_start;
826 }
827 tran->tran_abort = mrsas_tran_abort;
828 tran->tran_reset = mrsas_tran_reset;
829 tran->tran_getcap = mrsas_tran_getcap;
830 tran->tran_setcap = mrsas_tran_setcap;
831 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
832 tran->tran_dmafree = mrsas_tran_dmafree;
833 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
834 tran->tran_quiesce = mrsas_tran_quiesce;
835 tran->tran_unquiesce = mrsas_tran_unquiesce;
836 tran->tran_bus_config = mrsas_tran_bus_config;
837
838 if (mrsas_relaxed_ordering)
839 mrsas_generic_dma_attr.dma_attr_flags |=
840 DDI_DMA_RELAXED_ORDERING;
841
842
843 tran_dma_attr = mrsas_generic_dma_attr;
844 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
845
846 /* Attach this instance of the hba */
923
924 /* Finally! We are on the air. */
925 ddi_report_dev(dip);
926
927 /* FMA handle checking. */
928 if (mrsas_check_acc_handle(instance->regmap_handle) !=
929 DDI_SUCCESS) {
930 goto fail_attach;
931 }
932 if (mrsas_check_acc_handle(instance->pci_handle) !=
933 DDI_SUCCESS) {
934 goto fail_attach;
935 }
936
937 instance->mr_ld_list =
938 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
939 KM_SLEEP);
940 instance->unroll.ldlist_buff = 1;
941
942 #ifdef PDSUPPORT
943 if (instance->tbolt) {
944 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
945 instance->mr_tbolt_pd_list =
946 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
947 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
948 ASSERT(instance->mr_tbolt_pd_list);
949 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
950 instance->mr_tbolt_pd_list[i].lun_type =
951 MRSAS_TBOLT_PD_LUN;
952 instance->mr_tbolt_pd_list[i].dev_id =
953 (uint8_t)i;
954 }
955
956 instance->unroll.pdlist_buff = 1;
957 }
958 #endif
959 break;
960 case DDI_PM_RESUME:
961 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
962 break;
963 case DDI_RESUME:
1646 "DDI_FAILURE t = %d l = %d", tgt, lun));
1647 return (DDI_FAILURE);
1648
1649 }
1650
1651 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1652 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1653
1654 if (tgt < MRDRV_MAX_LD && lun == 0) {
1655 if (instance->mr_ld_list[tgt].dip == NULL &&
1656 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1657 mutex_enter(&instance->config_dev_mtx);
1658 instance->mr_ld_list[tgt].dip = tgt_dip;
1659 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1660 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1661 mutex_exit(&instance->config_dev_mtx);
1662 }
1663 }
1664
1665 #ifdef PDSUPPORT
1666 else if (instance->tbolt) {
1667 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1668 mutex_enter(&instance->config_dev_mtx);
1669 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1670 instance->mr_tbolt_pd_list[tgt].flag =
1671 MRDRV_TGT_VALID;
1672 mutex_exit(&instance->config_dev_mtx);
1673 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1674 "t%xl%x", tgt, lun));
1675 }
1676 }
1677 #endif
1678
1679 return (DDI_SUCCESS);
1680 }
1681
1682 /*ARGSUSED*/
1683 static void
1684 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1685 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1686 {
1687 struct mrsas_instance *instance;
1688 int tgt = sd->sd_address.a_target;
1689 int lun = sd->sd_address.a_lun;
1690
1691 instance = ADDR2MR(&sd->sd_address);
1692
1693 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1694
1695 if (tgt < MRDRV_MAX_LD && lun == 0) {
1696 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1697 mutex_enter(&instance->config_dev_mtx);
1698 instance->mr_ld_list[tgt].dip = NULL;
1699 mutex_exit(&instance->config_dev_mtx);
1700 }
1701 }
1702
1703 #ifdef PDSUPPORT
1704 else if (instance->tbolt) {
1705 mutex_enter(&instance->config_dev_mtx);
1706 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1707 mutex_exit(&instance->config_dev_mtx);
1708 		con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL "
1709 "for tgt:%x", tgt));
1710 }
1711 #endif
1712
1713 }
1714
1715 dev_info_t *
1716 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1717 {
1718 dev_info_t *child = NULL;
1719 char addr[SCSI_MAXNAMELEN];
1720 char tmp[MAXNAMELEN];
1721
1722 (void) sprintf(addr, "%x,%x", tgt, lun);
1723 for (child = ddi_get_child(instance->dip); child;
1724 child = ddi_get_next_sibling(child)) {
1921 pkt->pkt_scbp[0] = STATUS_GOOD;
1922 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1923 | STATE_SENT_CMD;
1924 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1925 (*pkt->pkt_comp)(pkt);
1926 }
1927
1928 return (TRAN_ACCEPT);
1929 }
1930
1931 if (cmd == NULL) {
1932 return (TRAN_BUSY);
1933 }
1934
1935 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1936 if (instance->fw_outstanding > instance->max_fw_cmds) {
1937 			con_log(CL_ANN, (CE_CONT, "mr_sas: Firmware busy"));
1938 DTRACE_PROBE2(start_tran_err,
1939 uint16_t, instance->fw_outstanding,
1940 uint16_t, instance->max_fw_cmds);
1941 return_mfi_pkt(instance, cmd);
1942 return (TRAN_BUSY);
1943 }
1944
1945 /* Synchronize the Cmd frame for the controller */
1946 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1947 DDI_DMA_SYNC_FORDEV);
1948 		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x "
1949 "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
1950 instance->func_ptr->issue_cmd(cmd, instance);
1951
1952 } else {
1953 struct mrsas_header *hdr = &cmd->frame->hdr;
1954
1955 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1956
1957 pkt->pkt_reason = CMD_CMPLT;
1958 pkt->pkt_statistics = 0;
1959 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1960
1961 switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
1970 pkt->pkt_reason = CMD_CMPLT;
1971 pkt->pkt_statistics = 0;
1972
1973 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1974 break;
1975
1976 case MFI_STAT_DEVICE_NOT_FOUND:
1977 con_log(CL_ANN, (CE_CONT,
1978 "mrsas_tran_start: device not found error"));
1979 pkt->pkt_reason = CMD_DEV_GONE;
1980 pkt->pkt_statistics = STAT_DISCON;
1981 break;
1982
1983 default:
1984 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1985 }
1986
1987 (void) mrsas_common_check(instance, cmd);
1988 DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
1989 uint8_t, hdr->cmd_status);
1990 return_mfi_pkt(instance, cmd);
1991
1992 if (pkt->pkt_comp) {
1993 (*pkt->pkt_comp)(pkt);
1994 }
1995
1996 }
1997
1998 return (TRAN_ACCEPT);
1999 }
2000
2001 /*
2002 * tran_abort - Abort any commands that are currently in transport
2003 * @ap:
2004 * @pkt:
2005 *
2006 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2007 * commands that are currently in transport for a particular target. This entry
2008 * point is called when a target driver calls scsi_abort(). The tran_abort()
2009 * entry point should attempt to abort the command denoted by the pkt
2010 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2444 }
2445
2446
2447 /*
2448 * ************************************************************************** *
2449 * *
2450 * libraries *
2451 * *
2452 * ************************************************************************** *
2453 */
2454 /*
2455 * get_mfi_pkt : Get a command from the free pool
2456 * After successful allocation, the caller of this routine
2457 * must clear the frame buffer (memset to zero) before
2458 * using the packet further.
2459 *
2460 * ***** Note *****
2461  * After clearing the frame buffer, the context id of the
2462  * frame buffer SHOULD be restored.
2463 */
2464 static struct mrsas_cmd *
2465 get_mfi_pkt(struct mrsas_instance *instance)
2466 {
2467 mlist_t *head = &instance->cmd_pool_list;
2468 struct mrsas_cmd *cmd = NULL;
2469
2470 mutex_enter(&instance->cmd_pool_mtx);
2471
2472 if (!mlist_empty(head)) {
2473 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2474 mlist_del_init(head->next);
2475 }
2476 if (cmd != NULL) {
2477 cmd->pkt = NULL;
2478 cmd->retry_count_for_ocr = 0;
2479 cmd->drv_pkt_time = 0;
2480
2481 }
2482 mutex_exit(&instance->cmd_pool_mtx);
2483
2484 return (cmd);
2485 }
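/*
 * Caller-side sketch of the contract described above get_mfi_pkt(): it
 * mirrors the pattern used throughout this file (e.g. in get_ctrl_info()):
 *
 *	cmd = get_mfi_pkt(instance);
 *	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
 *	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
 *	    cmd->index);
 *	... build and issue the frame, then ...
 *	return_mfi_pkt(instance, cmd);
 */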
2492
2493 mutex_enter(&instance->app_cmd_pool_mtx);
2494
2495 if (!mlist_empty(head)) {
2496 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2497 mlist_del_init(head->next);
2498 }
2499 if (cmd != NULL) {
2500 cmd->pkt = NULL;
2501 cmd->retry_count_for_ocr = 0;
2502 cmd->drv_pkt_time = 0;
2503 }
2504
2505 mutex_exit(&instance->app_cmd_pool_mtx);
2506
2507 return (cmd);
2508 }
2509 /*
2510 * return_mfi_pkt : Return a cmd to free command pool
2511 */
2512 static void
2513 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2514 {
2515 mutex_enter(&instance->cmd_pool_mtx);
2516 /* use mlist_add_tail for debug assistance */
2517 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2518
2519 mutex_exit(&instance->cmd_pool_mtx);
2520 }
2521
2522 static void
2523 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2524 {
2525 mutex_enter(&instance->app_cmd_pool_mtx);
2526
2527 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2528
2529 mutex_exit(&instance->app_cmd_pool_mtx);
2530 }
2531 void
2532 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2533 {
3145 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3146 * Allocate the dynamic array first and then allocate individual
3147 * commands.
3148 */
3149 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3150 ASSERT(instance->cmd_list);
3151
3152 /* create a frame pool and assign one frame to each cmd */
3153 for (count = 0; count < max_cmd; count++) {
3154 instance->cmd_list[count] =
3155 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3156 ASSERT(instance->cmd_list[count]);
3157 }
3158
3159 /* add all the commands to command pool */
3160
3161 INIT_LIST_HEAD(&instance->cmd_pool_list);
3162 INIT_LIST_HEAD(&instance->cmd_pend_list);
3163 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3164
3165 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3166
3167 for (i = 0; i < reserve_cmd; i++) {
3168 cmd = instance->cmd_list[i];
3169 cmd->index = i;
3170 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3171 }
3172
3173
3174 for (i = reserve_cmd; i < max_cmd; i++) {
3175 cmd = instance->cmd_list[i];
3176 cmd->index = i;
3177 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3178 }
3179
3180 return (DDI_SUCCESS);
3181
3182 mrsas_undo_cmds:
3183 if (count > 0) {
3184 /* free each cmd */
3185 for (i = 0; i < count; i++) {
3259 }
3260
3261
3262
3263 /*
3264 * get_ctrl_info
3265 */
3266 static int
3267 get_ctrl_info(struct mrsas_instance *instance,
3268 struct mrsas_ctrl_info *ctrl_info)
3269 {
3270 int ret = 0;
3271
3272 struct mrsas_cmd *cmd;
3273 struct mrsas_dcmd_frame *dcmd;
3274 struct mrsas_ctrl_info *ci;
3275
3276 if (instance->tbolt) {
3277 cmd = get_raid_msg_mfi_pkt(instance);
3278 } else {
3279 cmd = get_mfi_pkt(instance);
3280 }
3281
3282 if (!cmd) {
3283 con_log(CL_ANN, (CE_WARN,
3284 "Failed to get a cmd for ctrl info"));
3285 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3286 uint16_t, instance->max_fw_cmds);
3287 return (DDI_FAILURE);
3288 }
3289
3290 /* Clear the frame buffer and assign back the context id */
3291 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3292 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3293 cmd->index);
3294
3295 dcmd = &cmd->frame->dcmd;
3296
3297 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3298
3299 if (!ci) {
3300 cmn_err(CE_WARN,
3301 "Failed to alloc mem for ctrl info");
3302 return_mfi_pkt(instance, cmd);
3303 return (DDI_FAILURE);
3304 }
3305
3306 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3307
3308 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3309 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3310
3311 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3312 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3313 MFI_CMD_STATUS_POLL_MODE);
3314 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3315 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3316 MFI_FRAME_DIR_READ);
3317 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3318 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3319 sizeof (struct mrsas_ctrl_info));
3320 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3321 MR_DCMD_CTRL_GET_INFO);
3322 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3341
3342 ctrl_info->properties.on_off_properties = ddi_get32(
3343 cmd->frame_dma_obj.acc_handle,
3344 &ci->properties.on_off_properties);
3345 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3346 (uint8_t *)(ctrl_info->product_name),
3347 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3348 DDI_DEV_AUTOINCR);
3349 /* should get more members of ci with ddi_get when needed */
3350 } else {
3351 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3352 ret = -1;
3353 }
3354
3355 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3356 ret = -1;
3357 }
3358 if (instance->tbolt) {
3359 return_raid_msg_mfi_pkt(instance, cmd);
3360 } else {
3361 return_mfi_pkt(instance, cmd);
3362 }
3363
3364 return (ret);
3365 }
3366
3367 /*
3368 * abort_aen_cmd
3369 */
3370 static int
3371 abort_aen_cmd(struct mrsas_instance *instance,
3372 struct mrsas_cmd *cmd_to_abort)
3373 {
3374 int ret = 0;
3375
3376 struct mrsas_cmd *cmd;
3377 struct mrsas_abort_frame *abort_fr;
3378
3379 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3380
3381 if (instance->tbolt) {
3382 cmd = get_raid_msg_mfi_pkt(instance);
3383 } else {
3384 cmd = get_mfi_pkt(instance);
3385 }
3386
3387 if (!cmd) {
3388 con_log(CL_ANN1, (CE_WARN,
3389 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3390 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3391 uint16_t, instance->max_fw_cmds);
3392 return (DDI_FAILURE);
3393 }
3394
3395 /* Clear the frame buffer and assign back the context id */
3396 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3397 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3398 cmd->index);
3399
3400 abort_fr = &cmd->frame->abort;
3401
3402 /* prepare and issue the abort frame */
3403 ddi_put8(cmd->frame_dma_obj.acc_handle,
3404 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3417 cmd->frame_count = 1;
3418
3419 if (instance->tbolt) {
3420 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3421 }
3422
3423 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3424 con_log(CL_ANN1, (CE_WARN,
3425 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3426 ret = -1;
3427 } else {
3428 ret = 0;
3429 }
3430
3431 instance->aen_cmd->abort_aen = 1;
3432 instance->aen_cmd = 0;
3433
3434 if (instance->tbolt) {
3435 return_raid_msg_mfi_pkt(instance, cmd);
3436 } else {
3437 return_mfi_pkt(instance, cmd);
3438 }
3439
3440 atomic_add_16(&instance->fw_outstanding, (-1));
3441
3442 return (ret);
3443 }
3444
3445
3446 static int
3447 mrsas_build_init_cmd(struct mrsas_instance *instance,
3448 struct mrsas_cmd **cmd_ptr)
3449 {
3450 struct mrsas_cmd *cmd;
3451 struct mrsas_init_frame *init_frame;
3452 struct mrsas_init_queue_info *initq_info;
3453 struct mrsas_drv_ver drv_ver_info;
3454
3455
3456 /*
3457 	 * Prepare an init frame. Note the init frame points to queue info
3552
3553 /*
3554 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3555 */
3556 int
3557 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3558 {
3559 struct mrsas_cmd *cmd;
3560
3561 /*
3562 	 * Allocate memory for the MFI adapter (cmd pool, individual commands,
3563 	 * MFI frames, etc.).
3564 */
3565 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3566 con_log(CL_ANN, (CE_NOTE,
3567 "Error, failed to allocate memory for MFI adapter"));
3568 return (DDI_FAILURE);
3569 }
3570
3571 /* Build INIT command */
3572 cmd = get_mfi_pkt(instance);
3573
3574 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3575 con_log(CL_ANN,
3576 (CE_NOTE, "Error, failed to build INIT command"));
3577
3578 goto fail_undo_alloc_mfi_space;
3579 }
3580
3581 /*
3582 	 * Disable interrupt before sending init frame (see Linux driver code)
3583 * send INIT MFI frame in polled mode
3584 */
3585 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3586 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3587 goto fail_fw_init;
3588 }
3589
3590 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3591 goto fail_fw_init;
3592 return_mfi_pkt(instance, cmd);
3593
3594 if (ctio_enable &&
3595 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3596 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3597 instance->flag_ieee = 1;
3598 } else {
3599 instance->flag_ieee = 0;
3600 }
3601
3602 instance->unroll.alloc_space_mfi = 1;
3603 instance->unroll.verBuff = 1;
3604
3605 return (DDI_SUCCESS);
3606
3607
3608 fail_fw_init:
3609 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3610
3611 fail_undo_alloc_mfi_space:
3612 return_mfi_pkt(instance, cmd);
3613 free_space_for_mfi(instance);
3614
3615 return (DDI_FAILURE);
3616
3617 }
3618
3619 /*
3620 * mrsas_init_adapter - Initialize adapter.
3621 */
3622 int
3623 mrsas_init_adapter(struct mrsas_instance *instance)
3624 {
3625 struct mrsas_ctrl_info ctrl_info;
3626
3627
3628 /* we expect the FW state to be READY */
3629 if (mfi_state_transition_to_ready(instance)) {
3630 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3631 return (DDI_FAILURE);
3632 }
3744 &init_frame->queue_info_new_phys_addr_lo,
3745 cmd->frame_phys_addr + 64);
3746 ddi_put32(cmd->frame_dma_obj.acc_handle,
3747 &init_frame->queue_info_new_phys_addr_hi, 0);
3748
3749 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3750 sizeof (struct mrsas_init_queue_info));
3751
3752 cmd->frame_count = 1;
3753
3754 /* issue the init frame in polled mode */
3755 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3756 con_log(CL_ANN1, (CE_WARN,
3757 "mrsas_issue_init_mfi():failed to "
3758 "init firmware"));
3759 return_mfi_app_pkt(instance, cmd);
3760 return (DDI_FAILURE);
3761 }
3762
3763 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3764 return_mfi_pkt(instance, cmd);
3765 return (DDI_FAILURE);
3766 }
3767
3768 return_mfi_app_pkt(instance, cmd);
3769 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3770
3771 return (DDI_SUCCESS);
3772 }
3773 /*
3774 * mfi_state_transition_to_ready : Move the FW to READY state
3775 *
3776 * @reg_set : MFI register set
3777 */
3778 int
3779 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3780 {
3781 int i;
3782 uint8_t max_wait;
3783 uint32_t fw_ctrl = 0;
3784 uint32_t fw_state;
3797 while (fw_state != MFI_STATE_READY) {
3798 con_log(CL_ANN, (CE_CONT,
3799 		    "mfi_state_transition_to_ready: FW state %x", fw_state));
3800
3801 switch (fw_state) {
3802 case MFI_STATE_FAULT:
3803 con_log(CL_ANN, (CE_NOTE,
3804 "mr_sas: FW in FAULT state!!"));
3805
3806 return (ENODEV);
3807 case MFI_STATE_WAIT_HANDSHAKE:
3808 /* set the CLR bit in IMR0 */
3809 con_log(CL_ANN1, (CE_NOTE,
3810 "mr_sas: FW waiting for HANDSHAKE"));
3811 /*
3812 * PCI_Hot Plug: MFI F/W requires
3813 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3814 * to be set
3815 */
3816 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3817 if (!instance->tbolt) {
3818 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3819 MFI_INIT_HOTPLUG, instance);
3820 } else {
3821 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3822 MFI_INIT_HOTPLUG, instance);
3823 }
3824 max_wait = (instance->tbolt == 1) ? 180 : 2;
3825 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3826 break;
3827 case MFI_STATE_BOOT_MESSAGE_PENDING:
3828 /* set the CLR bit in IMR0 */
3829 con_log(CL_ANN1, (CE_NOTE,
3830 "mr_sas: FW state boot message pending"));
3831 /*
3832 * PCI_Hot Plug: MFI F/W requires
3833 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3834 * to be set
3835 */
3836 if (!instance->tbolt) {
3837 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3838 } else {
3839 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3840 instance);
3841 }
3842 max_wait = (instance->tbolt == 1) ? 180 : 10;
3843 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3844 break;
3845 case MFI_STATE_OPERATIONAL:
3846 /* bring it to READY state; assuming max wait 2 secs */
3847 instance->func_ptr->disable_intr(instance);
3848 con_log(CL_ANN1, (CE_NOTE,
3849 "mr_sas: FW in OPERATIONAL state"));
3850 /*
3851 * PCI_Hot Plug: MFI F/W requires
3852 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3853 * to be set
3854 */
3855 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3856 if (!instance->tbolt) {
3857 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3858 } else {
3859 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3860 instance);
3861
3862 for (i = 0; i < (10 * 1000); i++) {
3863 status =
3864 RD_RESERVED0_REGISTER(instance);
3865 if (status & 1) {
3866 delay(1 *
3867 drv_usectohz(MILLISEC));
3868 } else {
3869 break;
3870 }
3871 }
3872
3873 }
3874 max_wait = (instance->tbolt == 1) ? 180 : 10;
3875 cur_state = MFI_STATE_OPERATIONAL;
3876 break;
3920 if (fw_state == cur_state) {
3921 delay(1 * drv_usectohz(MILLISEC));
3922 } else {
3923 break;
3924 }
3925 }
3926 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3927 if (prev_abs_reg_val != cur_abs_reg_val) {
3928 continue;
3929 }
3930 }
3931
3932 /* return error if fw_state hasn't changed after max_wait */
3933 if (fw_state == cur_state) {
3934 con_log(CL_ANN1, (CE_WARN,
3935 "FW state hasn't changed in %d secs", max_wait));
3936 return (ENODEV);
3937 }
3938 };
3939
3940 if (!instance->tbolt) {
3941 fw_ctrl = RD_IB_DOORBELL(instance);
3942 con_log(CL_ANN1, (CE_CONT,
3943 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3944
3945 /*
3946 * Write 0xF to the doorbell register to do the following.
3947 * - Abort all outstanding commands (bit 0).
3948 * - Transition from OPERATIONAL to READY state (bit 1).
3949 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3950 	 * - Set to release FW to continue running (i.e. BIOS handshake)
3951 	 *   (bit 3).
3952 */
3953 WR_IB_DOORBELL(0xF, instance);
3954 }
3955
3956 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3957 return (EIO);
3958 }
3959
3960 return (DDI_SUCCESS);
3961 }
3962
3963 /*
3964 * get_seq_num
3965 */
3966 static int
3967 get_seq_num(struct mrsas_instance *instance,
3968 struct mrsas_evt_log_info *eli)
3969 {
3970 int ret = DDI_SUCCESS;
3971
3972 dma_obj_t dcmd_dma_obj;
3973 struct mrsas_cmd *cmd;
3974 struct mrsas_dcmd_frame *dcmd;
3975 struct mrsas_evt_log_info *eli_tmp;
3976 if (instance->tbolt) {
3977 cmd = get_raid_msg_mfi_pkt(instance);
3978 } else {
3979 cmd = get_mfi_pkt(instance);
3980 }
3981
3982 if (!cmd) {
3983 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
3984 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
3985 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3986 return (ENOMEM);
3987 }
3988
3989 /* Clear the frame buffer and assign back the context id */
3990 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3991 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3992 cmd->index);
3993
3994 dcmd = &cmd->frame->dcmd;
3995
3996 /* allocate the data transfer buffer */
3997 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3998 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3999 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4035 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4036 }
4037
4038 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4039 cmn_err(CE_WARN, "get_seq_num: "
4040 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4041 ret = DDI_FAILURE;
4042 } else {
4043 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4044 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4045 &eli_tmp->newest_seq_num);
4046 ret = DDI_SUCCESS;
4047 }
4048
4049 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4050 ret = DDI_FAILURE;
4051
4052 if (instance->tbolt) {
4053 return_raid_msg_mfi_pkt(instance, cmd);
4054 } else {
4055 return_mfi_pkt(instance, cmd);
4056 }
4057
4058 return (ret);
4059 }
4060
4061 /*
4062 * start_mfi_aen
4063 */
4064 static int
4065 start_mfi_aen(struct mrsas_instance *instance)
4066 {
4067 int ret = 0;
4068
4069 struct mrsas_evt_log_info eli;
4070 union mrsas_evt_class_locale class_locale;
4071
4072 /* get the latest sequence number from FW */
4073 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4074
4075 if (get_seq_num(instance, &eli)) {
4088 if (ret) {
4089 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4090 return (-1);
4091 }
4092
4093
4094 return (ret);
4095 }
4096
4097 /*
4098 * flush_cache
4099 */
4100 static void
4101 flush_cache(struct mrsas_instance *instance)
4102 {
4103 struct mrsas_cmd *cmd = NULL;
4104 struct mrsas_dcmd_frame *dcmd;
4105 if (instance->tbolt) {
4106 cmd = get_raid_msg_mfi_pkt(instance);
4107 } else {
4108 cmd = get_mfi_pkt(instance);
4109 }
4110
4111 if (!cmd) {
4112 con_log(CL_ANN1, (CE_WARN,
4113 "flush_cache():Failed to get a cmd for flush_cache"));
4114 DTRACE_PROBE2(flush_cache_err, uint16_t,
4115 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4116 return;
4117 }
4118
4119 /* Clear the frame buffer and assign back the context id */
4120 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4121 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4122 cmd->index);
4123
4124 dcmd = &cmd->frame->dcmd;
4125
4126 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4127
4128 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4134 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4135 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4136 MR_DCMD_CTRL_CACHE_FLUSH);
4137 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4138 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4139
4140 cmd->frame_count = 1;
4141
4142 if (instance->tbolt) {
4143 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4144 }
4145
4146 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4147 con_log(CL_ANN1, (CE_WARN,
4148 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4149 }
4150 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4151 if (instance->tbolt) {
4152 return_raid_msg_mfi_pkt(instance, cmd);
4153 } else {
4154 return_mfi_pkt(instance, cmd);
4155 }
4156
4157 }
4158
4159 /*
4160  * service_mfi_aen - Completes an AEN command
4161 * @instance: Adapter soft state
4162 * @cmd: Command to be completed
4163 *
4164 */
4165 void
4166 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
4167 {
4168 uint32_t seq_num;
4169 struct mrsas_evt_detail *evt_detail =
4170 (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
4171 int rval = 0;
4172 int tgt = 0;
4173 uint8_t dtype;
4174 #ifdef PDSUPPORT
4234 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
4235 "tgt id = %d index = %d", rval,
4236 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4237 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4238 break;
4239 } /* End of MR_EVT_LD_DELETED */
4240
4241 case MR_EVT_LD_CREATED: {
4242 rval = mrsas_service_evt(instance,
4243 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4244 MRSAS_EVT_CONFIG_TGT, NULL);
4245 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
4246 "tgt id = %d index = %d", rval,
4247 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4248 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4249 break;
4250 } /* End of MR_EVT_LD_CREATED */
4251
4252 #ifdef PDSUPPORT
4253 case MR_EVT_PD_REMOVED_EXT: {
4254 if (instance->tbolt) {
4255 pd_addr = &evt_detail->args.pd_addr;
4256 dtype = pd_addr->scsi_dev_type;
4257 con_log(CL_DLEVEL1, (CE_NOTE,
4258 " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4259 " arg_type = %d ", dtype, evt_detail->arg_type));
4260 tgt = ddi_get16(acc_handle,
4261 &evt_detail->args.pd.device_id);
4262 mutex_enter(&instance->config_dev_mtx);
4263 instance->mr_tbolt_pd_list[tgt].flag =
4264 (uint8_t)~MRDRV_TGT_VALID;
4265 mutex_exit(&instance->config_dev_mtx);
4266 rval = mrsas_service_evt(instance, ddi_get16(
4267 acc_handle, &evt_detail->args.pd.device_id),
4268 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4269 			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED: "
4270 "rval = %d tgt id = %d ", rval,
4271 ddi_get16(acc_handle,
4272 &evt_detail->args.pd.device_id)));
4273 }
4274 break;
4275 } /* End of MR_EVT_PD_REMOVED_EXT */
4276
4277 case MR_EVT_PD_INSERTED_EXT: {
4278 if (instance->tbolt) {
4279 rval = mrsas_service_evt(instance,
4280 ddi_get16(acc_handle,
4281 &evt_detail->args.pd.device_id),
4282 1, MRSAS_EVT_CONFIG_TGT, NULL);
4283 			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTED_EXT: "
4284 "rval = %d tgt id = %d ", rval,
4285 ddi_get16(acc_handle,
4286 &evt_detail->args.pd.device_id)));
4287 }
4288 break;
4289 } /* End of MR_EVT_PD_INSERTED_EXT */
4290
4291 case MR_EVT_PD_STATE_CHANGE: {
4292 if (instance->tbolt) {
4293 tgt = ddi_get16(acc_handle,
4294 &evt_detail->args.pd.device_id);
4295 if ((evt_detail->args.pd_state.prevState ==
4296 PD_SYSTEM) &&
4297 (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
4298 mutex_enter(&instance->config_dev_mtx);
4299 instance->mr_tbolt_pd_list[tgt].flag =
4300 (uint8_t)~MRDRV_TGT_VALID;
4301 mutex_exit(&instance->config_dev_mtx);
4302 rval = mrsas_service_evt(instance,
4303 ddi_get16(acc_handle,
4304 &evt_detail->args.pd.device_id),
4305 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4306 				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED: "
4307 "rval = %d tgt id = %d ", rval,
4308 ddi_get16(acc_handle,
4309 &evt_detail->args.pd.device_id)));
4310 break;
4311 }
4312 if ((evt_detail->args.pd_state.prevState
4508 pkt->pkt_state = STATE_GOT_BUS
4509 | STATE_GOT_TARGET | STATE_SENT_CMD
4510 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4511
4512 con_log(CL_ANN, (CE_CONT,
4513 "CDB[0] = %x completed for %s: size %lx context %x",
4514 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4515 acmd->cmd_dmacount, hdr->context));
4516 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4517 uint_t, acmd->cmd_cdblen, ulong_t,
4518 acmd->cmd_dmacount);
4519
4520 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4521 struct scsi_inquiry *inq;
4522
4523 if (acmd->cmd_dmacount != 0) {
4524 bp_mapin(acmd->cmd_buf);
4525 inq = (struct scsi_inquiry *)
4526 acmd->cmd_buf->b_un.b_addr;
4527
4528 /* don't expose physical drives to OS */
4529 if (acmd->islogical &&
4530 (hdr->cmd_status == MFI_STAT_OK)) {
4531 display_scsi_inquiry(
4532 (caddr_t)inq);
4533 } else if ((hdr->cmd_status ==
4534 MFI_STAT_OK) && inq->inq_dtype ==
4535 DTYPE_DIRECT) {
4536
4537 display_scsi_inquiry(
4538 (caddr_t)inq);
4539
4540 /* for physical disk */
4541 hdr->cmd_status =
4542 MFI_STAT_DEVICE_NOT_FOUND;
4543 }
4544 }
4545 }
4546
4547 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4548 uint8_t, hdr->cmd_status);
4549
4550 switch (hdr->cmd_status) {
4551 case MFI_STAT_OK:
4552 pkt->pkt_scbp[0] = STATUS_GOOD;
4553 break;
4554 case MFI_STAT_LD_CC_IN_PROGRESS:
4555 case MFI_STAT_LD_RECON_IN_PROGRESS:
4556 pkt->pkt_scbp[0] = STATUS_GOOD;
4557 break;
4558 case MFI_STAT_LD_INIT_IN_PROGRESS:
4559 con_log(CL_ANN,
4560 (CE_WARN, "Initialization in Progress"));
4561 pkt->pkt_reason = CMD_TRAN_ERR;
4562
4563 break;
4632 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4633 pkt->pkt_reason = CMD_TRAN_ERR;
4634
4635 break;
4636 }
4637
4638 atomic_add_16(&instance->fw_outstanding, (-1));
4639
4640 (void) mrsas_common_check(instance, cmd);
4641
4642 if (acmd->cmd_dmahandle) {
4643 if (mrsas_check_dma_handle(
4644 acmd->cmd_dmahandle) != DDI_SUCCESS) {
4645 ddi_fm_service_impact(instance->dip,
4646 DDI_SERVICE_UNAFFECTED);
4647 pkt->pkt_reason = CMD_TRAN_ERR;
4648 pkt->pkt_statistics = 0;
4649 }
4650 }
4651
4652 /* Call the callback routine */
4653 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4654 pkt->pkt_comp) {
4655
4656 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
4657 "posting to scsa cmd %p index %x pkt %p "
4658 "time %llx", (void *)cmd, cmd->index,
4659 (void *)pkt, gethrtime()));
4660 (*pkt->pkt_comp)(pkt);
4661
4662 }
4663
4664 return_mfi_pkt(instance, cmd);
4665 break;
4666
4667 case MFI_CMD_OP_SMP:
4668 case MFI_CMD_OP_STP:
4669 complete_cmd_in_sync_mode(instance, cmd);
4670 break;
4671
4672 case MFI_CMD_OP_DCMD:
4673 /* see if got an event notification */
4674 if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4675 &cmd->frame->dcmd.opcode) ==
4676 MR_DCMD_CTRL_EVENT_WAIT) {
4677 if ((instance->aen_cmd == cmd) &&
4678 (instance->aen_cmd->abort_aen)) {
4679 con_log(CL_ANN, (CE_WARN,
4680 "mrsas_softintr: "
4681 "aborted_aen returned"));
4682 } else {
4683 atomic_add_16(&instance->fw_outstanding,
4684 (-1));
5071 {
5072 uint16_t flags = 0;
5073 uint32_t i;
5074 uint32_t context;
5075 uint32_t sge_bytes;
5076 uint32_t tmp_data_xfer_len;
5077 ddi_acc_handle_t acc_handle;
5078 struct mrsas_cmd *cmd;
5079 struct mrsas_sge64 *mfi_sgl;
5080 struct mrsas_sge_ieee *mfi_sgl_ieee;
5081 struct scsa_cmd *acmd = PKT2CMD(pkt);
5082 struct mrsas_pthru_frame *pthru;
5083 struct mrsas_io_frame *ldio;
5084
5085 /* find out if this is logical or physical drive command. */
5086 acmd->islogical = MRDRV_IS_LOGICAL(ap);
5087 acmd->device_id = MAP_DEVICE_ID(instance, ap);
5088 *cmd_done = 0;
5089
5090 /* get the command packet */
5091 if (!(cmd = get_mfi_pkt(instance))) {
5092 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
5093 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
5094 return (NULL);
5095 }
5096
5097 acc_handle = cmd->frame_dma_obj.acc_handle;
5098
5099 /* Clear the frame buffer and assign back the context id */
5100 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
5101 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
5102
5103 cmd->pkt = pkt;
5104 cmd->cmd = acmd;
5105 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
5106 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);
5107
5108 /* lets get the command directions */
5109 if (acmd->cmd_flags & CFLAG_DMASEND) {
5110 flags = MFI_FRAME_DIR_WRITE;
5111
5119
5120 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5121 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5122 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5123 DDI_DMA_SYNC_FORCPU);
5124 }
5125 } else {
5126 flags = MFI_FRAME_DIR_NONE;
5127 }
5128
5129 if (instance->flag_ieee) {
5130 flags |= MFI_FRAME_IEEE;
5131 }
5132 flags |= MFI_FRAME_SGL64;
5133
5134 switch (pkt->pkt_cdbp[0]) {
5135
5136 /*
5137 * case SCMD_SYNCHRONIZE_CACHE:
5138 * flush_cache(instance);
5139 * return_mfi_pkt(instance, cmd);
5140 * *cmd_done = 1;
5141 *
5142 * return (NULL);
5143 */
5144
5145 case SCMD_READ:
5146 case SCMD_WRITE:
5147 case SCMD_READ_G1:
5148 case SCMD_WRITE_G1:
5149 case SCMD_READ_G4:
5150 case SCMD_WRITE_G4:
5151 case SCMD_READ_G5:
5152 case SCMD_WRITE_G5:
5153 if (acmd->islogical) {
5154 ldio = (struct mrsas_io_frame *)cmd->frame;
5155
5156 /*
5157 			 * prepare the Logical IO frame:
5158 * 2nd bit is zero for all read cmds
5159 */
5228 ((uint32_t)(pkt->pkt_cdbp[13])) |
5229 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5230 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5231 ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5232
5233 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5234 ((uint32_t)(pkt->pkt_cdbp[9])) |
5235 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5236 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5237 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5238
5239 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5240 ((uint32_t)(pkt->pkt_cdbp[5])) |
5241 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5242 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5243 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5244 }
5245
5246 break;
5247 }
5248 		/* fall through for all non-rd/wr cmds */
5249 default:
5250
5251 switch (pkt->pkt_cdbp[0]) {
5252 case SCMD_MODE_SENSE:
5253 case SCMD_MODE_SENSE_G1: {
5254 union scsi_cdb *cdbp;
5255 uint16_t page_code;
5256
5257 cdbp = (void *)pkt->pkt_cdbp;
5258 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
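			/*
			 * Mode pages 0x3 (format device) and 0x4 (rigid disk
			 * geometry) are synthesized locally by
			 * mrsas_mode_sense_build() and never sent to the
			 * firmware.
			 */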
5259 switch (page_code) {
5260 case 0x3:
5261 case 0x4:
5262 (void) mrsas_mode_sense_build(pkt);
5263 return_mfi_pkt(instance, cmd);
5264 *cmd_done = 1;
5265 return (NULL);
5266 }
5267 break;
5268 }
5269 default:
5270 break;
5271 }
5272
5273 pthru = (struct mrsas_pthru_frame *)cmd->frame;
5274
5275 /* prepare the DCDB frame */
5276 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5277 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5278 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5279 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5280 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5281 ddi_put8(acc_handle, &pthru->lun, 0);
5282 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5283 ddi_put16(acc_handle, &pthru->timeout, 0);
6277 }
6278
6279 return (rval);
6280 }
6281
6282 /*
6283 * handle_mfi_ioctl
6284 */
6285 static int
6286 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6287 int mode)
6288 {
6289 int rval = DDI_SUCCESS;
6290
6291 struct mrsas_header *hdr;
6292 struct mrsas_cmd *cmd;
6293
6294 if (instance->tbolt) {
6295 cmd = get_raid_msg_mfi_pkt(instance);
6296 } else {
6297 cmd = get_mfi_pkt(instance);
6298 }
6299 if (!cmd) {
6300 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6301 "failed to get a cmd packet"));
6302 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6303 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6304 return (DDI_FAILURE);
6305 }
6306
6307 /* Clear the frame buffer and assign back the context id */
6308 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6309 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6310 cmd->index);
6311
6312 hdr = (struct mrsas_header *)&ioctl->frame[0];
6313
6314 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6315 case MFI_CMD_OP_DCMD:
6316 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6317 break;
6321 case MFI_CMD_OP_STP:
6322 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6323 break;
6324 case MFI_CMD_OP_LD_SCSI:
6325 case MFI_CMD_OP_PD_SCSI:
6326 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6327 break;
6328 default:
6329 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6330 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6331 rval = DDI_FAILURE;
6332 break;
6333 }
6334
6335 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6336 rval = DDI_FAILURE;
6337
6338 if (instance->tbolt) {
6339 return_raid_msg_mfi_pkt(instance, cmd);
6340 } else {
6341 return_mfi_pkt(instance, cmd);
6342 }
6343
6344 return (rval);
6345 }
6346
6347 /*
6348 * AEN
6349 */
6350 static int
6351 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6352 {
6353 int rval = 0;
6354
6355 rval = register_mfi_aen(instance, instance->aen_seq_num,
6356 aen->class_locale_word);
6357
6358 aen->cmd_status = (uint8_t)rval;
6359
6360 return (rval);
6361 }
6418 if (prev_aen.members.class < curr_aen.members.class)
6419 curr_aen.members.class = prev_aen.members.class;
6420
6421 ret_val = abort_aen_cmd(instance, aen_cmd);
6422
6423 if (ret_val) {
6424 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
6425 			    "failed to abort previous AEN command"));
6426
6427 return (ret_val);
6428 }
6429 }
6430 } else {
6431 curr_aen.word = LE_32(class_locale_word);
6432 curr_aen.members.locale = LE_16(curr_aen.members.locale);
6433 }
6434
6435 if (instance->tbolt) {
6436 cmd = get_raid_msg_mfi_pkt(instance);
6437 } else {
6438 cmd = get_mfi_pkt(instance);
6439 }
6440
6441 if (!cmd) {
6442 DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
6443 uint16_t, instance->max_fw_cmds);
6444 return (ENOMEM);
6445 }
6446
6447 /* Clear the frame buffer and assign back the context id */
6448 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6449 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6450 cmd->index);
6451
6452 dcmd = &cmd->frame->dcmd;
6453
6454 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
6455 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
6456
6457 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
6458 sizeof (struct mrsas_evt_detail));
6711 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6712 		    "ISSUED CMD TO FW : called : cmd"
6713 		    " : %p instance : %p pkt : %p pkt_time : %x\n",
6714 gethrtime(), (void *)cmd, (void *)instance,
6715 (void *)pkt, cmd->drv_pkt_time));
6716 if (instance->adapterresetinprogress) {
6717 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6718 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6719 } else {
6720 push_pending_mfi_pkt(instance, cmd);
6721 }
6722
6723 } else {
6724 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6725 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6726 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6727 }
6728
6729 mutex_enter(&instance->reg_write_mtx);
6730 /* Issue the command to the FW */
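	/*
	 * The value written below is the frame physical address with the
	 * extra-frame count encoded in the low-order bits
	 * ((frame_count - 1) << 1) and bit 0 set; the sync- and poll-mode
	 * issue paths below use the same encoding.
	 */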
6731 WR_IB_QPORT((cmd->frame_phys_addr) |
6732 (((cmd->frame_count - 1) << 1) | 1), instance);
6733 mutex_exit(&instance->reg_write_mtx);
6734
6735 }
6736
6737 /*
6738 * issue_cmd_in_sync_mode
6739 */
6740 static int
6741 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6742 struct mrsas_cmd *cmd)
6743 {
6744 int i;
6745 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
6746 struct mrsas_header *hdr = &cmd->frame->hdr;
6747
6748 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6749
6750 if (instance->adapterresetinprogress) {
6751 cmd->drv_pkt_time = ddi_get16(
6752 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6753 if (cmd->drv_pkt_time < debug_timeout_g)
6754 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6755
6756 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6757 "issue and return in reset case\n"));
6758 WR_IB_QPORT((cmd->frame_phys_addr) |
6759 (((cmd->frame_count - 1) << 1) | 1), instance);
6760
6761 return (DDI_SUCCESS);
6762 } else {
6763 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6764 push_pending_mfi_pkt(instance, cmd);
6765 }
6766
6767 cmd->cmd_status = ENODATA;
6768
6769 mutex_enter(&instance->reg_write_mtx);
6770 /* Issue the command to the FW */
6771 WR_IB_QPORT((cmd->frame_phys_addr) |
6772 (((cmd->frame_count - 1) << 1) | 1), instance);
6773 mutex_exit(&instance->reg_write_mtx);
6774
6775 mutex_enter(&instance->int_cmd_mtx);
6776 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6777 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6778 }
6779 mutex_exit(&instance->int_cmd_mtx);
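	/*
	 * Note: the loop above bounds the number of cv_wait() wakeups by
	 * 'msecs' rather than measuring elapsed time; cmd->cmd_status is
	 * expected to change from ENODATA once the completion path signals
	 * int_cmd_cv.
	 */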
6780
6781 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6782
6783 	if (i < (msecs - 1)) {
6784 return (DDI_SUCCESS);
6785 } else {
6786 return (DDI_FAILURE);
6787 }
6788 }
6789
6790 /*
6791 * issue_cmd_in_poll_mode
6793 static int
6794 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6795 struct mrsas_cmd *cmd)
6796 {
6797 int i;
6798 uint16_t flags;
6799 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6800 struct mrsas_header *frame_hdr;
6801
6802 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6803
6804 frame_hdr = (struct mrsas_header *)cmd->frame;
6805 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6806 MFI_CMD_STATUS_POLL_MODE);
6807 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6808 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6809
6810 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6811
6812 /* issue the frame using inbound queue port */
6813 WR_IB_QPORT((cmd->frame_phys_addr) |
6814 (((cmd->frame_count - 1) << 1) | 1), instance);
6815
6816 /* wait for cmd_status to change from 0xFF */
6817 for (i = 0; i < msecs && (
6818 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6819 == MFI_CMD_STATUS_POLL_MODE); i++) {
6820 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6821 }
6822
6823 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6824 == MFI_CMD_STATUS_POLL_MODE) {
6825 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6826 "cmd polling timed out"));
6827 return (DDI_FAILURE);
6828 }
6829
6830 return (DDI_SUCCESS);
6831 }
6832
6833 static void
6834 enable_intr_ppc(struct mrsas_instance *instance)
6835 {
6836 uint32_t mask;
6837
6838 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6839
6840 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6841 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6842
6843 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6844 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6845
6846 /* dummy read to force PCI flush */
6847 mask = RD_OB_INTR_MASK(instance);
6848
6849 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6850 "outbound_intr_mask = 0x%x", mask));
6851 }
6852
6853 static void
6854 disable_intr_ppc(struct mrsas_instance *instance)
6855 {
6856 uint32_t mask;
6857
6858 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6859
6860 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6861 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6862
6863 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
6864 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6865
6866 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6867 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6868
6869 /* dummy read to force PCI flush */
6870 mask = RD_OB_INTR_MASK(instance);
6871 #ifdef lint
6872 mask = mask;
6873 #endif
6874 }
6875
6876 static int
6877 intr_ack_ppc(struct mrsas_instance *instance)
6878 {
6879 uint32_t status;
6880 int ret = DDI_INTR_CLAIMED;
6881
6882 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6883
6884 /* check if it is our interrupt */
6885 status = RD_OB_INTR_STATUS(instance);
6886
6887 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6888
6889 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6890 ret = DDI_INTR_UNCLAIMED;
6891 }
6892
6893 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
6894 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
6895 ret = DDI_INTR_UNCLAIMED;
6896 }
6897
6898 if (ret == DDI_INTR_UNCLAIMED) {
6899 return (ret);
6900 }
6901 /* clear the interrupt by writing back the same value */
6902 WR_OB_DOORBELL_CLEAR(status, instance);
6903
6904 /* dummy READ */
6905 status = RD_OB_INTR_STATUS(instance);
6906
6907 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6908
6909 return (ret);
6910 }
6911
6912 /*
6913 * Marks HBA as bad. This will be called either when an
6914 * IO packet times out even after 3 FW resets
6915  * or the FW is found to be faulted even after 3 consecutive resets.
6916 */
6917
6918 static int
6919 mrsas_kill_adapter(struct mrsas_instance *instance)
6920 {
6921 if (instance->deadadapter == 1)
6922 return (DDI_FAILURE);
7468 /* Hold nexus during bus_config */
7469 ndi_devi_enter(parent, &config);
7470 switch (op) {
7471 case BUS_CONFIG_ONE: {
7472
7473 /* parse wwid/target name out of name given */
7474 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7475 rval = NDI_FAILURE;
7476 break;
7477 }
7478 ptr++;
7479
7480 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7481 rval = NDI_FAILURE;
7482 break;
7483 }
7484
7485 if (lun == 0) {
7486 rval = mrsas_config_ld(instance, tgt, lun, childp);
7487 #ifdef PDSUPPORT
7488 } else if (instance->tbolt == 1 && lun != 0) {
7489 rval = mrsas_tbolt_config_pd(instance,
7490 tgt, lun, childp);
7491 #endif
7492 } else {
7493 rval = NDI_FAILURE;
7494 }
7495
7496 break;
7497 }
7498 case BUS_CONFIG_DRIVER:
7499 case BUS_CONFIG_ALL: {
7500
7501 rval = mrsas_config_all_devices(instance);
7502
7503 rval = NDI_SUCCESS;
7504 break;
7505 }
7506 }
7507
7508 if (rval == NDI_SUCCESS) {
7511 }
7512 ndi_devi_exit(parent, config);
7513
7514 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7515 rval));
7516 return (rval);
7517 }
7518
7519 static int
7520 mrsas_config_all_devices(struct mrsas_instance *instance)
7521 {
7522 int rval, tgt;
7523
7524 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7525 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7526
7527 }
7528
7529 #ifdef PDSUPPORT
7530 /* Config PD devices connected to the card */
7531 if (instance->tbolt) {
7532 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7533 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7534 }
7535 }
7536 #endif
7537
7538 rval = NDI_SUCCESS;
7539 return (rval);
7540 }
7541
7542 static int
7543 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7544 {
7545 char devbuf[SCSI_MAXNAMELEN];
7546 char *addr;
7547 char *p, *tp, *lp;
7548 long num;
7549
7550 /* Parse dev name and address */
7551 (void) strcpy(devbuf, devnm);
7765 dip = instance->mr_ld_list[mrevt->tgt].dip;
7766 mutex_exit(&instance->config_dev_mtx);
7767 #ifdef PDSUPPORT
7768 } else {
7769 mutex_enter(&instance->config_dev_mtx);
7770 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7771 mutex_exit(&instance->config_dev_mtx);
7772 #endif
7773 }
7774
7775
7776 ndi_devi_enter(instance->dip, &circ1);
7777 switch (mrevt->event) {
7778 case MRSAS_EVT_CONFIG_TGT:
7779 if (dip == NULL) {
7780
7781 if (mrevt->lun == 0) {
7782 (void) mrsas_config_ld(instance, mrevt->tgt,
7783 0, NULL);
7784 #ifdef PDSUPPORT
7785 } else if (instance->tbolt) {
7786 (void) mrsas_tbolt_config_pd(instance,
7787 mrevt->tgt,
7788 1, NULL);
7789 #endif
7790 }
7791 con_log(CL_ANN1, (CE_NOTE,
7792 "mr_sas: EVT_CONFIG_TGT called:"
7793 " for tgt %d lun %d event %d",
7794 mrevt->tgt, mrevt->lun, mrevt->event));
7795
7796 } else {
7797 con_log(CL_ANN1, (CE_NOTE,
7798 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7799 " for tgt %d lun %d event %d",
7800 mrevt->tgt, mrevt->lun, mrevt->event));
7801 }
7802 break;
7803 case MRSAS_EVT_UNCONFIG_TGT:
7804 if (dip) {
7805 if (i_ddi_devi_attached(dip)) {
|
27 * used to endorse or promote products derived from this software without
28 * specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 * DAMAGE.
42 */
43
44 /*
45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
48 */
49
50 #include <sys/types.h>
51 #include <sys/param.h>
52 #include <sys/file.h>
53 #include <sys/errno.h>
54 #include <sys/open.h>
55 #include <sys/cred.h>
56 #include <sys/modctl.h>
57 #include <sys/conf.h>
58 #include <sys/devops.h>
59 #include <sys/cmn_err.h>
60 #include <sys/kmem.h>
61 #include <sys/stat.h>
62 #include <sys/mkdev.h>
63 #include <sys/pci.h>
64 #include <sys/scsi/scsi.h>
65 #include <sys/ddi.h>
66 #include <sys/sunddi.h>
67 #include <sys/atomic.h>
68 #include <sys/signal.h>
69 #include <sys/byteorder.h>
70 #include <sys/sdt.h>
71 #include <sys/fs/dv_node.h> /* devfs_clean */
72
73 #include "mr_sas.h"
74
75 /*
76 * FMA header files
77 */
78 #include <sys/ddifm.h>
79 #include <sys/fm/protocol.h>
80 #include <sys/fm/util.h>
81 #include <sys/fm/io/ddi.h>
82
83 /* Macros to help Skinny and stock 2108/MFI live together. */
84 #define WR_IB_PICK_QPORT(addr, instance) \
85 if ((instance)->skinny) { \
86 WR_IB_LOW_QPORT((addr), (instance)); \
87 WR_IB_HIGH_QPORT(0, (instance)); \
88 } else { \
89 WR_IB_QPORT((addr), (instance)); \
90 }
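/*
 * A minimal usage sketch (for illustration, mirroring issue_cmd_ppc() later
 * in this file): the macro is used as a single statement and is passed the
 * already-encoded frame descriptor, i.e. the frame physical address ORed
 * with the frame-count encoding:
 *
 *	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
 *	    (((cmd->frame_count - 1) << 1) | 1), instance);
 *
 * On Skinny controllers this expands to the low/high inbound queue-port
 * pair; on stock 2108/MFI it is a single inbound queue-port write.
 */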
91
92 /*
93 * Local static data
94 */
95 static void *mrsas_state = NULL;
96 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
97 volatile int debug_level_g = CL_NONE;
98 static volatile int msi_enable = 1;
99 static volatile int ctio_enable = 1;
100
101 /* Default Timeout value to issue online controller reset */
102 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
103 /* Simulate consecutive firmware fault */
104 static volatile int debug_fw_faults_after_ocr_g = 0;
105 #ifdef OCRDEBUG
106 /* Simulate three consecutive timeouts for an I/O */
107 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
108 #endif
109
110 #pragma weak scsi_hba_open
111 #pragma weak scsi_hba_close
127 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
128 scsi_hba_tran_t *, struct scsi_device *);
129 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
130 struct scsi_pkt *, struct buf *, int, int, int, int,
131 int (*)(), caddr_t);
132 static int mrsas_tran_start(struct scsi_address *,
133 register struct scsi_pkt *);
134 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
135 static int mrsas_tran_reset(struct scsi_address *, int);
136 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
137 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
138 static void mrsas_tran_destroy_pkt(struct scsi_address *,
139 struct scsi_pkt *);
140 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
141 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
142 static int mrsas_tran_quiesce(dev_info_t *dip);
143 static int mrsas_tran_unquiesce(dev_info_t *dip);
144 static uint_t mrsas_isr();
145 static uint_t mrsas_softintr();
146 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
147
148 static void free_space_for_mfi(struct mrsas_instance *);
149 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
150 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
151 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
152 struct mrsas_cmd *);
153 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
154 struct mrsas_cmd *);
155 static void enable_intr_ppc(struct mrsas_instance *);
156 static void disable_intr_ppc(struct mrsas_instance *);
157 static int intr_ack_ppc(struct mrsas_instance *);
158 static void flush_cache(struct mrsas_instance *instance);
159 void display_scsi_inquiry(caddr_t);
160 static int start_mfi_aen(struct mrsas_instance *instance);
161 static int handle_drv_ioctl(struct mrsas_instance *instance,
162 struct mrsas_ioctl *ioctl, int mode);
163 static int handle_mfi_ioctl(struct mrsas_instance *instance,
164 struct mrsas_ioctl *ioctl, int mode);
165 static int handle_mfi_aen(struct mrsas_instance *instance,
166 struct mrsas_aen *aen);
564
565 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
566 		    "enabled bus-mastering", instance_no));
567 } else {
568 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
569 "bus-mastering already set", instance_no));
570 }
571
572 /* initialize function pointers */
573 switch (device_id) {
574 case PCI_DEVICE_ID_LSI_TBOLT:
575 case PCI_DEVICE_ID_LSI_INVADER:
576 con_log(CL_ANN, (CE_NOTE,
577 "mr_sas: 2208 T.B. device detected"));
578
579 instance->func_ptr =
580 &mrsas_function_template_fusion;
581 instance->tbolt = 1;
582 break;
583
584 case PCI_DEVICE_ID_LSI_SKINNY:
585 case PCI_DEVICE_ID_LSI_SKINNY_NEW:
586 /*
587 * FALLTHRU to PPC-style functions, but mark this
588 * instance as Skinny, because the register set is
589 * slightly different (See WR_IB_PICK_QPORT), and
590 * certain other features are available to a Skinny
591 * HBA.
592 */
593 instance->skinny = 1;
594 /* FALLTHRU */
595
596 case PCI_DEVICE_ID_LSI_2108VDE:
597 case PCI_DEVICE_ID_LSI_2108V:
598 con_log(CL_ANN, (CE_NOTE,
599 "mr_sas: 2108 Liberator device detected"));
600
601 instance->func_ptr =
602 &mrsas_function_template_ppc;
603 break;
604
605 default:
606 cmn_err(CE_WARN,
607 "mr_sas: Invalid device detected");
608
609 pci_config_teardown(&instance->pci_handle);
610 ddi_soft_state_free(mrsas_state, instance_no);
611 return (DDI_FAILURE);
612 }
613
614 instance->baseaddress = pci_config_get32(
615 instance->pci_handle, PCI_CONF_BASE0);
816 }
817
818 instance->softint_running = 0;
819
820 /* Allocate a transport structure */
821 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
822
823 if (tran == NULL) {
824 cmn_err(CE_WARN,
825 "scsi_hba_tran_alloc failed");
826 goto fail_attach;
827 }
828
829 instance->tran = tran;
830 instance->unroll.tran = 1;
831
832 tran->tran_hba_private = instance;
833 tran->tran_tgt_init = mrsas_tran_tgt_init;
834 tran->tran_tgt_probe = scsi_hba_probe;
835 tran->tran_tgt_free = mrsas_tran_tgt_free;
836 tran->tran_init_pkt = mrsas_tran_init_pkt;
837 if (instance->tbolt)
838 tran->tran_start = mrsas_tbolt_tran_start;
839 else
840 tran->tran_start = mrsas_tran_start;
841 tran->tran_abort = mrsas_tran_abort;
842 tran->tran_reset = mrsas_tran_reset;
843 tran->tran_getcap = mrsas_tran_getcap;
844 tran->tran_setcap = mrsas_tran_setcap;
845 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
846 tran->tran_dmafree = mrsas_tran_dmafree;
847 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
848 tran->tran_quiesce = mrsas_tran_quiesce;
849 tran->tran_unquiesce = mrsas_tran_unquiesce;
850 tran->tran_bus_config = mrsas_tran_bus_config;
851
852 if (mrsas_relaxed_ordering)
853 mrsas_generic_dma_attr.dma_attr_flags |=
854 DDI_DMA_RELAXED_ORDERING;
855
856
857 tran_dma_attr = mrsas_generic_dma_attr;
858 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
859
860 /* Attach this instance of the hba */
937
938 /* Finally! We are on the air. */
939 ddi_report_dev(dip);
940
941 /* FMA handle checking. */
942 if (mrsas_check_acc_handle(instance->regmap_handle) !=
943 DDI_SUCCESS) {
944 goto fail_attach;
945 }
946 if (mrsas_check_acc_handle(instance->pci_handle) !=
947 DDI_SUCCESS) {
948 goto fail_attach;
949 }
950
951 instance->mr_ld_list =
952 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
953 KM_SLEEP);
954 instance->unroll.ldlist_buff = 1;
955
956 #ifdef PDSUPPORT
957 if (instance->tbolt || instance->skinny) {
958 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
959 instance->mr_tbolt_pd_list =
960 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
961 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
962 ASSERT(instance->mr_tbolt_pd_list);
963 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
964 instance->mr_tbolt_pd_list[i].lun_type =
965 MRSAS_TBOLT_PD_LUN;
966 instance->mr_tbolt_pd_list[i].dev_id =
967 (uint8_t)i;
968 }
969
970 instance->unroll.pdlist_buff = 1;
971 }
972 #endif
973 break;
974 case DDI_PM_RESUME:
975 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
976 break;
977 case DDI_RESUME:
1660 "DDI_FAILURE t = %d l = %d", tgt, lun));
1661 return (DDI_FAILURE);
1662
1663 }
1664
1665 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1666 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1667
1668 if (tgt < MRDRV_MAX_LD && lun == 0) {
1669 if (instance->mr_ld_list[tgt].dip == NULL &&
1670 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1671 mutex_enter(&instance->config_dev_mtx);
1672 instance->mr_ld_list[tgt].dip = tgt_dip;
1673 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1674 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1675 mutex_exit(&instance->config_dev_mtx);
1676 }
1677 }
1678
1679 #ifdef PDSUPPORT
1680 else if (instance->tbolt || instance->skinny) {
1681 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1682 mutex_enter(&instance->config_dev_mtx);
1683 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1684 instance->mr_tbolt_pd_list[tgt].flag =
1685 MRDRV_TGT_VALID;
1686 mutex_exit(&instance->config_dev_mtx);
1687 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1688 "t%xl%x", tgt, lun));
1689 }
1690 }
1691 #endif
1692
1693 return (DDI_SUCCESS);
1694 }
1695
1696 /*ARGSUSED*/
1697 static void
1698 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1699 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1700 {
1701 struct mrsas_instance *instance;
1702 int tgt = sd->sd_address.a_target;
1703 int lun = sd->sd_address.a_lun;
1704
1705 instance = ADDR2MR(&sd->sd_address);
1706
1707 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1708
1709 if (tgt < MRDRV_MAX_LD && lun == 0) {
1710 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1711 mutex_enter(&instance->config_dev_mtx);
1712 instance->mr_ld_list[tgt].dip = NULL;
1713 mutex_exit(&instance->config_dev_mtx);
1714 }
1715 }
1716
1717 #ifdef PDSUPPORT
1718 else if (instance->tbolt || instance->skinny) {
1719 mutex_enter(&instance->config_dev_mtx);
1720 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1721 mutex_exit(&instance->config_dev_mtx);
1722 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
1723 		    " for tgt:%x", tgt));
1724 }
1725 #endif
1726
1727 }
1728
1729 dev_info_t *
1730 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1731 {
1732 dev_info_t *child = NULL;
1733 char addr[SCSI_MAXNAMELEN];
1734 char tmp[MAXNAMELEN];
1735
1736 (void) sprintf(addr, "%x,%x", tgt, lun);
1737 for (child = ddi_get_child(instance->dip); child;
1738 child = ddi_get_next_sibling(child)) {
1935 pkt->pkt_scbp[0] = STATUS_GOOD;
1936 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1937 | STATE_SENT_CMD;
1938 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1939 (*pkt->pkt_comp)(pkt);
1940 }
1941
1942 return (TRAN_ACCEPT);
1943 }
1944
1945 if (cmd == NULL) {
1946 return (TRAN_BUSY);
1947 }
1948
1949 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1950 if (instance->fw_outstanding > instance->max_fw_cmds) {
1951 con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
1952 DTRACE_PROBE2(start_tran_err,
1953 uint16_t, instance->fw_outstanding,
1954 uint16_t, instance->max_fw_cmds);
1955 mrsas_return_mfi_pkt(instance, cmd);
1956 return (TRAN_BUSY);
1957 }
1958
1959 /* Synchronize the Cmd frame for the controller */
1960 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1961 DDI_DMA_SYNC_FORDEV);
1962 con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
1963 		    " cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
1964 instance->func_ptr->issue_cmd(cmd, instance);
1965
1966 } else {
1967 struct mrsas_header *hdr = &cmd->frame->hdr;
1968
1969 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1970
1971 pkt->pkt_reason = CMD_CMPLT;
1972 pkt->pkt_statistics = 0;
1973 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1974
1975 switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
1984 pkt->pkt_reason = CMD_CMPLT;
1985 pkt->pkt_statistics = 0;
1986
1987 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1988 break;
1989
1990 case MFI_STAT_DEVICE_NOT_FOUND:
1991 con_log(CL_ANN, (CE_CONT,
1992 "mrsas_tran_start: device not found error"));
1993 pkt->pkt_reason = CMD_DEV_GONE;
1994 pkt->pkt_statistics = STAT_DISCON;
1995 break;
1996
1997 default:
1998 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1999 }
2000
2001 (void) mrsas_common_check(instance, cmd);
2002 DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
2003 uint8_t, hdr->cmd_status);
2004 mrsas_return_mfi_pkt(instance, cmd);
2005
2006 if (pkt->pkt_comp) {
2007 (*pkt->pkt_comp)(pkt);
2008 }
2009
2010 }
2011
2012 return (TRAN_ACCEPT);
2013 }
2014
2015 /*
2016 * tran_abort - Abort any commands that are currently in transport
2017 * @ap:
2018 * @pkt:
2019 *
2020 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2021 * commands that are currently in transport for a particular target. This entry
2022 * point is called when a target driver calls scsi_abort(). The tran_abort()
2023 * entry point should attempt to abort the command denoted by the pkt
2024 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2458 }
2459
2460
2461 /*
2462 * ************************************************************************** *
2463 * *
2464 * libraries *
2465 * *
2466 * ************************************************************************** *
2467 */
2468 /*
2469 * get_mfi_pkt : Get a command from the free pool
2470 * After successful allocation, the caller of this routine
2471 * must clear the frame buffer (memset to zero) before
2472 * using the packet further.
2473 *
2474 * ***** Note *****
2475  * After clearing the frame buffer, the context id of the
2476  * frame buffer SHOULD be restored.
2477 */
2478 struct mrsas_cmd *
2479 mrsas_get_mfi_pkt(struct mrsas_instance *instance)
2480 {
2481 mlist_t *head = &instance->cmd_pool_list;
2482 struct mrsas_cmd *cmd = NULL;
2483
2484 mutex_enter(&instance->cmd_pool_mtx);
2485
2486 if (!mlist_empty(head)) {
2487 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2488 mlist_del_init(head->next);
2489 }
2490 if (cmd != NULL) {
2491 cmd->pkt = NULL;
2492 cmd->retry_count_for_ocr = 0;
2493 cmd->drv_pkt_time = 0;
2494
2495 }
2496 mutex_exit(&instance->cmd_pool_mtx);
2497
2498 return (cmd);
2499 }
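/*
 * A minimal caller sketch (for illustration, following the note above): the
 * frame is cleared and the context id is immediately restored so that the
 * completion path can map the frame back to this command.
 *
 *	struct mrsas_cmd *cmd = mrsas_get_mfi_pkt(instance);
 *	if (cmd != NULL) {
 *		(void) memset((char *)&cmd->frame[0], 0,
 *		    sizeof (union mrsas_frame));
 *		ddi_put32(cmd->frame_dma_obj.acc_handle,
 *		    &cmd->frame->hdr.context, cmd->index);
 *	}
 */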
2506
2507 mutex_enter(&instance->app_cmd_pool_mtx);
2508
2509 if (!mlist_empty(head)) {
2510 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2511 mlist_del_init(head->next);
2512 }
2513 if (cmd != NULL) {
2514 cmd->pkt = NULL;
2515 cmd->retry_count_for_ocr = 0;
2516 cmd->drv_pkt_time = 0;
2517 }
2518
2519 mutex_exit(&instance->app_cmd_pool_mtx);
2520
2521 return (cmd);
2522 }
2523 /*
2524  * return_mfi_pkt : Return a cmd to the free command pool
2525 */
2526 void
2527 mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2528 {
2529 mutex_enter(&instance->cmd_pool_mtx);
2530 /* use mlist_add_tail for debug assistance */
2531 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2532
2533 mutex_exit(&instance->cmd_pool_mtx);
2534 }
2535
2536 static void
2537 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2538 {
2539 mutex_enter(&instance->app_cmd_pool_mtx);
2540
2541 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2542
2543 mutex_exit(&instance->app_cmd_pool_mtx);
2544 }
2545 void
2546 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2547 {
3159 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3160 * Allocate the dynamic array first and then allocate individual
3161 * commands.
3162 */
3163 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3164 ASSERT(instance->cmd_list);
3165
3166 /* create a frame pool and assign one frame to each cmd */
3167 for (count = 0; count < max_cmd; count++) {
3168 instance->cmd_list[count] =
3169 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3170 ASSERT(instance->cmd_list[count]);
3171 }
3172
3173 /* add all the commands to command pool */
3174
3175 INIT_LIST_HEAD(&instance->cmd_pool_list);
3176 INIT_LIST_HEAD(&instance->cmd_pend_list);
3177 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3178
3179 /*
3180 	 * Split the pool between application and regular commands: reserve
3181 	 * min(MRSAS_APP_RESERVED_CMDS, max(max_cmd / 8,
3182 	 * MRSAS_APP_MIN_RESERVED_CMDS)) commands for application use.
3183 */
3184 reserve_cmd = min(MRSAS_APP_RESERVED_CMDS,
3185 max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS));
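	/*
	 * Worked example (hypothetical numbers, for illustration only): if
	 * the controller reported max_cmd = 128, then max_cmd >> 3 = 16 and
	 * reserve_cmd = min(MRSAS_APP_RESERVED_CMDS, max(16,
	 * MRSAS_APP_MIN_RESERVED_CMDS)); the remaining max_cmd - reserve_cmd
	 * commands are placed on the regular pool below.
	 */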
3186
3187 for (i = 0; i < reserve_cmd; i++) {
3188 cmd = instance->cmd_list[i];
3189 cmd->index = i;
3190 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3191 }
3192
3193
3194 for (i = reserve_cmd; i < max_cmd; i++) {
3195 cmd = instance->cmd_list[i];
3196 cmd->index = i;
3197 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3198 }
3199
3200 return (DDI_SUCCESS);
3201
3202 mrsas_undo_cmds:
3203 if (count > 0) {
3204 /* free each cmd */
3205 for (i = 0; i < count; i++) {
3279 }
3280
3281
3282
3283 /*
3284 * get_ctrl_info
3285 */
3286 static int
3287 get_ctrl_info(struct mrsas_instance *instance,
3288 struct mrsas_ctrl_info *ctrl_info)
3289 {
3290 int ret = 0;
3291
3292 struct mrsas_cmd *cmd;
3293 struct mrsas_dcmd_frame *dcmd;
3294 struct mrsas_ctrl_info *ci;
3295
3296 if (instance->tbolt) {
3297 cmd = get_raid_msg_mfi_pkt(instance);
3298 } else {
3299 cmd = mrsas_get_mfi_pkt(instance);
3300 }
3301
3302 if (!cmd) {
3303 con_log(CL_ANN, (CE_WARN,
3304 "Failed to get a cmd for ctrl info"));
3305 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3306 uint16_t, instance->max_fw_cmds);
3307 return (DDI_FAILURE);
3308 }
3309
3310 /* Clear the frame buffer and assign back the context id */
3311 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3312 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3313 cmd->index);
3314
3315 dcmd = &cmd->frame->dcmd;
3316
3317 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3318
3319 if (!ci) {
3320 cmn_err(CE_WARN,
3321 "Failed to alloc mem for ctrl info");
3322 mrsas_return_mfi_pkt(instance, cmd);
3323 return (DDI_FAILURE);
3324 }
3325
3326 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3327
3328 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3329 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3330
3331 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3332 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3333 MFI_CMD_STATUS_POLL_MODE);
3334 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3335 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3336 MFI_FRAME_DIR_READ);
3337 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3338 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3339 sizeof (struct mrsas_ctrl_info));
3340 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3341 MR_DCMD_CTRL_GET_INFO);
3342 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3361
3362 ctrl_info->properties.on_off_properties = ddi_get32(
3363 cmd->frame_dma_obj.acc_handle,
3364 &ci->properties.on_off_properties);
3365 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3366 (uint8_t *)(ctrl_info->product_name),
3367 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3368 DDI_DEV_AUTOINCR);
3369 /* should get more members of ci with ddi_get when needed */
3370 } else {
3371 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3372 ret = -1;
3373 }
3374
3375 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3376 ret = -1;
3377 }
3378 if (instance->tbolt) {
3379 return_raid_msg_mfi_pkt(instance, cmd);
3380 } else {
3381 mrsas_return_mfi_pkt(instance, cmd);
3382 }
3383
3384 return (ret);
3385 }
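/*
 * Summary of the DCMD pattern used above (and repeated by the other DCMD
 * helpers in this file): the frame is zeroed and the context id restored,
 * the DCMD header is filled in (cmd, cmd_status, sge_count, flags, timeout,
 * data_xfer_len, opcode), an SGE is pointed at the data buffer
 * (instance->internal_buf here), the command is issued to firmware, and the
 * results are copied back with ddi_get32()/ddi_rep_get8() so that the access
 * handle takes care of byte ordering.
 */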
3386
3387 /*
3388 * abort_aen_cmd
3389 */
3390 static int
3391 abort_aen_cmd(struct mrsas_instance *instance,
3392 struct mrsas_cmd *cmd_to_abort)
3393 {
3394 int ret = 0;
3395
3396 struct mrsas_cmd *cmd;
3397 struct mrsas_abort_frame *abort_fr;
3398
3399 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3400
3401 if (instance->tbolt) {
3402 cmd = get_raid_msg_mfi_pkt(instance);
3403 } else {
3404 cmd = mrsas_get_mfi_pkt(instance);
3405 }
3406
3407 if (!cmd) {
3408 con_log(CL_ANN1, (CE_WARN,
3409 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3410 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3411 uint16_t, instance->max_fw_cmds);
3412 return (DDI_FAILURE);
3413 }
3414
3415 /* Clear the frame buffer and assign back the context id */
3416 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3417 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3418 cmd->index);
3419
3420 abort_fr = &cmd->frame->abort;
3421
3422 /* prepare and issue the abort frame */
3423 ddi_put8(cmd->frame_dma_obj.acc_handle,
3424 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3437 cmd->frame_count = 1;
3438
3439 if (instance->tbolt) {
3440 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3441 }
3442
3443 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3444 con_log(CL_ANN1, (CE_WARN,
3445 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3446 ret = -1;
3447 } else {
3448 ret = 0;
3449 }
3450
3451 instance->aen_cmd->abort_aen = 1;
3452 instance->aen_cmd = 0;
3453
3454 if (instance->tbolt) {
3455 return_raid_msg_mfi_pkt(instance, cmd);
3456 } else {
3457 mrsas_return_mfi_pkt(instance, cmd);
3458 }
3459
3460 atomic_add_16(&instance->fw_outstanding, (-1));
3461
3462 return (ret);
3463 }
3464
3465
3466 static int
3467 mrsas_build_init_cmd(struct mrsas_instance *instance,
3468 struct mrsas_cmd **cmd_ptr)
3469 {
3470 struct mrsas_cmd *cmd;
3471 struct mrsas_init_frame *init_frame;
3472 struct mrsas_init_queue_info *initq_info;
3473 struct mrsas_drv_ver drv_ver_info;
3474
3475
3476 /*
3477 * Prepare a init frame. Note the init frame points to queue info
3572
3573 /*
3574 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3575 */
3576 int
3577 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3578 {
3579 struct mrsas_cmd *cmd;
3580
3581 /*
3582 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
3583 * frames etc
3584 */
3585 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3586 con_log(CL_ANN, (CE_NOTE,
3587 "Error, failed to allocate memory for MFI adapter"));
3588 return (DDI_FAILURE);
3589 }
3590
3591 /* Build INIT command */
3592 cmd = mrsas_get_mfi_pkt(instance);
3593 if (cmd == NULL) {
3594 DTRACE_PROBE2(init_adapter_mfi_err, uint16_t,
3595 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3596 return (DDI_FAILURE);
3597 }
3598
3599 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3600 con_log(CL_ANN,
3601 (CE_NOTE, "Error, failed to build INIT command"));
3602
3603 goto fail_undo_alloc_mfi_space;
3604 }
3605
3606 /*
3607 	 * Disable interrupts before sending the init frame (see the Linux
3608 	 * driver code); send the INIT MFI frame in polled mode.
3609 */
3610 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3611 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3612 goto fail_fw_init;
3613 }
3614
3615 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3616 goto fail_fw_init;
3617 mrsas_return_mfi_pkt(instance, cmd);
3618
3619 if (ctio_enable &&
3620 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3621 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3622 instance->flag_ieee = 1;
3623 } else {
3624 instance->flag_ieee = 0;
3625 }
3626
3627 ASSERT(!instance->skinny || instance->flag_ieee);
3628
3629 instance->unroll.alloc_space_mfi = 1;
3630 instance->unroll.verBuff = 1;
3631
3632 return (DDI_SUCCESS);
3633
3634
3635 fail_fw_init:
3636 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3637
3638 fail_undo_alloc_mfi_space:
3639 mrsas_return_mfi_pkt(instance, cmd);
3640 free_space_for_mfi(instance);
3641
3642 return (DDI_FAILURE);
3643
3644 }
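/*
 * Recap of the MFI init sequence above: allocate the command pool and frames
 * (alloc_space_for_mfi), build the INIT frame, issue it in polled mode, and
 * then (when ctio_enable is set and the firmware status register advertises
 * bit 0x04000000) enable IEEE-format SGLs via instance->flag_ieee.  On
 * failure the partially allocated MFI resources are unwound before returning
 * DDI_FAILURE.
 */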
3645
3646 /*
3647 * mrsas_init_adapter - Initialize adapter.
3648 */
3649 int
3650 mrsas_init_adapter(struct mrsas_instance *instance)
3651 {
3652 struct mrsas_ctrl_info ctrl_info;
3653
3654
3655 /* we expect the FW state to be READY */
3656 if (mfi_state_transition_to_ready(instance)) {
3657 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3658 return (DDI_FAILURE);
3659 }
3771 &init_frame->queue_info_new_phys_addr_lo,
3772 cmd->frame_phys_addr + 64);
3773 ddi_put32(cmd->frame_dma_obj.acc_handle,
3774 &init_frame->queue_info_new_phys_addr_hi, 0);
3775
3776 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3777 sizeof (struct mrsas_init_queue_info));
3778
3779 cmd->frame_count = 1;
3780
3781 /* issue the init frame in polled mode */
3782 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3783 con_log(CL_ANN1, (CE_WARN,
3784 "mrsas_issue_init_mfi():failed to "
3785 "init firmware"));
3786 return_mfi_app_pkt(instance, cmd);
3787 return (DDI_FAILURE);
3788 }
3789
3790 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3791 return_mfi_app_pkt(instance, cmd);
3792 return (DDI_FAILURE);
3793 }
3794
3795 return_mfi_app_pkt(instance, cmd);
3796 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3797
3798 return (DDI_SUCCESS);
3799 }
3800 /*
3801 * mfi_state_transition_to_ready : Move the FW to READY state
3802 *
3803  * @instance : Adapter soft state
3804 */
3805 int
3806 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3807 {
3808 int i;
3809 uint8_t max_wait;
3810 uint32_t fw_ctrl = 0;
3811 uint32_t fw_state;
3824 while (fw_state != MFI_STATE_READY) {
3825 con_log(CL_ANN, (CE_CONT,
3826 		    "mfi_state_transition_to_ready: FW state = %x", fw_state));
3827
3828 switch (fw_state) {
3829 case MFI_STATE_FAULT:
3830 con_log(CL_ANN, (CE_NOTE,
3831 "mr_sas: FW in FAULT state!!"));
3832
3833 return (ENODEV);
3834 case MFI_STATE_WAIT_HANDSHAKE:
3835 /* set the CLR bit in IMR0 */
3836 con_log(CL_ANN1, (CE_NOTE,
3837 "mr_sas: FW waiting for HANDSHAKE"));
3838 /*
3839 * PCI_Hot Plug: MFI F/W requires
3840 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3841 * to be set
3842 */
3843 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3844 if (!instance->tbolt && !instance->skinny) {
3845 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3846 MFI_INIT_HOTPLUG, instance);
3847 } else {
3848 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3849 MFI_INIT_HOTPLUG, instance);
3850 }
3851 max_wait = (instance->tbolt == 1) ? 180 : 2;
3852 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3853 break;
3854 case MFI_STATE_BOOT_MESSAGE_PENDING:
3855 /* set the CLR bit in IMR0 */
3856 con_log(CL_ANN1, (CE_NOTE,
3857 "mr_sas: FW state boot message pending"));
3858 /*
3859 * PCI_Hot Plug: MFI F/W requires
3860 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3861 * to be set
3862 */
3863 if (!instance->tbolt && !instance->skinny) {
3864 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3865 } else {
3866 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3867 instance);
3868 }
3869 max_wait = (instance->tbolt == 1) ? 180 : 10;
3870 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3871 break;
3872 case MFI_STATE_OPERATIONAL:
3873 			/* bring it to READY state; wait up to max_wait secs */
3874 instance->func_ptr->disable_intr(instance);
3875 con_log(CL_ANN1, (CE_NOTE,
3876 "mr_sas: FW in OPERATIONAL state"));
3877 /*
3878 * PCI_Hot Plug: MFI F/W requires
3879 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3880 * to be set
3881 */
3882 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3883 if (!instance->tbolt && !instance->skinny) {
3884 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3885 } else {
3886 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3887 instance);
3888
3889 for (i = 0; i < (10 * 1000); i++) {
3890 status =
3891 RD_RESERVED0_REGISTER(instance);
3892 if (status & 1) {
3893 delay(1 *
3894 drv_usectohz(MILLISEC));
3895 } else {
3896 break;
3897 }
3898 }
3899
3900 }
3901 max_wait = (instance->tbolt == 1) ? 180 : 10;
3902 cur_state = MFI_STATE_OPERATIONAL;
3903 break;
3947 if (fw_state == cur_state) {
3948 delay(1 * drv_usectohz(MILLISEC));
3949 } else {
3950 break;
3951 }
3952 }
3953 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3954 if (prev_abs_reg_val != cur_abs_reg_val) {
3955 continue;
3956 }
3957 }
3958
3959 /* return error if fw_state hasn't changed after max_wait */
3960 if (fw_state == cur_state) {
3961 con_log(CL_ANN1, (CE_WARN,
3962 "FW state hasn't changed in %d secs", max_wait));
3963 return (ENODEV);
3964 }
3965 };
3966
3967 /* This may also need to apply to Skinny, but for now, don't worry. */
3968 if (!instance->tbolt && !instance->skinny) {
3969 fw_ctrl = RD_IB_DOORBELL(instance);
3970 con_log(CL_ANN1, (CE_CONT,
3971 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3972
3973 /*
3974 * Write 0xF to the doorbell register to do the following.
3975 * - Abort all outstanding commands (bit 0).
3976 * - Transition from OPERATIONAL to READY state (bit 1).
3977 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3978 * - Set to release FW to continue running (i.e. BIOS handshake
3979 * (bit 3).
3980 */
3981 WR_IB_DOORBELL(0xF, instance);
3982 }
3983
3984 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3985 return (EIO);
3986 }
3987
3988 return (DDI_SUCCESS);
3989 }
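/*
 * Recap of the state machine above: FAULT is fatal (ENODEV);
 * WAIT_HANDSHAKE and BOOT_MESSAGE_PENDING are acknowledged by writing the
 * handshake/hotplug bits to the inbound doorbell (or to the reserved
 * register on Thunderbolt and Skinny controllers); OPERATIONAL is taken back
 * to READY by writing MFI_RESET_FLAGS.  After each write the driver polls
 * for up to max_wait seconds for the state to change, and once READY is
 * reached the non-Thunderbolt/non-Skinny (2108-style) controllers get a
 * final 0xF doorbell write to abort outstanding commands and complete the
 * transition.
 */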
3990
3991 /*
3992 * get_seq_num
3993 */
3994 static int
3995 get_seq_num(struct mrsas_instance *instance,
3996 struct mrsas_evt_log_info *eli)
3997 {
3998 int ret = DDI_SUCCESS;
3999
4000 dma_obj_t dcmd_dma_obj;
4001 struct mrsas_cmd *cmd;
4002 struct mrsas_dcmd_frame *dcmd;
4003 struct mrsas_evt_log_info *eli_tmp;
4004 if (instance->tbolt) {
4005 cmd = get_raid_msg_mfi_pkt(instance);
4006 } else {
4007 cmd = mrsas_get_mfi_pkt(instance);
4008 }
4009
4010 if (!cmd) {
4011 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
4012 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
4013 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4014 return (ENOMEM);
4015 }
4016
4017 /* Clear the frame buffer and assign back the context id */
4018 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4019 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4020 cmd->index);
4021
4022 dcmd = &cmd->frame->dcmd;
4023
4024 /* allocate the data transfer buffer */
4025 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
4026 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
4027 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4063 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4064 }
4065
4066 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4067 cmn_err(CE_WARN, "get_seq_num: "
4068 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4069 ret = DDI_FAILURE;
4070 } else {
4071 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4072 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4073 &eli_tmp->newest_seq_num);
4074 ret = DDI_SUCCESS;
4075 }
4076
4077 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4078 ret = DDI_FAILURE;
4079
4080 if (instance->tbolt) {
4081 return_raid_msg_mfi_pkt(instance, cmd);
4082 } else {
4083 mrsas_return_mfi_pkt(instance, cmd);
4084 }
4085
4086 return (ret);
4087 }
4088
4089 /*
4090 * start_mfi_aen
4091 */
4092 static int
4093 start_mfi_aen(struct mrsas_instance *instance)
4094 {
4095 int ret = 0;
4096
4097 struct mrsas_evt_log_info eli;
4098 union mrsas_evt_class_locale class_locale;
4099
4100 /* get the latest sequence number from FW */
4101 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4102
4103 if (get_seq_num(instance, &eli)) {
4116 if (ret) {
4117 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4118 return (-1);
4119 }
4120
4121
4122 return (ret);
4123 }
4124
4125 /*
4126 * flush_cache
4127 */
4128 static void
4129 flush_cache(struct mrsas_instance *instance)
4130 {
4131 struct mrsas_cmd *cmd = NULL;
4132 struct mrsas_dcmd_frame *dcmd;
4133 if (instance->tbolt) {
4134 cmd = get_raid_msg_mfi_pkt(instance);
4135 } else {
4136 cmd = mrsas_get_mfi_pkt(instance);
4137 }
4138
4139 if (!cmd) {
4140 con_log(CL_ANN1, (CE_WARN,
4141 "flush_cache():Failed to get a cmd for flush_cache"));
4142 DTRACE_PROBE2(flush_cache_err, uint16_t,
4143 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4144 return;
4145 }
4146
4147 /* Clear the frame buffer and assign back the context id */
4148 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4149 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4150 cmd->index);
4151
4152 dcmd = &cmd->frame->dcmd;
4153
4154 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4155
4156 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4162 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4163 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4164 MR_DCMD_CTRL_CACHE_FLUSH);
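	/*
	 * mbox byte 0 of the CACHE_FLUSH DCMD selects what to flush: both the
	 * controller cache (MR_FLUSH_CTRL_CACHE) and the disk write caches
	 * (MR_FLUSH_DISK_CACHE) are requested here.
	 */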
4165 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4166 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4167
4168 cmd->frame_count = 1;
4169
4170 if (instance->tbolt) {
4171 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4172 }
4173
4174 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4175 con_log(CL_ANN1, (CE_WARN,
4176 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4177 }
4178 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4179 if (instance->tbolt) {
4180 return_raid_msg_mfi_pkt(instance, cmd);
4181 } else {
4182 mrsas_return_mfi_pkt(instance, cmd);
4183 }
4184
4185 }
4186
4187 /*
4188 * service_mfi_aen- Completes an AEN command
4189 * @instance: Adapter soft state
4190 * @cmd: Command to be completed
4191 *
4192 */
4193 void
4194 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
4195 {
4196 uint32_t seq_num;
4197 struct mrsas_evt_detail *evt_detail =
4198 (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
4199 int rval = 0;
4200 int tgt = 0;
4201 uint8_t dtype;
4202 #ifdef PDSUPPORT
4262 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
4263 "tgt id = %d index = %d", rval,
4264 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4265 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4266 break;
4267 } /* End of MR_EVT_LD_DELETED */
4268
4269 case MR_EVT_LD_CREATED: {
4270 rval = mrsas_service_evt(instance,
4271 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4272 MRSAS_EVT_CONFIG_TGT, NULL);
4273 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
4274 "tgt id = %d index = %d", rval,
4275 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4276 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4277 break;
4278 } /* End of MR_EVT_LD_CREATED */
4279
4280 #ifdef PDSUPPORT
4281 case MR_EVT_PD_REMOVED_EXT: {
4282 if (instance->tbolt || instance->skinny) {
4283 pd_addr = &evt_detail->args.pd_addr;
4284 dtype = pd_addr->scsi_dev_type;
4285 con_log(CL_DLEVEL1, (CE_NOTE,
4286 " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4287 " arg_type = %d ", dtype, evt_detail->arg_type));
4288 tgt = ddi_get16(acc_handle,
4289 &evt_detail->args.pd.device_id);
4290 mutex_enter(&instance->config_dev_mtx);
4291 instance->mr_tbolt_pd_list[tgt].flag =
4292 (uint8_t)~MRDRV_TGT_VALID;
4293 mutex_exit(&instance->config_dev_mtx);
4294 rval = mrsas_service_evt(instance, ddi_get16(
4295 acc_handle, &evt_detail->args.pd.device_id),
4296 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4297 			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED: "
4298 "rval = %d tgt id = %d ", rval,
4299 ddi_get16(acc_handle,
4300 &evt_detail->args.pd.device_id)));
4301 }
4302 break;
4303 } /* End of MR_EVT_PD_REMOVED_EXT */
4304
4305 case MR_EVT_PD_INSERTED_EXT: {
4306 if (instance->tbolt || instance->skinny) {
4307 rval = mrsas_service_evt(instance,
4308 ddi_get16(acc_handle,
4309 &evt_detail->args.pd.device_id),
4310 1, MRSAS_EVT_CONFIG_TGT, NULL);
4311 			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTED_EXT: "
4312 "rval = %d tgt id = %d ", rval,
4313 ddi_get16(acc_handle,
4314 &evt_detail->args.pd.device_id)));
4315 }
4316 break;
4317 } /* End of MR_EVT_PD_INSERTED_EXT */
4318
4319 case MR_EVT_PD_STATE_CHANGE: {
4320 if (instance->tbolt || instance->skinny) {
4321 tgt = ddi_get16(acc_handle,
4322 &evt_detail->args.pd.device_id);
4323 if ((evt_detail->args.pd_state.prevState ==
4324 PD_SYSTEM) &&
4325 (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
4326 mutex_enter(&instance->config_dev_mtx);
4327 instance->mr_tbolt_pd_list[tgt].flag =
4328 (uint8_t)~MRDRV_TGT_VALID;
4329 mutex_exit(&instance->config_dev_mtx);
4330 rval = mrsas_service_evt(instance,
4331 ddi_get16(acc_handle,
4332 &evt_detail->args.pd.device_id),
4333 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4334 				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED: "
4335 "rval = %d tgt id = %d ", rval,
4336 ddi_get16(acc_handle,
4337 &evt_detail->args.pd.device_id)));
4338 break;
4339 }
4340 if ((evt_detail->args.pd_state.prevState
4536 pkt->pkt_state = STATE_GOT_BUS
4537 | STATE_GOT_TARGET | STATE_SENT_CMD
4538 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4539
4540 con_log(CL_ANN, (CE_CONT,
4541 "CDB[0] = %x completed for %s: size %lx context %x",
4542 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4543 acmd->cmd_dmacount, hdr->context));
4544 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4545 uint_t, acmd->cmd_cdblen, ulong_t,
4546 acmd->cmd_dmacount);
4547
4548 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4549 struct scsi_inquiry *inq;
4550
4551 if (acmd->cmd_dmacount != 0) {
4552 bp_mapin(acmd->cmd_buf);
4553 inq = (struct scsi_inquiry *)
4554 acmd->cmd_buf->b_un.b_addr;
4555
4556 #ifdef PDSUPPORT
4557 if (hdr->cmd_status == MFI_STAT_OK) {
4558 display_scsi_inquiry(
4559 (caddr_t)inq);
4560 }
4561 #else
4562 /* don't expose physical drives to OS */
4563 if (acmd->islogical &&
4564 (hdr->cmd_status == MFI_STAT_OK)) {
4565 display_scsi_inquiry(
4566 (caddr_t)inq);
4567 } else if ((hdr->cmd_status ==
4568 MFI_STAT_OK) && inq->inq_dtype ==
4569 DTYPE_DIRECT) {
4570
4571 display_scsi_inquiry(
4572 (caddr_t)inq);
4573
4574 /* for physical disk */
4575 hdr->cmd_status =
4576 MFI_STAT_DEVICE_NOT_FOUND;
4577 }
4578 #endif /* PDSUPPORT */
4579 }
4580 }
4581
4582 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4583 uint8_t, hdr->cmd_status);
4584
4585 switch (hdr->cmd_status) {
4586 case MFI_STAT_OK:
4587 pkt->pkt_scbp[0] = STATUS_GOOD;
4588 break;
4589 case MFI_STAT_LD_CC_IN_PROGRESS:
4590 case MFI_STAT_LD_RECON_IN_PROGRESS:
4591 pkt->pkt_scbp[0] = STATUS_GOOD;
4592 break;
4593 case MFI_STAT_LD_INIT_IN_PROGRESS:
4594 con_log(CL_ANN,
4595 (CE_WARN, "Initialization in Progress"));
4596 pkt->pkt_reason = CMD_TRAN_ERR;
4597
4598 break;
4667 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4668 pkt->pkt_reason = CMD_TRAN_ERR;
4669
4670 break;
4671 }
4672
4673 atomic_add_16(&instance->fw_outstanding, (-1));
4674
4675 (void) mrsas_common_check(instance, cmd);
4676
4677 if (acmd->cmd_dmahandle) {
4678 if (mrsas_check_dma_handle(
4679 acmd->cmd_dmahandle) != DDI_SUCCESS) {
4680 ddi_fm_service_impact(instance->dip,
4681 DDI_SERVICE_UNAFFECTED);
4682 pkt->pkt_reason = CMD_TRAN_ERR;
4683 pkt->pkt_statistics = 0;
4684 }
4685 }
4686
4687 mrsas_return_mfi_pkt(instance, cmd);
4688
4689 /* Call the callback routine */
4690 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4691 pkt->pkt_comp) {
4692 (*pkt->pkt_comp)(pkt);
4693 }
4694
4695 break;
4696
4697 case MFI_CMD_OP_SMP:
4698 case MFI_CMD_OP_STP:
4699 complete_cmd_in_sync_mode(instance, cmd);
4700 break;
4701
4702 case MFI_CMD_OP_DCMD:
4703 /* see if got an event notification */
4704 if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4705 &cmd->frame->dcmd.opcode) ==
4706 MR_DCMD_CTRL_EVENT_WAIT) {
4707 if ((instance->aen_cmd == cmd) &&
4708 (instance->aen_cmd->abort_aen)) {
4709 con_log(CL_ANN, (CE_WARN,
4710 "mrsas_softintr: "
4711 "aborted_aen returned"));
4712 } else {
4713 atomic_add_16(&instance->fw_outstanding,
4714 (-1));
5101 {
5102 uint16_t flags = 0;
5103 uint32_t i;
5104 uint32_t context;
5105 uint32_t sge_bytes;
5106 uint32_t tmp_data_xfer_len;
5107 ddi_acc_handle_t acc_handle;
5108 struct mrsas_cmd *cmd;
5109 struct mrsas_sge64 *mfi_sgl;
5110 struct mrsas_sge_ieee *mfi_sgl_ieee;
5111 struct scsa_cmd *acmd = PKT2CMD(pkt);
5112 struct mrsas_pthru_frame *pthru;
5113 struct mrsas_io_frame *ldio;
5114
5115 /* find out if this is logical or physical drive command. */
5116 acmd->islogical = MRDRV_IS_LOGICAL(ap);
5117 acmd->device_id = MAP_DEVICE_ID(instance, ap);
5118 *cmd_done = 0;
5119
5120 /* get the command packet */
5121 if (!(cmd = mrsas_get_mfi_pkt(instance))) {
5122 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
5123 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
5124 return (NULL);
5125 }
5126
5127 acc_handle = cmd->frame_dma_obj.acc_handle;
5128
5129 /* Clear the frame buffer and assign back the context id */
5130 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
5131 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
5132
5133 cmd->pkt = pkt;
5134 cmd->cmd = acmd;
5135 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
5136 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);
5137
5138 	/* determine the command direction */
5139 if (acmd->cmd_flags & CFLAG_DMASEND) {
5140 flags = MFI_FRAME_DIR_WRITE;
5141
5149
5150 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5151 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5152 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5153 DDI_DMA_SYNC_FORCPU);
5154 }
5155 } else {
5156 flags = MFI_FRAME_DIR_NONE;
5157 }
5158
5159 if (instance->flag_ieee) {
5160 flags |= MFI_FRAME_IEEE;
5161 }
5162 flags |= MFI_FRAME_SGL64;
5163
5164 switch (pkt->pkt_cdbp[0]) {
5165
5166 /*
5167 * case SCMD_SYNCHRONIZE_CACHE:
5168 * flush_cache(instance);
5169 * mrsas_return_mfi_pkt(instance, cmd);
5170 * *cmd_done = 1;
5171 *
5172 * return (NULL);
5173 */
5174
5175 case SCMD_READ:
5176 case SCMD_WRITE:
5177 case SCMD_READ_G1:
5178 case SCMD_WRITE_G1:
5179 case SCMD_READ_G4:
5180 case SCMD_WRITE_G4:
5181 case SCMD_READ_G5:
5182 case SCMD_WRITE_G5:
5183 if (acmd->islogical) {
5184 ldio = (struct mrsas_io_frame *)cmd->frame;
5185
5186 /*
5187 			 * prepare the Logical IO frame:
5188 * 2nd bit is zero for all read cmds
5189 */
5258 ((uint32_t)(pkt->pkt_cdbp[13])) |
5259 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5260 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5261 ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5262
5263 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5264 ((uint32_t)(pkt->pkt_cdbp[9])) |
5265 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5266 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5267 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5268
5269 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5270 ((uint32_t)(pkt->pkt_cdbp[5])) |
5271 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5272 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5273 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5274 }
5275
5276 break;
5277 }
5278 	/* FALLTHROUGH: for all non-read/write and physical disk cmds */
5279 default:
5280
5281 switch (pkt->pkt_cdbp[0]) {
5282 case SCMD_MODE_SENSE:
5283 case SCMD_MODE_SENSE_G1: {
5284 union scsi_cdb *cdbp;
5285 uint16_t page_code;
5286
5287 cdbp = (void *)pkt->pkt_cdbp;
5288 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
5289 switch (page_code) {
5290 case 0x3:
5291 case 0x4:
5292 (void) mrsas_mode_sense_build(pkt);
5293 mrsas_return_mfi_pkt(instance, cmd);
5294 *cmd_done = 1;
5295 return (NULL);
5296 }
5297 break;
5298 }
5299 default:
5300 break;
5301 }
5302
5303 pthru = (struct mrsas_pthru_frame *)cmd->frame;
5304
5305 /* prepare the DCDB frame */
5306 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5307 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5308 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5309 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5310 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5311 ddi_put8(acc_handle, &pthru->lun, 0);
5312 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5313 ddi_put16(acc_handle, &pthru->timeout, 0);
6307 }
6308
6309 return (rval);
6310 }
6311
6312 /*
6313 * handle_mfi_ioctl
6314 */
6315 static int
6316 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6317 int mode)
6318 {
6319 int rval = DDI_SUCCESS;
6320
6321 struct mrsas_header *hdr;
6322 struct mrsas_cmd *cmd;
6323
6324 if (instance->tbolt) {
6325 cmd = get_raid_msg_mfi_pkt(instance);
6326 } else {
6327 cmd = mrsas_get_mfi_pkt(instance);
6328 }
6329 if (!cmd) {
6330 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6331 "failed to get a cmd packet"));
6332 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6333 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6334 return (DDI_FAILURE);
6335 }
6336
6337 /* Clear the frame buffer and assign back the context id */
6338 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6339 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6340 cmd->index);
6341
6342 hdr = (struct mrsas_header *)&ioctl->frame[0];
6343
6344 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6345 case MFI_CMD_OP_DCMD:
6346 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6347 break;
6351 case MFI_CMD_OP_STP:
6352 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6353 break;
6354 case MFI_CMD_OP_LD_SCSI:
6355 case MFI_CMD_OP_PD_SCSI:
6356 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6357 break;
6358 default:
6359 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6360 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6361 rval = DDI_FAILURE;
6362 break;
6363 }
6364
6365 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6366 rval = DDI_FAILURE;
6367
6368 if (instance->tbolt) {
6369 return_raid_msg_mfi_pkt(instance, cmd);
6370 } else {
6371 mrsas_return_mfi_pkt(instance, cmd);
6372 }
6373
6374 return (rval);
6375 }
6376
6377 /*
6378 * AEN
6379 */
6380 static int
6381 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6382 {
6383 int rval = 0;
6384
6385 rval = register_mfi_aen(instance, instance->aen_seq_num,
6386 aen->class_locale_word);
6387
6388 aen->cmd_status = (uint8_t)rval;
6389
6390 return (rval);
6391 }
6448 if (prev_aen.members.class < curr_aen.members.class)
6449 curr_aen.members.class = prev_aen.members.class;
6450
6451 ret_val = abort_aen_cmd(instance, aen_cmd);
6452
6453 if (ret_val) {
6454 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
6455 		    "failed to abort previous AEN command"));
6456
6457 return (ret_val);
6458 }
6459 }
6460 } else {
6461 curr_aen.word = LE_32(class_locale_word);
6462 curr_aen.members.locale = LE_16(curr_aen.members.locale);
6463 }
6464
6465 if (instance->tbolt) {
6466 cmd = get_raid_msg_mfi_pkt(instance);
6467 } else {
6468 cmd = mrsas_get_mfi_pkt(instance);
6469 }
6470
6471 if (!cmd) {
6472 DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
6473 uint16_t, instance->max_fw_cmds);
6474 return (ENOMEM);
6475 }
6476
6477 /* Clear the frame buffer and assign back the context id */
6478 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6479 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6480 cmd->index);
6481
6482 dcmd = &cmd->frame->dcmd;
6483
6484 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
6485 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
6486
6487 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
6488 sizeof (struct mrsas_evt_detail));
6741 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6742 	    "ISSUED CMD TO FW : called : cmd"
6743 ": %p instance : %p pkt : %p pkt_time : %x\n",
6744 gethrtime(), (void *)cmd, (void *)instance,
6745 (void *)pkt, cmd->drv_pkt_time));
6746 if (instance->adapterresetinprogress) {
6747 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6748 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6749 } else {
6750 push_pending_mfi_pkt(instance, cmd);
6751 }
6752
6753 } else {
6754 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6755 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6756 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6757 }
6758
6759 mutex_enter(&instance->reg_write_mtx);
6760 /* Issue the command to the FW */
6761 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6762 (((cmd->frame_count - 1) << 1) | 1), instance);
6763 mutex_exit(&instance->reg_write_mtx);
6764
6765 }
6766
6767 /*
6768 * issue_cmd_in_sync_mode
6769 */
6770 static int
6771 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6772 struct mrsas_cmd *cmd)
6773 {
6774 int i;
6775 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6776 struct mrsas_header *hdr = &cmd->frame->hdr;
6777
6778 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6779
6780 if (instance->adapterresetinprogress) {
6781 cmd->drv_pkt_time = ddi_get16(
6782 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6783 if (cmd->drv_pkt_time < debug_timeout_g)
6784 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6785
6786 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6787 "issue and return in reset case\n"));
6788 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6789 (((cmd->frame_count - 1) << 1) | 1), instance);
6790
6791 return (DDI_SUCCESS);
6792 } else {
6793 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6794 push_pending_mfi_pkt(instance, cmd);
6795 }
6796
6797 cmd->cmd_status = ENODATA;
6798
6799 mutex_enter(&instance->reg_write_mtx);
6800 /* Issue the command to the FW */
6801 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6802 (((cmd->frame_count - 1) << 1) | 1), instance);
6803 mutex_exit(&instance->reg_write_mtx);
6804
6805 mutex_enter(&instance->int_cmd_mtx);
6806 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6807 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6808 }
6809 mutex_exit(&instance->int_cmd_mtx);
6810
6811 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6812
6813 	if (i < (msecs - 1)) {
6814 return (DDI_SUCCESS);
6815 } else {
6816 return (DDI_FAILURE);
6817 }
6818 }
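/*
 * Recap of the synchronous path above: outside of an adapter reset the
 * command is pushed onto the pending list, posted through the inbound queue
 * port under reg_write_mtx, and the caller waits on int_cmd_cv until the
 * completion path changes cmd->cmd_status from ENODATA (bounded by the loop
 * count derived from MFI_POLL_TIMEOUT_SECS).
 */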
6819
6820 /*
6821 * issue_cmd_in_poll_mode
6823 static int
6824 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6825 struct mrsas_cmd *cmd)
6826 {
6827 int i;
6828 uint16_t flags;
6829 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6830 struct mrsas_header *frame_hdr;
6831
6832 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6833
6834 frame_hdr = (struct mrsas_header *)cmd->frame;
6835 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6836 MFI_CMD_STATUS_POLL_MODE);
6837 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6838 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6839
6840 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6841
6842 /* issue the frame using inbound queue port */
6843 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6844 (((cmd->frame_count - 1) << 1) | 1), instance);
6845
6846 /* wait for cmd_status to change from 0xFF */
6847 for (i = 0; i < msecs && (
6848 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6849 == MFI_CMD_STATUS_POLL_MODE); i++) {
6850 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6851 }
6852
6853 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6854 == MFI_CMD_STATUS_POLL_MODE) {
6855 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6856 "cmd polling timed out"));
6857 return (DDI_FAILURE);
6858 }
6859
6860 return (DDI_SUCCESS);
6861 }
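/*
 * Recap of the polled path above: cmd_status is primed to
 * MFI_CMD_STATUS_POLL_MODE and MFI_FRAME_DONT_POST_IN_REPLY_QUEUE is set in
 * the frame flags, so completion is detected by firmware overwriting
 * cmd_status rather than by a reply-queue interrupt; the driver polls in
 * 1 ms steps for up to MFI_POLL_TIMEOUT_SECS seconds before declaring a
 * timeout.
 */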
6862
6863 static void
6864 enable_intr_ppc(struct mrsas_instance *instance)
6865 {
6866 uint32_t mask;
6867
6868 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6869
6870 if (instance->skinny) {
6871 /* For SKINNY, write ~0x1, from BSD's mfi driver. */
6872 WR_OB_INTR_MASK(0xfffffffe, instance);
6873 } else {
6874 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6875 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6876
6877 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6878 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6879 }
6880
6881 /* dummy read to force PCI flush */
6882 mask = RD_OB_INTR_MASK(instance);
6883
6884 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6885 "outbound_intr_mask = 0x%x", mask));
6886 }
6887
6888 static void
6889 disable_intr_ppc(struct mrsas_instance *instance)
6890 {
6891 uint32_t mask;
6892
6893 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6894
6895 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6896 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6897
6898 /* For now, assume there are no extras needed for Skinny support. */
6899
6900 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6901
6902 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6903 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6904
6905 /* dummy read to force PCI flush */
6906 mask = RD_OB_INTR_MASK(instance);
6907 #ifdef lint
6908 mask = mask;
6909 #endif
6910 }
6911
6912 static int
6913 intr_ack_ppc(struct mrsas_instance *instance)
6914 {
6915 uint32_t status;
6916 int ret = DDI_INTR_CLAIMED;
6917
6918 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6919
6920 /* check if it is our interrupt */
6921 status = RD_OB_INTR_STATUS(instance);
6922
6923 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6924
6925 /*
6926 * NOTE: Some drivers call out SKINNY here, but the return is the same
6927 * for SKINNY and 2108.
6928 */
6929 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6930 ret = DDI_INTR_UNCLAIMED;
6931 }
6932
6933 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
6934 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
6935 ret = DDI_INTR_UNCLAIMED;
6936 }
6937
6938 if (ret == DDI_INTR_UNCLAIMED) {
6939 return (ret);
6940 }
6941
6942 /*
6943 * Clear the interrupt by writing back the same value.
6944 * Another case where SKINNY is slightly different.
6945 */
6946 if (instance->skinny) {
6947 WR_OB_INTR_STATUS(status, instance);
6948 } else {
6949 WR_OB_DOORBELL_CLEAR(status, instance);
6950 }
6951
6952 /* dummy READ */
6953 status = RD_OB_INTR_STATUS(instance);
6954
6955 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6956
6957 return (ret);
6958 }
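/*
 * Recap of the interrupt acknowledge protocol above: the outbound interrupt
 * status register is read and the interrupt is claimed only if
 * MFI_REPLY_2108_MESSAGE_INTR is set (and the register access handle is
 * healthy); the interrupt is then cleared by writing the status back, either
 * to the outbound status register (Skinny) or to the doorbell-clear register
 * (2108), followed by a dummy read to flush the write over PCI.
 */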
6959
6960 /*
6961  * Marks the HBA as bad. This is called either when an I/O packet times
6962  * out even after 3 FW resets, or when the FW is found to be faulted
6963  * even after 3 consecutive resets.
6964 */
6965
6966 static int
6967 mrsas_kill_adapter(struct mrsas_instance *instance)
6968 {
6969 if (instance->deadadapter == 1)
6970 return (DDI_FAILURE);
7516 /* Hold nexus during bus_config */
7517 ndi_devi_enter(parent, &config);
7518 switch (op) {
7519 case BUS_CONFIG_ONE: {
7520
7521 /* parse wwid/target name out of name given */
7522 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7523 rval = NDI_FAILURE;
7524 break;
7525 }
7526 ptr++;
7527
7528 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7529 rval = NDI_FAILURE;
7530 break;
7531 }
7532
7533 if (lun == 0) {
7534 rval = mrsas_config_ld(instance, tgt, lun, childp);
7535 #ifdef PDSUPPORT
7536 } else if ((instance->tbolt || instance->skinny) && lun != 0) {
7537 rval = mrsas_tbolt_config_pd(instance,
7538 tgt, lun, childp);
7539 #endif
7540 } else {
7541 rval = NDI_FAILURE;
7542 }
7543
7544 break;
7545 }
7546 case BUS_CONFIG_DRIVER:
7547 case BUS_CONFIG_ALL: {
7548
7549 		(void) mrsas_config_all_devices(instance);
7550
7551 rval = NDI_SUCCESS;
7552 break;
7553 }
7554 }
7555
7556 if (rval == NDI_SUCCESS) {
7559 }
7560 ndi_devi_exit(parent, config);
7561
7562 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7563 rval));
7564 return (rval);
7565 }
7566
7567 static int
7568 mrsas_config_all_devices(struct mrsas_instance *instance)
7569 {
7570 int rval, tgt;
7571
7572 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7573 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7574
7575 }
7576
7577 #ifdef PDSUPPORT
7578 /* Config PD devices connected to the card */
7579 if (instance->tbolt || instance->skinny) {
7580 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7581 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7582 }
7583 }
7584 #endif
7585
7586 rval = NDI_SUCCESS;
7587 return (rval);
7588 }
7589
7590 static int
7591 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7592 {
7593 char devbuf[SCSI_MAXNAMELEN];
7594 char *addr;
7595 char *p, *tp, *lp;
7596 long num;
7597
7598 /* Parse dev name and address */
7599 (void) strcpy(devbuf, devnm);
7813 dip = instance->mr_ld_list[mrevt->tgt].dip;
7814 mutex_exit(&instance->config_dev_mtx);
7815 #ifdef PDSUPPORT
7816 } else {
7817 mutex_enter(&instance->config_dev_mtx);
7818 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7819 mutex_exit(&instance->config_dev_mtx);
7820 #endif
7821 }
7822
7823
7824 ndi_devi_enter(instance->dip, &circ1);
7825 switch (mrevt->event) {
7826 case MRSAS_EVT_CONFIG_TGT:
7827 if (dip == NULL) {
7828
7829 if (mrevt->lun == 0) {
7830 (void) mrsas_config_ld(instance, mrevt->tgt,
7831 0, NULL);
7832 #ifdef PDSUPPORT
7833 } else if (instance->tbolt || instance->skinny) {
7834 (void) mrsas_tbolt_config_pd(instance,
7835 mrevt->tgt,
7836 1, NULL);
7837 #endif
7838 }
7839 con_log(CL_ANN1, (CE_NOTE,
7840 "mr_sas: EVT_CONFIG_TGT called:"
7841 " for tgt %d lun %d event %d",
7842 mrevt->tgt, mrevt->lun, mrevt->event));
7843
7844 } else {
7845 con_log(CL_ANN1, (CE_NOTE,
7846 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7847 " for tgt %d lun %d event %d",
7848 mrevt->tgt, mrevt->lun, mrevt->event));
7849 }
7850 break;
7851 case MRSAS_EVT_UNCONFIG_TGT:
7852 if (dip) {
7853 if (i_ddi_devi_attached(dip)) {
|