4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2017, Joyent, Inc.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms of all code within
35 * this file that is exclusively owned by LSI, with or without
36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 * License requirements, the following conditions are met:
38 *
39 * Neither the name of the author nor the names of its contributors may be
40 * used to endorse or promote products derived from this software without
41 * specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
62 #if defined(lint) || defined(DEBUG)
63 #define MPTSAS_DEBUG
64 #endif
65
66 /*
67 * standard header files.
68 */
69 #include <sys/note.h>
70 #include <sys/scsi/scsi.h>
71 #include <sys/pci.h>
72 #include <sys/file.h>
73 #include <sys/policy.h>
74 #include <sys/model.h>
75 #include <sys/sysevent.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dr.h>
78 #include <sys/sata/sata_defs.h>
79 #include <sys/sata/sata_hba.h>
80 #include <sys/scsi/generic/sas.h>
81 #include <sys/scsi/impl/scsi_sas.h>
82
83 #pragma pack(1)
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
91 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
92 #pragma pack()
93
94 /*
95 * private header files.
96 *
97 */
98 #include <sys/scsi/impl/scsi_reset_notify.h>
99 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
100 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
101 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
102 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
103 #include <sys/raidioctl.h>
104
105 #include <sys/fs/dv_node.h> /* devfs_clean */
106
107 /*
108 * FMA header files
109 */
110 #include <sys/ddifm.h>
111 #include <sys/fm/protocol.h>
112 #include <sys/fm/util.h>
113 #include <sys/fm/io/ddi.h>
114
115 /*
116 * autoconfiguration data and routines.
117 */
118 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
119 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
120 static int mptsas_power(dev_info_t *dip, int component, int level);
121
122 /*
123 * cb_ops function
124 */
125 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
126 cred_t *credp, int *rval);
340 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
341
342 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
343 int *lun);
344 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
345
346 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
347 mptsas_phymask_t phymask, uint8_t phy);
348 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
349 mptsas_phymask_t phymask, uint64_t wwid);
350 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
351 mptsas_phymask_t phymask, uint64_t wwid);
352
353 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
354 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
355
356 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
357 uint16_t *handle, mptsas_target_t **pptgt);
358 static void mptsas_update_phymask(mptsas_t *mpt);
359
360 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
361 uint32_t *status, uint8_t cmd);
362 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
363 mptsas_phymask_t *phymask);
364 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
365 mptsas_phymask_t phymask);
366 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
367
368
369 /*
370 * Enumeration / DR functions
371 */
372 static void mptsas_config_all(dev_info_t *pdip);
373 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
374 dev_info_t **lundip);
375 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
376 dev_info_t **lundip);
377
378 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
379 static int mptsas_offline_target(dev_info_t *pdip, char *name);
380
381 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
382 dev_info_t **dip);
383
384 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
385 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
386 dev_info_t **dip, mptsas_target_t *ptgt);
387
388 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
389 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
390
391 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
392 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
393 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
394 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
395 int lun);
396
397 static void mptsas_offline_missed_luns(dev_info_t *pdip,
398 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
399 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
400 mdi_pathinfo_t *rpip, uint_t flags);
401
402 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
403 dev_info_t **smp_dip);
404 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
405 uint_t flags);
406
407 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
408 int mode, int *rval);
409 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
410 int mode, int *rval);
411 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
412 int mode, int *rval);
413 static void mptsas_record_event(void *args);
414 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
415 int mode);
416
417 mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t, uint64_t,
418 uint32_t, mptsas_phymask_t, uint8_t);
419 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
420 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
421 dev_info_t **smp_dip);
422
423 /*
424 * Power management functions
425 */
452 static void mptsas_fm_init(mptsas_t *mpt);
453 static void mptsas_fm_fini(mptsas_t *mpt);
454 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
455
456 extern pri_t minclsyspri, maxclsyspri;
457
458 /*
459 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
460 * under this device that the paths to a physical device are created when
461 * MPxIO is used.
462 */
463 extern dev_info_t *scsi_vhci_dip;
464
465 /*
466 * Tunable timeout value for Inquiry VPD page 0x83
467 * By default the value is 30 seconds.
468 */
469 int mptsas_inq83_retry_timeout = 30;
470
471 /*
472 * This is used to allocate memory for message frame storage, not for
473 * data I/O DMA. All message frames must be stored in the first 4G of
474 * physical memory.
475 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
			/* (0x78 = bits 3-6: 8/16/32/64-byte bursts) */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0		/* flags, set to 0 */
};
490
491 /*
/*
 * Loadable-module entry point: report module information to modinfo(1M)
 * by forwarding to mod_info(9F) with this driver's modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);
	NDBG0(("mptsas _info"));

	return (mod_info(&modlinkage, modinfop));
}
724
725 static int
726 mptsas_target_eval_devhdl(const void *op, void *arg)
727 {
728 uint16_t dh = *(uint16_t *)arg;
729 const mptsas_target_t *tp = op;
730
731 return ((int)tp->m_devhdl - (int)dh);
732 }
733
734 static int
735 mptsas_target_eval_slot(const void *op, void *arg)
736 {
737 mptsas_led_control_t *lcp = arg;
738 const mptsas_target_t *tp = op;
739
740 if (tp->m_enclosure != lcp->Enclosure)
741 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
742
743 return ((int)tp->m_slot_num - (int)lcp->Slot);
744 }
745
746 static int
747 mptsas_target_eval_nowwn(const void *op, void *arg)
748 {
749 uint8_t phy = *(uint8_t *)arg;
750 const mptsas_target_t *tp = op;
751
752 if (tp->m_addr.mta_wwn != 0)
753 return (-1);
754
755 return ((int)tp->m_phynum - (int)phy);
756 }
757
758 static int
759 mptsas_smp_eval_devhdl(const void *op, void *arg)
760 {
761 uint16_t dh = *(uint16_t *)arg;
762 const mptsas_smp_t *sp = op;
763
764 return ((int)sp->m_devhdl - (int)dh);
765 }
766
1187 mutex_exit(&mptsas_global_mutex);
1188
1189 /* report idle status to pm framework */
1190 if (mpt->m_options & MPTSAS_OPT_PM) {
1191 (void) pm_idle_component(dip, 0);
1192 }
1193
1194 return (DDI_SUCCESS);
1195
1196 default:
1197 return (DDI_FAILURE);
1198
1199 }
1200
1201 instance = ddi_get_instance(dip);
1202
1203 /*
1204 * Allocate softc information.
1205 */
1206 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1207 mptsas_log(NULL, CE_WARN,
1208 "mptsas%d: cannot allocate soft state", instance);
1209 goto fail;
1210 }
1211
1212 mpt = ddi_get_soft_state(mptsas_state, instance);
1213
1214 if (mpt == NULL) {
1215 mptsas_log(NULL, CE_WARN,
1216 "mptsas%d: cannot get soft state", instance);
1217 goto fail;
1218 }
1219
1220 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1221 scsi_size_clean(dip);
1222
1223 mpt->m_dip = dip;
1224 mpt->m_instance = instance;
1225
1226 /* Make a per-instance copy of the structures */
1227 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1228 if (mptsas_use_64bit_msgaddr) {
1229 mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1230 } else {
1231 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1232 }
1233 mpt->m_reg_acc_attr = mptsas_dev_attr;
1234 mpt->m_dev_acc_attr = mptsas_dev_attr;
1235
1236 /*
1281 event_taskq_create++;
1282
1283 /*
1284 * A taskq is created for dealing with dr events
1285 */
1286 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1287 "mptsas_dr_taskq",
1288 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1289 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1290 "failed");
1291 goto fail;
1292 }
1293 dr_taskq_create++;
1294
1295 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1296 0, "mptsas_doneq_thread_threshold_prop", 10);
1297 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1298 0, "mptsas_doneq_length_threshold_prop", 8);
1299 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1300 0, "mptsas_doneq_thread_n_prop", 8);
1301
1302 if (mpt->m_doneq_thread_n) {
1303 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1304 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1305
1306 mutex_enter(&mpt->m_doneq_mutex);
1307 mpt->m_doneq_thread_id =
1308 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1309 * mpt->m_doneq_thread_n, KM_SLEEP);
1310
1311 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1312 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1313 CV_DRIVER, NULL);
1314 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1315 MUTEX_DRIVER, NULL);
1316 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1317 mpt->m_doneq_thread_id[j].flag |=
1318 MPTSAS_DONEQ_THREAD_ACTIVE;
1319 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1320 mpt->m_doneq_thread_id[j].arg.t = j;
1321 mpt->m_doneq_thread_id[j].threadp =
1339 goto fail;
1340 intr_added++;
1341
1342 /* Initialize mutex used in interrupt handler */
1343 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1344 DDI_INTR_PRI(mpt->m_intr_pri));
1345 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1346 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1347 DDI_INTR_PRI(mpt->m_intr_pri));
1348 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1349 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1350 NULL, MUTEX_DRIVER,
1351 DDI_INTR_PRI(mpt->m_intr_pri));
1352 }
1353
1354 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1355 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1356 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1357 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1358 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1359 cv_init(&mpt->m_extreq_sense_refcount_cv, NULL, CV_DRIVER, NULL);
1360 mutex_init_done++;
1361
1362 mutex_enter(&mpt->m_mutex);
1363 /*
1364 * Initialize power management component
1365 */
1366 if (mpt->m_options & MPTSAS_OPT_PM) {
1367 if (mptsas_init_pm(mpt)) {
1368 mutex_exit(&mpt->m_mutex);
1369 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1370 "failed");
1371 goto fail;
1372 }
1373 }
1374
1375 /*
1376 * Initialize chip using Message Unit Reset, if allowed
1377 */
1378 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1379 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1380 mutex_exit(&mpt->m_mutex);
1381 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1432 if (mptsas_hba_setup(mpt) == FALSE)
1433 goto fail;
1434 hba_attach_setup++;
1435
1436 if (mptsas_smp_setup(mpt) == FALSE)
1437 goto fail;
1438 smp_attach_setup++;
1439
1440 if (mptsas_enc_setup(mpt) == FALSE)
1441 goto fail;
1442 enc_attach_setup++;
1443
1444 if (mptsas_cache_create(mpt) == FALSE)
1445 goto fail;
1446
1447 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1448 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1449 if (mpt->m_scsi_reset_delay == 0) {
1450 mptsas_log(mpt, CE_NOTE,
1451 "scsi_reset_delay of 0 is not recommended,"
1452 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1453 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1454 }
1455
1456 /*
1457 * Initialize the wait and done FIFO queue
1458 */
1459 mpt->m_donetail = &mpt->m_doneq;
1460 mpt->m_waitqtail = &mpt->m_waitq;
1461 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1462 mpt->m_tx_draining = 0;
1463
1464 /*
1465 * ioc cmd queue initialize
1466 */
1467 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1468 mpt->m_dev_handle = 0xFFFF;
1469
1470 MPTSAS_ENABLE_INTR(mpt);
1471
1472 /*
1632 }
1633 if (event_taskq_create) {
1634 ddi_taskq_destroy(mpt->m_event_taskq);
1635 }
1636 if (dr_taskq_create) {
1637 ddi_taskq_destroy(mpt->m_dr_taskq);
1638 }
1639 if (mutex_init_done) {
1640 mutex_destroy(&mpt->m_tx_waitq_mutex);
1641 mutex_destroy(&mpt->m_passthru_mutex);
1642 mutex_destroy(&mpt->m_mutex);
1643 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1644 mutex_destroy(
1645 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1646 }
1647 cv_destroy(&mpt->m_cv);
1648 cv_destroy(&mpt->m_passthru_cv);
1649 cv_destroy(&mpt->m_fw_cv);
1650 cv_destroy(&mpt->m_config_cv);
1651 cv_destroy(&mpt->m_fw_diag_cv);
1652 cv_destroy(&mpt->m_extreq_sense_refcount_cv);
1653 }
1654
1655 if (map_setup) {
1656 mptsas_cfg_fini(mpt);
1657 }
1658 if (config_setup) {
1659 mptsas_config_space_fini(mpt);
1660 }
1661 mptsas_free_handshake_msg(mpt);
1662 mptsas_hba_fini(mpt);
1663
1664 mptsas_fm_fini(mpt);
1665 ddi_soft_state_free(mptsas_state, instance);
1666 ddi_prop_remove_all(dip);
1667 }
1668 return (DDI_FAILURE);
1669 }
1670
1671 static int
1672 mptsas_suspend(dev_info_t *devi)
1880
1881 NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1882
1883 if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1884 return (DDI_FAILURE);
1885
1886 mpt = TRAN2MPT(tran);
1887 if (!mpt) {
1888 return (DDI_FAILURE);
1889 }
1890 /*
1891 * Still have pathinfo child, should not detach mpt driver
1892 */
1893 if (scsi_hba_iport_unit_address(dip)) {
1894 if (mpt->m_mpxio_enable) {
1895 /*
1896 * MPxIO enabled for the iport
1897 */
1898 ndi_devi_enter(scsi_vhci_dip, &circ1);
1899 ndi_devi_enter(dip, &circ);
1900 while (pip = mdi_get_next_client_path(dip, NULL)) {
1901 if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1902 continue;
1903 }
1904 ndi_devi_exit(dip, circ);
1905 ndi_devi_exit(scsi_vhci_dip, circ1);
1906 NDBG12(("detach failed because of "
1907 "outstanding path info"));
1908 return (DDI_FAILURE);
1909 }
1910 ndi_devi_exit(dip, circ);
1911 ndi_devi_exit(scsi_vhci_dip, circ1);
1912 (void) mdi_phci_unregister(dip, 0);
1913 }
1914
1915 ddi_prop_remove_all(dip);
1916
1917 return (DDI_SUCCESS);
1918 }
1919
1920 /* Make sure power level is D0 before accessing registers */
1921 if (mpt->m_options & MPTSAS_OPT_PM) {
1922 (void) pm_busy_component(dip, 0);
1923 if (mpt->m_power_level != PM_LEVEL_D0) {
1924 if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1925 DDI_SUCCESS) {
1926 mptsas_log(mpt, CE_WARN,
1927 "mptsas%d: Raise power request failed.",
1928 mpt->m_instance);
1929 (void) pm_idle_component(dip, 0);
1930 return (DDI_FAILURE);
1931 }
1932 }
1933 }
1934
1935 /*
1936 * Send RAID action system shutdown to sync IR. After action, send a
1937 * Message Unit Reset. Since after that DMA resource will be freed,
1938 * set ioc to READY state will avoid HBA initiated DMA operation.
1939 */
1940 mutex_enter(&mpt->m_mutex);
1941 MPTSAS_DISABLE_INTR(mpt);
1942 mptsas_raid_action_system_shutdown(mpt);
1943 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1944 (void) mptsas_ioc_reset(mpt, FALSE);
1945 mutex_exit(&mpt->m_mutex);
1946 mptsas_rem_intrs(mpt);
1947 ddi_taskq_destroy(mpt->m_event_taskq);
1948 ddi_taskq_destroy(mpt->m_dr_taskq);
2034
2035 mptsas_destroy_hashes(mpt);
2036
2037 /*
2038 * Delete nt_active.
2039 */
2040 mutex_enter(&mpt->m_mutex);
2041 mptsas_free_active_slots(mpt);
2042 mutex_exit(&mpt->m_mutex);
2043
2044 /* deallocate everything that was allocated in mptsas_attach */
2045 mptsas_cache_destroy(mpt);
2046
2047 mptsas_hba_fini(mpt);
2048 mptsas_cfg_fini(mpt);
2049
2050 /* Lower the power informing PM Framework */
2051 if (mpt->m_options & MPTSAS_OPT_PM) {
2052 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2053 mptsas_log(mpt, CE_WARN,
2054 "!mptsas%d: Lower power request failed "
2055 "during detach, ignoring.",
2056 mpt->m_instance);
2057 }
2058
2059 mutex_destroy(&mpt->m_tx_waitq_mutex);
2060 mutex_destroy(&mpt->m_passthru_mutex);
2061 mutex_destroy(&mpt->m_mutex);
2062 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2063 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2064 }
2065 cv_destroy(&mpt->m_cv);
2066 cv_destroy(&mpt->m_passthru_cv);
2067 cv_destroy(&mpt->m_fw_cv);
2068 cv_destroy(&mpt->m_config_cv);
2069 cv_destroy(&mpt->m_fw_diag_cv);
2070 cv_destroy(&mpt->m_extreq_sense_refcount_cv);
2071
2072 mptsas_smp_teardown(mpt);
2073 mptsas_enc_teardown(mpt);
2074 mptsas_hba_teardown(mpt);
2075
2076 mptsas_config_space_fini(mpt);
2077
2078 mptsas_free_handshake_msg(mpt);
2079
2080 mptsas_fm_fini(mpt);
2081 ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2082 ddi_prop_remove_all(dip);
2083
2084 return (DDI_SUCCESS);
2085 }
2086
2087 static void
2088 mptsas_list_add(mptsas_t *mpt)
2089 {
2090 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2091
2312 static void
2313 mptsas_smp_teardown(mptsas_t *mpt)
2314 {
2315 (void) smp_hba_detach(mpt->m_dip);
2316 if (mpt->m_smptran != NULL) {
2317 smp_hba_tran_free(mpt->m_smptran);
2318 mpt->m_smptran = NULL;
2319 }
2320 mpt->m_smp_devhdl = 0;
2321 }
2322
/*
 * Initialize the per-instance list used to track SAS enclosures.
 * Always succeeds (returns TRUE); list_create(9F) cannot fail.
 */
static int
mptsas_enc_setup(mptsas_t *mpt)
{
	list_create(&mpt->m_enclosures, sizeof (mptsas_enclosure_t),
	    offsetof(mptsas_enclosure_t, me_link));
	return (TRUE);
}
2330
2331 static void
2332 mptsas_enc_teardown(mptsas_t *mpt)
2333 {
2334 mptsas_enclosure_t *mep;
2335
2336 while ((mep = list_remove_head(&mpt->m_enclosures)) != NULL) {
2337 kmem_free(mep, sizeof (mptsas_enclosure_t));
2338 }
2339 list_destroy(&mpt->m_enclosures);
2340 }
2341
2342 static mptsas_enclosure_t *
2343 mptsas_enc_lookup(mptsas_t *mpt, uint16_t hdl)
2344 {
2345 mptsas_enclosure_t *mep;
2346
2347 ASSERT(MUTEX_HELD(&mpt->m_mutex));
2348
2349 for (mep = list_head(&mpt->m_enclosures); mep != NULL;
2350 mep = list_next(&mpt->m_enclosures, mep)) {
2351 if (hdl == mep->me_enchdl) {
2352 return (mep);
2353 }
2354 }
2355
2356 return (NULL);
2357 }
2454 /*
2455 * If IOC is not in operational state, try to hard reset it.
2456 */
2457 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2458 MPI2_IOC_STATE_OPERATIONAL) {
2459 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2460 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2461 mptsas_log(mpt, CE_WARN,
2462 "mptsas_power: hard reset failed");
2463 mutex_exit(&mpt->m_mutex);
2464 return (DDI_FAILURE);
2465 }
2466 }
2467 mpt->m_power_level = PM_LEVEL_D0;
2468 break;
2469 case PM_LEVEL_D3:
2470 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2471 MPTSAS_POWER_OFF(mpt);
2472 break;
2473 default:
2474 mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2475 mpt->m_instance, level);
2476 rval = DDI_FAILURE;
2477 break;
2478 }
2479 mutex_exit(&mpt->m_mutex);
2480 return (rval);
2481 }
2482
2483 /*
2484 * Initialize configuration space and figure out which
2485 * chip and revison of the chip the mpt driver is using.
2486 */
2487 static int
2488 mptsas_config_space_init(mptsas_t *mpt)
2489 {
2490 NDBG0(("mptsas_config_space_init"));
2491
2492 if (mpt->m_config_handle != NULL)
2493 return (TRUE);
2494
2495 if (pci_config_setup(mpt->m_dip,
2684 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2685 mpt->m_req_frame = memp;
2686
2687 /*
2688 * Clear the request frame pool.
2689 */
2690 bzero(mpt->m_req_frame, mem_size);
2691
2692 return (DDI_SUCCESS);
2693 }
2694
2695 static int
2696 mptsas_alloc_sense_bufs(mptsas_t *mpt)
2697 {
2698 ddi_dma_attr_t sense_dma_attrs;
2699 caddr_t memp;
2700 ddi_dma_cookie_t cookie;
2701 size_t mem_size;
2702 int num_extrqsense_bufs;
2703
2704 ASSERT(mpt->m_extreq_sense_refcount == 0);
2705
2706 /*
2707 * re-alloc when it has already alloced
2708 */
2709 if (mpt->m_dma_req_sense_hdl) {
2710 rmfreemap(mpt->m_erqsense_map);
2711 mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
2712 &mpt->m_acc_req_sense_hdl);
2713 }
2714
2715 /*
2716 * The size of the request sense pool is:
2717 * (Number of Request Frames - 2 ) * Request Sense Size +
2718 * extra memory for extended sense requests.
2719 */
2720 mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
2721 mptsas_extreq_sense_bufsize;
2722
2723 /*
2724 * set the DMA attributes. ARQ buffers
2725 * aligned on a 16-byte boundry.
3028 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
3029 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
3030 /*
3031 * Stick in the address of form "pPHY,LUN"
3032 */
3033 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
3034 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
3035 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
3036 == DDI_PROP_SUCCESS) {
3037 /*
3038 * Stick in the address of the form "wWWN,LUN"
3039 */
3040 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
3041 ddi_prop_free(sas_wwn);
3042 } else {
3043 return (DDI_FAILURE);
3044 }
3045
3046 ASSERT(reallen < len);
3047 if (reallen >= len) {
3048 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
3049 "length too small, it needs to be %d bytes", reallen + 1);
3050 }
3051 return (DDI_SUCCESS);
3052 }
3053
3054 /*
3055 * tran_tgt_init(9E) - target device instance initialization
3056 */
3057 static int
3058 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3059 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3060 {
3061 #ifndef __lock_lint
3062 _NOTE(ARGUNUSED(hba_tran))
3063 #endif
3064
3065 /*
3066 * At this point, the scsi_device structure already exists
3067 * and has been initialized.
3068 *
3075 int lun = sd->sd_address.a_lun;
3076 mdi_pathinfo_t *pip = NULL;
3077 mptsas_tgt_private_t *tgt_private = NULL;
3078 mptsas_target_t *ptgt = NULL;
3079 char *psas_wwn = NULL;
3080 mptsas_phymask_t phymask = 0;
3081 uint64_t sas_wwn = 0;
3082 mptsas_target_addr_t addr;
3083 mpt = SDEV2MPT(sd);
3084
3085 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
3086
3087 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
3088 (void *)hba_dip, (void *)tgt_dip, lun));
3089
3090 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3091 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
3092 ddi_set_name_addr(tgt_dip, NULL);
3093 return (DDI_FAILURE);
3094 }
3095 /*
3096 * phymask is 0 means the virtual port for RAID
3097 */
3098 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
3099 "phymask", 0);
3100 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3101 if ((pip = (void *)(sd->sd_private)) == NULL) {
3102 /*
3103 * Very bad news if this occurs. Somehow scsi_vhci has
3104 * lost the pathinfo node for this target.
3105 */
3106 return (DDI_NOT_WELL_FORMED);
3107 }
3108
3109 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
3110 DDI_PROP_SUCCESS) {
3111 mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
3112 return (DDI_FAILURE);
3113 }
3114
3115 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
3116 &psas_wwn) == MDI_SUCCESS) {
3117 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3118 sas_wwn = 0;
3119 }
3120 (void) mdi_prop_free(psas_wwn);
3121 }
3122 } else {
3123 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
3124 DDI_PROP_DONTPASS, LUN_PROP, 0);
3125 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
3126 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
3127 DDI_PROP_SUCCESS) {
3128 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3129 sas_wwn = 0;
3130 }
3131 ddi_prop_free(psas_wwn);
3132 } else {
3133 sas_wwn = 0;
3134 }
3135 }
3136
3137 ASSERT((sas_wwn != 0) || (phymask != 0));
3138 addr.mta_wwn = sas_wwn;
3139 addr.mta_phymask = phymask;
3140 mutex_enter(&mpt->m_mutex);
3141 ptgt = refhash_lookup(mpt->m_targets, &addr);
3142 mutex_exit(&mpt->m_mutex);
3143 if (ptgt == NULL) {
3144 mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
3145 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
3146 sas_wwn);
3147 return (DDI_FAILURE);
3148 }
3149 if (hba_tran->tran_tgt_private == NULL) {
3150 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
3151 KM_SLEEP);
3152 tgt_private->t_lun = lun;
3153 tgt_private->t_private = ptgt;
3154 hba_tran->tran_tgt_private = tgt_private;
3155 }
3156
3157 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3158 return (DDI_SUCCESS);
3159 }
3160 mutex_enter(&mpt->m_mutex);
3161
3162 if (ptgt->m_deviceinfo &
3163 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
3164 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
3169 struct sata_id *sid = NULL;
3170 char model[SATA_ID_MODEL_LEN + 1];
3171 char fw[SATA_ID_FW_LEN + 1];
3172 char *vid, *pid;
3173
3174 mutex_exit(&mpt->m_mutex);
3175 /*
3176 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
3177 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
3178 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
3179 */
3180 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
3181 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
3182 inq89, inq89_len, &reallen, 1);
3183
3184 if (rval != 0) {
3185 if (inq89 != NULL) {
3186 kmem_free(inq89, inq89_len);
3187 }
3188
3189 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
3190 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
3191 return (DDI_SUCCESS);
3192 }
3193 sid = (void *)(&inq89[60]);
3194
3195 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
3196 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
3197
3198 model[SATA_ID_MODEL_LEN] = 0;
3199 fw[SATA_ID_FW_LEN] = 0;
3200
3201 sata_split_model(model, &vid, &pid);
3202
3203 /*
3204 * override SCSA "inquiry-*" properties
3205 */
3206 if (vid)
3207 (void) scsi_device_prop_update_inqstring(sd,
3208 INQUIRY_VENDOR_ID, vid, strlen(vid));
3209 if (pid)
3422 /*
3423 * The command should be allowed to retry by returning
3424 * TRAN_BUSY to stall the I/O's which come from
3425 * scsi_vhci since the device/path is in unstable state
3426 * now.
3427 */
3428 return (TRAN_BUSY);
3429 } else {
3430 /*
3431 * The device is offline, just fail the command by
3432 * return TRAN_FATAL_ERROR.
3433 */
3434 return (TRAN_FATAL_ERROR);
3435 }
3436 }
3437 rval = mptsas_accept_pkt(mpt, cmd);
3438
3439 return (rval);
3440 }
3441
3442 static int
3443 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3444 {
3445 int rval = TRAN_ACCEPT;
3446 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3447
3448 NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3449
3450 ASSERT(mutex_owned(&mpt->m_mutex));
3451
3452 if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3453 rval = mptsas_prepare_pkt(cmd);
3454 if (rval != TRAN_ACCEPT) {
3455 cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3456 return (rval);
3457 }
3458 }
3459
3460 /*
3461 * reset the throttle if we were draining
3462 */
3463 if ((ptgt->m_t_ncmds == 0) &&
3464 (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3465 NDBG23(("reset throttle"));
3466 ASSERT(ptgt->m_reset_delay == 0);
3467 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3468 }
3469
3470 /*
3471 * If HBA is being reset, the DevHandles are being re-initialized,
3472 * which means that they could be invalid even if the target is still
3473 * attached. Check if being reset and if DevHandle is being
3474 * re-initialized. If this is the case, return BUSY so the I/O can be
3475 * retried later.
3476 */
3477 if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3478 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3479 if (cmd->cmd_flags & CFLAG_TXQ) {
3480 mptsas_doneq_add(mpt, cmd);
3481 mptsas_doneq_empty(mpt);
3482 return (rval);
3483 } else {
3484 return (TRAN_BUSY);
3485 }
3486 }
3487
3488 /*
3489 * If device handle has already been invalidated, just
3490 * fail the command. In theory, command from scsi_vhci
3491 * client is impossible send down command with invalid
3492 * devhdl since devhdl is set after path offline, target
3493 * driver is not suppose to select a offlined path.
3494 */
3495 if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3496 NDBG3(("rejecting command, it might because invalid devhdl "
3497 "request."));
3498 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3499 if (cmd->cmd_flags & CFLAG_TXQ) {
3500 mptsas_doneq_add(mpt, cmd);
3501 mptsas_doneq_empty(mpt);
3502 return (rval);
3503 } else {
3504 return (TRAN_FATAL_ERROR);
3505 }
3506 }
3507 /*
3508 * The first case is the normal case. mpt gets a command from the
3509 * target driver and starts it.
3510 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
3511 * commands is m_max_requests - 2.
3512 */
3513 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3514 (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3515 (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3516 (ptgt->m_reset_delay == 0) &&
3517 (ptgt->m_t_nwait == 0) &&
3518 ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3519 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3520 (void) mptsas_start_cmd(mpt, cmd);
3521 } else {
3522 mptsas_waitq_add(mpt, cmd);
3523 }
3524 } else {
3525 /*
3526 * Add this pkt to the work queue
3527 */
3600 * Initialize expiration time for passthrough commands,
3601 */
3602 cmd->cmd_active_expiration = gethrtime() +
3603 (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
3604 }
3605 return (TRUE);
3606 }
3607
3608 /*
3609 * prepare the pkt:
3610 * the pkt may have been resubmitted or just reused so
3611 * initialize some fields and do some checks.
3612 */
3613 static int
3614 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3615 {
3616 struct scsi_pkt *pkt = CMD2PKT(cmd);
3617
3618 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3619
3620 /*
3621 * Reinitialize some fields that need it; the packet may
3622 * have been resubmitted
3623 */
3624 pkt->pkt_reason = CMD_CMPLT;
3625 pkt->pkt_state = 0;
3626 pkt->pkt_statistics = 0;
3627 pkt->pkt_resid = 0;
3628 cmd->cmd_age = 0;
3629 cmd->cmd_pkt_flags = pkt->pkt_flags;
3630
3631 /*
3632 * zero status byte.
3633 */
3634 *(pkt->pkt_scbp) = 0;
3635
3636 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3637 pkt->pkt_resid = cmd->cmd_dmacount;
3638
3639 /*
3712 save_dma_handle = cmd->cmd_dmahandle;
3713 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3714 cmd->cmd_dmahandle = save_dma_handle;
3715
3716 pkt = (void *)((uchar_t *)cmd +
3717 sizeof (struct mptsas_cmd));
3718 pkt->pkt_ha_private = (opaque_t)cmd;
3719 pkt->pkt_address = *ap;
3720 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3721 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3722 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3723 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3724 cmd->cmd_cdblen = (uchar_t)cmdlen;
3725 cmd->cmd_scblen = statuslen;
3726 cmd->cmd_rqslen = SENSE_LENGTH;
3727 cmd->cmd_tgt_addr = ptgt;
3728
3729 if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
3730 (tgtlen > PKT_PRIV_LEN) ||
3731 (statuslen > EXTCMDS_STATUS_SIZE)) {
3732 int failure;
3733
			/*
			 * We are going to allocate external packet space which
			 * might include the sense data buffer for DMA so we
			 * need to increase the reference counter here.  If the
			 * HBA is in reset we simply free the allocated packet
			 * and bail out.
			 */
3741 mutex_enter(&mpt->m_mutex);
3742 if (mpt->m_in_reset) {
3743 mutex_exit(&mpt->m_mutex);
3744
3745 cmd->cmd_flags = CFLAG_FREE;
3746 kmem_cache_free(mpt->m_kmem_cache, cmd);
3747 return (NULL);
3748 }
3749 mpt->m_extreq_sense_refcount++;
3750 ASSERT(mpt->m_extreq_sense_refcount > 0);
3751 mutex_exit(&mpt->m_mutex);
3752
3753 /*
3754 * if extern alloc fails, all will be
3755 * deallocated, including cmd
3756 */
3757 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3758 cmdlen, tgtlen, statuslen, kf);
3759
3760 if (failure != 0 || cmd->cmd_extrqslen == 0) {
3761 /*
3762 * If the external packet space allocation
3763 * failed, or we didn't allocate the sense
3764 * data buffer for DMA we need to decrease the
3765 * reference counter.
3766 */
3767 mutex_enter(&mpt->m_mutex);
3768 ASSERT(mpt->m_extreq_sense_refcount > 0);
3769 mpt->m_extreq_sense_refcount--;
3770 if (mpt->m_extreq_sense_refcount == 0)
3771 cv_broadcast(
3772 &mpt->m_extreq_sense_refcount_cv);
3773 mutex_exit(&mpt->m_mutex);
3774
3775 if (failure != 0) {
3776 /*
3777 * if extern allocation fails, it will
3778 * deallocate the new pkt as well
3779 */
3780 return (NULL);
3781 }
3782 }
3783 }
3784 new_cmd = cmd;
3785
3786 } else {
3787 cmd = PKT2CMD(pkt);
3788 new_cmd = NULL;
3789 }
3790
3791
3792 /* grab cmd->cmd_cookiec here as oldcookiec */
3793
3794 oldcookiec = cmd->cmd_cookiec;
3795
3796 /*
3797 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3798 * greater than 0 and we'll need to grab the next dma window
3799 */
3800 /*
3801 * SLM-not doing extra command frame right now; may add later
3802 */
3803
3804 if (cmd->cmd_nwin > 0) {
3805
		/*
		 * Make sure we haven't gone past the total number
3886 case DDI_DMA_NOMAPPING:
3887 bioerror(bp, EFAULT);
3888 break;
3889 case DDI_DMA_TOOBIG:
3890 default:
3891 bioerror(bp, EINVAL);
3892 break;
3893 }
3894 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3895 if (new_cmd) {
3896 mptsas_scsi_destroy_pkt(ap, pkt);
3897 }
3898 return ((struct scsi_pkt *)NULL);
3899 }
3900
3901 get_dma_cookies:
3902 cmd->cmd_flags |= CFLAG_DMAVALID;
3903 ASSERT(cmd->cmd_cookiec > 0);
3904
3905 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3906 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3907 cmd->cmd_cookiec);
3908 bioerror(bp, EINVAL);
3909 if (new_cmd) {
3910 mptsas_scsi_destroy_pkt(ap, pkt);
3911 }
3912 return ((struct scsi_pkt *)NULL);
3913 }
3914
3915 /*
3916 * Allocate extra SGL buffer if needed.
3917 */
3918 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3919 (cmd->cmd_extra_frames == NULL)) {
3920 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3921 DDI_FAILURE) {
3922 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3923 "failed");
3924 bioerror(bp, ENOMEM);
3925 if (new_cmd) {
3926 mptsas_scsi_destroy_pkt(ap, pkt);
4048 ap->a_target, (void *)pkt));
4049
4050 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4051 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4052 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4053 }
4054
4055 if (cmd->cmd_sg) {
4056 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
4057 cmd->cmd_sg = NULL;
4058 }
4059
4060 mptsas_free_extra_sgl_frame(mpt, cmd);
4061
4062 if ((cmd->cmd_flags &
4063 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
4064 CFLAG_SCBEXTERN)) == 0) {
4065 cmd->cmd_flags = CFLAG_FREE;
4066 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4067 } else {
4068 boolean_t extrqslen = cmd->cmd_extrqslen != 0;
4069
4070 mptsas_pkt_destroy_extern(mpt, cmd);
4071
4072 /*
4073 * If the packet had the sense data buffer for DMA allocated we
4074 * need to decrease the reference counter.
4075 */
4076 if (extrqslen) {
4077 mutex_enter(&mpt->m_mutex);
4078 ASSERT(mpt->m_extreq_sense_refcount > 0);
4079 mpt->m_extreq_sense_refcount--;
4080 if (mpt->m_extreq_sense_refcount == 0)
4081 cv_broadcast(&mpt->m_extreq_sense_refcount_cv);
4082 mutex_exit(&mpt->m_mutex);
4083 }
4084 }
4085 }
4086
4087 /*
4088 * kmem cache constructor and destructor:
4089 * When constructing, we bzero the cmd and allocate the dma handle
4090 * When destructing, just free the dma handle
4091 */
4092 static int
4093 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4094 {
4095 mptsas_cmd_t *cmd = buf;
4096 mptsas_t *mpt = cdrarg;
4097 int (*callback)(caddr_t);
4098
4099 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4100
4101 NDBG4(("mptsas_kmem_cache_constructor"));
4102
4103 /*
4104 * allocate a dma handle
5223 mptsas_handle_scsi_io_success(mptsas_t *mpt,
5224 pMpi2ReplyDescriptorsUnion_t reply_desc)
5225 {
5226 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
5227 uint16_t SMID;
5228 mptsas_slots_t *slots = mpt->m_active;
5229 mptsas_cmd_t *cmd = NULL;
5230 struct scsi_pkt *pkt;
5231
5232 ASSERT(mutex_owned(&mpt->m_mutex));
5233
5234 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
5235 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
5236
5237 /*
5238 * This is a success reply so just complete the IO. First, do a sanity
5239 * check on the SMID. The final slot is used for TM requests, which
5240 * would not come into this reply handler.
5241 */
5242 if ((SMID == 0) || (SMID > slots->m_n_normal)) {
5243 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
5244 SMID);
5245 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5246 return;
5247 }
5248
5249 cmd = slots->m_slot[SMID];
5250
5251 /*
5252 * print warning and return if the slot is empty
5253 */
5254 if (cmd == NULL) {
5255 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
5256 "in slot %d", SMID);
5257 return;
5258 }
5259
5260 pkt = CMD2PKT(cmd);
5261 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
5262 STATE_GOT_STATUS);
5263 if (cmd->cmd_flags & CFLAG_DMAVALID) {
5264 pkt->pkt_state |= STATE_XFERRED_DATA;
5265 }
5266 pkt->pkt_resid = 0;
5267
5268 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
5269 cmd->cmd_flags |= CFLAG_FINISHED;
5270 cv_broadcast(&mpt->m_passthru_cv);
5271 return;
5272 } else {
5273 mptsas_remove_cmd(mpt, cmd);
5274 }
5275
5276 if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
5304 m_replyh_arg_t *args;
5305 int reply_frame_no;
5306
5307 ASSERT(mutex_owned(&mpt->m_mutex));
5308
5309 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5310 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5311 &address_reply->ReplyFrameAddress);
5312 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5313
5314 /*
5315 * If reply frame is not in the proper range we should ignore this
5316 * message and exit the interrupt handler.
5317 */
5318 reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
5319 if ((reply_addr < reply_frame_dma_baseaddr) ||
5320 (reply_addr >= (reply_frame_dma_baseaddr +
5321 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5322 ((reply_addr - reply_frame_dma_baseaddr) %
5323 mpt->m_reply_frame_size != 0)) {
5324 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5325 "address 0x%x\n", reply_addr);
5326 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5327 return;
5328 }
5329
5330 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5331 DDI_DMA_SYNC_FORCPU);
5332 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5333 reply_frame_dma_baseaddr));
5334 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5335
5336 NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5337 function, reply_addr));
5338
5339 /*
5340 * don't get slot information and command for events since these values
5341 * don't exist
5342 */
5343 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5344 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5345 /*
5346 * This could be a TM reply, which use the last allocated SMID,
5347 * so allow for that.
5348 */
5349 if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5350 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5351 "%d\n", SMID);
5352 ddi_fm_service_impact(mpt->m_dip,
5353 DDI_SERVICE_UNAFFECTED);
5354 return;
5355 }
5356
5357 cmd = slots->m_slot[SMID];
5358
5359 /*
5360 * print warning and return if the slot is empty
5361 */
5362 if (cmd == NULL) {
5363 mptsas_log(mpt, CE_WARN, "?NULL command for address "
5364 "reply in slot %d", SMID);
5365 return;
5366 }
5367 if ((cmd->cmd_flags &
5368 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5369 cmd->cmd_rfm = reply_addr;
5370 cmd->cmd_flags |= CFLAG_FINISHED;
5371 cv_broadcast(&mpt->m_passthru_cv);
5372 cv_broadcast(&mpt->m_config_cv);
5373 cv_broadcast(&mpt->m_fw_diag_cv);
5374 return;
5375 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5376 mptsas_remove_cmd(mpt, cmd);
5377 }
5378 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5379 }
5380 /*
5381 * Depending on the function, we need to handle
5382 * the reply frame (and cmd) differently.
5383 */
5456 &reply->IOCStatus);
5457 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5458 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5459 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5460 pBuffer =
5461 &mpt->m_fw_diag_buffer_list[buffer_type];
5462 pBuffer->valid_data = TRUE;
5463 pBuffer->owned_by_firmware = FALSE;
5464 pBuffer->immediate = FALSE;
5465 }
5466 } else {
5467 /*
5468 * Normal handling of diag post reply with SMID.
5469 */
5470 cmd = slots->m_slot[SMID];
5471
5472 /*
5473 * print warning and return if the slot is empty
5474 */
5475 if (cmd == NULL) {
5476 mptsas_log(mpt, CE_WARN, "?NULL command for "
5477 "address reply in slot %d", SMID);
5478 return;
5479 }
5480 cmd->cmd_rfm = reply_addr;
5481 cmd->cmd_flags |= CFLAG_FINISHED;
5482 cv_broadcast(&mpt->m_fw_diag_cv);
5483 }
5484 return;
5485 default:
5486 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5487 break;
5488 }
5489
5490 /*
5491 * Return the reply frame to the free queue.
5492 */
5493 ddi_put32(mpt->m_acc_free_queue_hdl,
5494 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5495 reply_addr);
5496 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5541
5542 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5543 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5544 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5545 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5546 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5547 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5548 &reply->ResponseInfo);
5549
5550 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5551 sas_wwn = ptgt->m_addr.mta_wwn;
5552 phy = ptgt->m_phynum;
5553 if (sas_wwn == 0) {
5554 (void) sprintf(wwn_str, "p%x", phy);
5555 } else {
5556 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5557 }
5558 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5559 &reply->IOCLogInfo);
5560 mptsas_log(mpt, CE_NOTE,
5561 "?Log info 0x%x received for target %d %s.\n"
5562 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5563 loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5564 scsi_state);
5565 }
5566
5567 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5568 scsi_status, ioc_status, scsi_state));
5569
5570 pkt = CMD2PKT(cmd);
5571 *(pkt->pkt_scbp) = scsi_status;
5572
5573 if (loginfo == 0x31170000) {
5574 /*
5575 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5576 * 0x31170000 comes, that means the device missing delay
5577 * is in progressing, the command need retry later.
5578 */
5579 *(pkt->pkt_scbp) = STATUS_BUSY;
5580 return;
5581 }
5582
5583 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5584 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5585 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5586 pkt->pkt_reason = CMD_INCOMPLETE;
5587 pkt->pkt_state |= STATE_GOT_BUS;
5588 if (ptgt->m_reset_delay == 0) {
5589 mptsas_set_throttle(mpt, ptgt,
5590 DRAIN_THROTTLE);
5591 }
5592 return;
5593 }
5594
5595 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5596 responsedata &= 0x000000FF;
5597 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5598 mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
5599 pkt->pkt_reason = CMD_TLR_OFF;
5600 return;
5601 }
5602 }
5603
5604
5605 switch (scsi_status) {
5606 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5607 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5608 arqstat = (void*)(pkt->pkt_scbp);
5609 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5610 (pkt->pkt_scbp));
5611 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5612 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5613 if (cmd->cmd_flags & CFLAG_XARQ) {
5614 pkt->pkt_state |= STATE_XARQ_DONE;
5615 }
5616 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5617 pkt->pkt_state |= STATE_XFERRED_DATA;
5618 }
5654 }
5655
5656 /*
5657 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5658 * ASC/ASCQ=0x25/0x00 means invalid lun
5659 */
5660 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5661 (scsi_sense_asc(sensedata) == 0x3F) &&
5662 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5663 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5664 (scsi_sense_asc(sensedata) == 0x25) &&
5665 (scsi_sense_ascq(sensedata) == 0x00))) {
5666 mptsas_topo_change_list_t *topo_node = NULL;
5667
5668 topo_node = kmem_zalloc(
5669 sizeof (mptsas_topo_change_list_t),
5670 KM_NOSLEEP);
5671 if (topo_node == NULL) {
5672 mptsas_log(mpt, CE_NOTE, "No memory"
5673 "resource for handle SAS dynamic"
5674 "reconfigure.\n");
5675 break;
5676 }
5677 topo_node->mpt = mpt;
5678 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5679 topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5680 topo_node->devhdl = ptgt->m_devhdl;
5681 topo_node->object = (void *)ptgt;
5682 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5683
5684 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5685 mptsas_handle_dr,
5686 (void *)topo_node,
5687 DDI_NOSLEEP)) != DDI_SUCCESS) {
5688 kmem_free(topo_node,
5689 sizeof (mptsas_topo_change_list_t));
5690 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5691 "for handle SAS dynamic reconfigure"
5692 "failed. \n");
5693 }
5694 }
5695 break;
5696 case MPI2_SCSI_STATUS_GOOD:
5697 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5698 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5699 pkt->pkt_reason = CMD_DEV_GONE;
5700 pkt->pkt_state |= STATE_GOT_BUS;
5701 if (ptgt->m_reset_delay == 0) {
5702 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5703 }
5704 NDBG31(("lost disk for target%d, command:%x",
5705 Tgt(cmd), pkt->pkt_cdbp[0]));
5706 break;
5707 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5708 NDBG31(("data overrun: xferred=%d", xferred));
5709 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5710 pkt->pkt_reason = CMD_DATA_OVR;
5711 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5712 | STATE_SENT_CMD | STATE_GOT_STATUS
5752 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5753 case MPI2_IOCSTATUS_BUSY:
5754 /*
5755 * set throttles to drain
5756 */
5757 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5758 ptgt = refhash_next(mpt->m_targets, ptgt)) {
5759 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5760 }
5761
5762 /*
5763 * retry command
5764 */
5765 cmd->cmd_flags |= CFLAG_RETRY;
5766 cmd->cmd_pkt_flags |= FLAG_HEAD;
5767
5768 (void) mptsas_accept_pkt(mpt, cmd);
5769 break;
5770 default:
5771 mptsas_log(mpt, CE_WARN,
5772 "unknown ioc_status = %x\n", ioc_status);
5773 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5774 "count = %x, scsi_status = %x", scsi_state,
5775 xferred, scsi_status);
5776 break;
5777 }
5778 break;
5779 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5780 mptsas_handle_qfull(mpt, cmd);
5781 break;
5782 case MPI2_SCSI_STATUS_BUSY:
5783 NDBG31(("scsi_status busy received"));
5784 break;
5785 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5786 NDBG31(("scsi_status reservation conflict received"));
5787 break;
5788 default:
5789 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5790 scsi_status, ioc_status);
5791 mptsas_log(mpt, CE_WARN,
5792 "mptsas_process_intr: invalid scsi status\n");
5793 break;
5794 }
5795 }
5796
5797 static void
5798 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5799 mptsas_cmd_t *cmd)
5800 {
5801 uint8_t task_type;
5802 uint16_t ioc_status;
5803 uint32_t log_info;
5804 uint16_t dev_handle;
5805 struct scsi_pkt *pkt = CMD2PKT(cmd);
5806
5807 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5808 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5809 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5810 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5811
5812 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5813 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5814 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5815 task_type, ioc_status, log_info, dev_handle);
5816 pkt->pkt_reason = CMD_INCOMPLETE;
5817 return;
5818 }
5819
5820 switch (task_type) {
5821 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5822 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5823 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5824 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5825 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5826 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5827 break;
5828 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5829 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5830 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5831 /*
5832 * Check for invalid DevHandle of 0 in case application
5833 * sends bad command. DevHandle of 0 could cause problems.
5834 */
5835 if (dev_handle == 0) {
5836 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5837 " DevHandle of 0.");
5838 } else {
5839 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5840 task_type);
5841 }
5842 break;
5843 default:
5844 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5845 task_type);
5846 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5847 break;
5848 }
5849 }
5850
5851 static void
5852 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
5853 {
5854 mptsas_t *mpt = arg->mpt;
5855 uint64_t t = arg->t;
5856 mptsas_cmd_t *cmd;
6003 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
6004 {
6005 uint8_t reply_type;
6006
6007 ASSERT(mutex_owned(&mpt->m_mutex));
6008
6009 /*
6010 * The reply is valid, process it according to its
6011 * type. Also, set a flag for updated the reply index
6012 * after they've all been processed.
6013 */
6014 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
6015 &reply_desc_union->Default.ReplyFlags);
6016 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
6017 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
6018 reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
6019 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
6020 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
6021 mptsas_handle_address_reply(mpt, reply_desc_union);
6022 } else {
6023 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
6024 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
6025 }
6026
6027 /*
6028 * Clear the reply descriptor for re-use and increment
6029 * index.
6030 */
6031 ddi_put64(mpt->m_acc_post_queue_hdl,
6032 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
6033 0xFFFFFFFFFFFFFFFF);
6034 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
6035 DDI_DMA_SYNC_FORDEV);
6036 }
6037
6038 /*
6039 * handle qfull condition
6040 */
6041 static void
6042 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
6043 {
6447 }
6448 (void) sprintf(phy_mask_name, "%x", phymask);
6449 }
6450 parent = scsi_hba_iport_find(mpt->m_dip,
6451 phy_mask_name);
6452 if (parent == NULL) {
6453 mptsas_log(mpt, CE_WARN, "Failed to find an "
6454 "iport, should not happen!");
6455 goto out;
6456 }
6457
6458 }
6459 ASSERT(parent);
6460 handle_topo_change:
6461
6462 mutex_enter(&mpt->m_mutex);
6463 /*
6464 * If HBA is being reset, don't perform operations depending
6465 * on the IOC. We must free the topo list, however.
6466 */
6467 if (!mpt->m_in_reset)
6468 mptsas_handle_topo_change(topo_node, parent);
6469 else
6470 NDBG20(("skipping topo change received during reset"));
6471 save_node = topo_node;
6472 topo_node = topo_node->next;
6473 ASSERT(save_node);
6474 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6475 mutex_exit(&mpt->m_mutex);
6476
6477 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6478 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6479 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6480 /*
6481 * If direct attached device associated, make sure
6482 * reset the parent before start the next one. But
6483 * all devices associated with expander shares the
6484 * parent. Also, reset parent if this is for RAID.
6485 */
6486 parent = NULL;
6487 }
6488 }
6489 out:
6490 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6536 if (ptgt == NULL) {
6537 /*
6538 * If a Phys Disk was deleted, RAID info needs to be
6539 * updated to reflect the new topology.
6540 */
6541 (void) mptsas_get_raid_info(mpt);
6542
6543 /*
6544 * Get sas device page 0 by DevHandle to make sure if
6545 * SSP/SATA end device exist.
6546 */
6547 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6548 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6549 topo_node->devhdl;
6550
6551 rval = mptsas_get_target_device_info(mpt, page_address,
6552 &devhdl, &ptgt);
6553 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6554 mptsas_log(mpt, CE_NOTE,
6555 "mptsas_handle_topo_change: target %d is "
6556 "not a SAS/SATA device. \n",
6557 topo_node->devhdl);
6558 } else if (rval == DEV_INFO_FAIL_ALLOC) {
6559 mptsas_log(mpt, CE_NOTE,
6560 "mptsas_handle_topo_change: could not "
6561 "allocate memory. \n");
6562 } else if (rval == DEV_INFO_FAIL_GUID) {
6563 mptsas_log(mpt, CE_NOTE,
6564 "mptsas_handle_topo_change: could not "
6565 "get SATA GUID for target %d. \n",
6566 topo_node->devhdl);
6567 }
6568 /*
6569 * If rval is DEV_INFO_PHYS_DISK or indicates failure
6570 * then there is nothing else to do, just leave.
6571 */
6572 if (rval != DEV_INFO_SUCCESS) {
6573 return;
6574 }
6575 }
6576
6577 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6578
6579 mutex_exit(&mpt->m_mutex);
6580 flags = topo_node->flags;
6581
6582 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6583 phymask = ptgt->m_addr.mta_phymask;
6584 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6585 (void) sprintf(phy_mask_name, "%x", phymask);
6772 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6773 MPTSAS_NUM_PHYS);
6774 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6775 "prop update failed");
6776 mutex_enter(&mpt->m_mutex);
6777 break;
6778 }
6779 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6780 MPTSAS_VIRTUAL_PORT, 1) !=
6781 DDI_PROP_SUCCESS) {
6782 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6783 MPTSAS_VIRTUAL_PORT);
6784 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6785 "prop update failed");
6786 mutex_enter(&mpt->m_mutex);
6787 break;
6788 }
6789 }
6790
6791 mutex_enter(&mpt->m_mutex);
6792 ptgt->m_led_status = 0;
6793 (void) mptsas_flush_led_status(mpt, ptgt);
6794 if (rval == DDI_SUCCESS) {
6795 refhash_remove(mpt->m_targets, ptgt);
6796 ptgt = NULL;
6797 } else {
6798 /*
6799 * clean DR_INTRANSITION flag to allow I/O down to
6800 * PHCI driver since failover finished.
6801 * Invalidate the devhdl
6802 */
6803 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6804 ptgt->m_tgt_unconfigured = 0;
6805 mutex_enter(&mpt->m_tx_waitq_mutex);
6806 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6807 mutex_exit(&mpt->m_tx_waitq_mutex);
6808 }
6809
6810 /*
6811 * Send SAS IO Unit Control to free the dev handle
6812 */
6813 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6873
6874 mutex_enter(&mpt->m_mutex);
6875 break;
6876 }
6877 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6878 {
6879 devhdl = topo_node->devhdl;
6880 uint32_t dev_info;
6881
6882 psmp = refhash_linear_search(mpt->m_smp_targets,
6883 mptsas_smp_eval_devhdl, &devhdl);
6884 if (psmp == NULL)
6885 break;
6886 /*
6887 * The mptsas_smp_t data is released only if the dip is offlined
6888 * successfully.
6889 */
6890 mutex_exit(&mpt->m_mutex);
6891
6892 ndi_devi_enter(parent, &circ1);
6893 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6894 ndi_devi_exit(parent, circ1);
6895
6896 dev_info = psmp->m_deviceinfo;
6897 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6898 DEVINFO_DIRECT_ATTACHED) {
6899 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6900 MPTSAS_VIRTUAL_PORT, 1) !=
6901 DDI_PROP_SUCCESS) {
6902 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6903 MPTSAS_VIRTUAL_PORT);
6904 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6905 "prop update failed");
6906 return;
6907 }
6908 /*
6909 * Check whether the smp connected to the iport,
6910 */
6911 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6912 MPTSAS_NUM_PHYS, 0) !=
6913 DDI_PROP_SUCCESS) {
6914 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6915 MPTSAS_NUM_PHYS);
6916 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6917 "prop update failed");
6918 return;
6919 }
6920 /*
6921 * Clear parent's attached-port props
6922 */
6923 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6924 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6925 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6926 DDI_PROP_SUCCESS) {
6927 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6928 SCSI_ADDR_PROP_ATTACHED_PORT);
6929 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6930 "prop update failed");
6931 return;
6932 }
6933 }
6934
6935 mutex_enter(&mpt->m_mutex);
6936 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6937 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6938 if (rval == DDI_SUCCESS) {
6939 refhash_remove(mpt->m_smp_targets, psmp);
6940 } else {
6941 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6942 }
6943
6944 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6945
6946 break;
6947 }
6948 default:
6949 return;
6950 }
7045 static int
7046 mptsas_handle_event_sync(void *args)
7047 {
7048 m_replyh_arg_t *replyh_arg;
7049 pMpi2EventNotificationReply_t eventreply;
7050 uint32_t event, rfm;
7051 mptsas_t *mpt;
7052 uint_t iocstatus;
7053
7054 replyh_arg = (m_replyh_arg_t *)args;
7055 rfm = replyh_arg->rfm;
7056 mpt = replyh_arg->mpt;
7057
7058 ASSERT(mutex_owned(&mpt->m_mutex));
7059
7060 eventreply = (pMpi2EventNotificationReply_t)
7061 (mpt->m_reply_frame + (rfm -
7062 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7063 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7064
7065 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7066 &eventreply->IOCStatus)) {
7067 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7068 mptsas_log(mpt, CE_WARN,
7069 "!mptsas_handle_event_sync: event 0x%x, "
7070 "IOCStatus=0x%x, "
7071 "IOCLogInfo=0x%x", event, iocstatus,
7072 ddi_get32(mpt->m_acc_reply_frame_hdl,
7073 &eventreply->IOCLogInfo));
7074 } else {
7075 mptsas_log(mpt, CE_WARN,
7076 "mptsas_handle_event_sync: event 0x%x, "
7077 "IOCStatus=0x%x, "
7078 "(IOCLogInfo=0x%x)", event, iocstatus,
7079 ddi_get32(mpt->m_acc_reply_frame_hdl,
7080 &eventreply->IOCLogInfo));
7081 }
7082 }
7083
7084 /*
7085 * figure out what kind of event we got and handle accordingly
7086 */
7087 switch (event) {
7088 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7089 {
7521 break;
7522 }
7523 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7524 mpt->m_instance, phy, dev_handle, string, curr,
7525 prev));
7526 }
7527 if (topo_head != NULL) {
7528 /*
7529 * Launch DR taskq to handle topology change
7530 */
7531 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7532 mptsas_handle_dr, (void *)topo_head,
7533 DDI_NOSLEEP)) != DDI_SUCCESS) {
7534 while (topo_head != NULL) {
7535 topo_node = topo_head;
7536 topo_head = topo_head->next;
7537 kmem_free(topo_node,
7538 sizeof (mptsas_topo_change_list_t));
7539 }
7540 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7541 "for handle SAS DR event failed. \n");
7542 }
7543 }
7544 break;
7545 }
7546 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7547 {
7548 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7549 mptsas_topo_change_list_t *topo_head = NULL;
7550 mptsas_topo_change_list_t *topo_tail = NULL;
7551 mptsas_topo_change_list_t *topo_node = NULL;
7552 mptsas_target_t *ptgt;
7553 uint8_t num_entries, i, reason;
7554 uint16_t volhandle, diskhandle;
7555
7556 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7557 eventreply->EventData;
7558 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7559 &irChangeList->NumElements);
7560
7561 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7705 }
7706 default:
7707 break;
7708 }
7709 }
7710
7711 if (topo_head != NULL) {
7712 /*
7713 * Launch DR taskq to handle topology change
7714 */
7715 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7716 mptsas_handle_dr, (void *)topo_head,
7717 DDI_NOSLEEP)) != DDI_SUCCESS) {
7718 while (topo_head != NULL) {
7719 topo_node = topo_head;
7720 topo_head = topo_head->next;
7721 kmem_free(topo_node,
7722 sizeof (mptsas_topo_change_list_t));
7723 }
7724 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7725 "for handle SAS DR event failed. \n");
7726 }
7727 }
7728 break;
7729 }
7730 default:
7731 return (DDI_FAILURE);
7732 }
7733
7734 return (DDI_SUCCESS);
7735 }
7736
7737 /*
7738 * handle events from ioc
7739 */
7740 static void
7741 mptsas_handle_event(void *args)
7742 {
7743 m_replyh_arg_t *replyh_arg;
7744 pMpi2EventNotificationReply_t eventreply;
7745 uint32_t event, iocloginfo, rfm;
7750
7751 replyh_arg = (m_replyh_arg_t *)args;
7752 rfm = replyh_arg->rfm;
7753 mpt = replyh_arg->mpt;
7754
7755 mutex_enter(&mpt->m_mutex);
7756 /*
7757 * If HBA is being reset, drop incoming event.
7758 */
7759 if (mpt->m_in_reset) {
7760 NDBG20(("dropping event received prior to reset"));
7761 mutex_exit(&mpt->m_mutex);
7762 return;
7763 }
7764
7765 eventreply = (pMpi2EventNotificationReply_t)
7766 (mpt->m_reply_frame + (rfm -
7767 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7768 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7769
7770 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7771 &eventreply->IOCStatus)) {
7772 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7773 mptsas_log(mpt, CE_WARN,
7774 "!mptsas_handle_event: IOCStatus=0x%x, "
7775 "IOCLogInfo=0x%x", iocstatus,
7776 ddi_get32(mpt->m_acc_reply_frame_hdl,
7777 &eventreply->IOCLogInfo));
7778 } else {
7779 mptsas_log(mpt, CE_WARN,
7780 "mptsas_handle_event: IOCStatus=0x%x, "
7781 "IOCLogInfo=0x%x", iocstatus,
7782 ddi_get32(mpt->m_acc_reply_frame_hdl,
7783 &eventreply->IOCLogInfo));
7784 }
7785 }
7786
7787 /*
7788 * figure out what kind of event we got and handle accordingly
7789 */
7790 switch (event) {
7791 case MPI2_EVENT_LOG_ENTRY_ADDED:
7792 break;
7793 case MPI2_EVENT_LOG_DATA:
7794 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7865 uint16_t enchdl;
7866 char string[80];
7867 mptsas_enclosure_t *mep;
7868
7869 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7870 eventreply->EventData;
7871
7872 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7873 &encstatus->ReasonCode);
7874 enchdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7875 &encstatus->EnclosureHandle);
7876
7877 switch (rc) {
7878 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7879 (void) sprintf(string, "added");
7880 break;
7881 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7882 mep = mptsas_enc_lookup(mpt, enchdl);
7883 if (mep != NULL) {
7884 list_remove(&mpt->m_enclosures, mep);
7885 kmem_free(mep, sizeof (*mep));
7886 }
7887 (void) sprintf(string, ", not responding");
7888 break;
7889 default:
7890 break;
7891 }
7892 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7893 "%x%s\n", mpt->m_instance,
7894 ddi_get16(mpt->m_acc_reply_frame_hdl,
7895 &encstatus->EnclosureHandle), string));
7896
7897 /*
7898 * No matter what has happened, update all of our device state
7899 * for enclosures, by retriggering an evaluation.
7900 */
7901 mpt->m_done_traverse_enc = 0;
7902 mptsas_update_hashtab(mpt);
7903 break;
7904 }
7905
8132 found = TRUE;
8133 break;
8134 }
8135 }
8136 }
8137 if (!found) {
8138 break;
8139 }
8140
8141 switch (irVolume->ReasonCode) {
8142 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
8143 {
8144 uint32_t i;
8145 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
8146 state;
8147
8148 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
8149 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
8150 ", auto-config of hot-swap drives is %s"
8151 ", write caching is %s"
8152 ", hot-spare pool mask is %02x\n",
8153 vol, state &
8154 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
8155 ? "disabled" : "enabled",
8156 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
8157 ? "controlled by member disks" :
8158 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
8159 ? "disabled" :
8160 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
8161 ? "enabled" :
8162 "incorrectly set",
8163 (state >> 16) & 0xff);
8164 break;
8165 }
8166 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
8167 {
8168 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
8169 (uint8_t)state;
8170
8171 mptsas_log(mpt, CE_NOTE,
8172 "Volume %d is now %s\n", vol,
8173 state == MPI2_RAID_VOL_STATE_OPTIMAL
8174 ? "optimal" :
8175 state == MPI2_RAID_VOL_STATE_DEGRADED
8176 ? "degraded" :
8177 state == MPI2_RAID_VOL_STATE_ONLINE
8178 ? "online" :
8179 state == MPI2_RAID_VOL_STATE_INITIALIZING
8180 ? "initializing" :
8181 state == MPI2_RAID_VOL_STATE_FAILED
8182 ? "failed" :
8183 state == MPI2_RAID_VOL_STATE_MISSING
8184 ? "missing" :
8185 "state unknown");
8186 break;
8187 }
8188 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
8189 {
8190 mpt->m_raidconfig[config].m_raidvol[vol].
8191 m_statusflags = state;
8192
8193 mptsas_log(mpt, CE_NOTE,
8194 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
8195 vol,
8196 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
8197 ? ", enabled" : ", disabled",
8198 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
8199 ? ", quiesced" : "",
8200 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
8201 ? ", inactive" : ", active",
8202 state &
8203 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
8204 ? ", bad block table is full" : "",
8205 state &
8206 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
8207 ? ", resync in progress" : "",
8208 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8209 ? ", background initialization in progress" : "",
8210 state &
8211 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8212 ? ", capacity expansion in progress" : "",
8213 state &
8214 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8244 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8245 &irPhysDisk->ReasonCode);
8246
8247 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8248
8249 switch (reason) {
8250 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8251 mptsas_log(mpt, CE_NOTE,
8252 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8253 "for enclosure with handle 0x%x is now in hot "
8254 "spare pool %d",
8255 physdisknum, devhandle, slot, enchandle,
8256 (state >> 16) & 0xff);
8257 break;
8258
8259 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8260 status = state;
8261 mptsas_log(mpt, CE_NOTE,
8262 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8263 "for enclosure with handle 0x%x is now "
8264 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
8265 enchandle,
8266 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8267 ? ", inactive" : ", active",
8268 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8269 ? ", out of sync" : "",
8270 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8271 ? ", quiesced" : "",
8272 status &
8273 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8274 ? ", write cache enabled" : "",
8275 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8276 ? ", capacity expansion target" : "");
8277 break;
8278
8279 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8280 mptsas_log(mpt, CE_NOTE,
8281 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8282 "for enclosure with handle 0x%x is now %s\n",
8283 physdisknum, devhandle, slot, enchandle,
8284 state == MPI2_RAID_PD_STATE_OPTIMAL
8285 ? "optimal" :
8286 state == MPI2_RAID_PD_STATE_REBUILDING
8287 ? "rebuilding" :
8288 state == MPI2_RAID_PD_STATE_DEGRADED
8289 ? "degraded" :
8290 state == MPI2_RAID_PD_STATE_HOT_SPARE
8291 ? "a hot spare" :
8292 state == MPI2_RAID_PD_STATE_ONLINE
8293 ? "online" :
8294 state == MPI2_RAID_PD_STATE_OFFLINE
8295 ? "offline" :
8296 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8297 ? "not compatible" :
8298 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8299 ? "not configured" :
8300 "state unknown");
8301 break;
8302 }
8303 break;
8304 }
8305 default:
8306 NDBG20(("mptsas%d: unknown event %x received",
8307 mpt->m_instance, event));
8308 break;
8309 }
8310
8311 /*
8312 * Return the reply frame to the free queue.
8313 */
8314 ddi_put32(mpt->m_acc_free_queue_hdl,
8315 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8316 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8317 DDI_DMA_SYNC_FORDEV);
8318 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8319 mpt->m_free_index = 0;
8320 }
8321 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8322 mpt->m_free_index);
8323 mutex_exit(&mpt->m_mutex);
8324 }
8325
8532
8533 /*
8534 * A Bus Reset could occur at any time and flush the tx_waitq,
8535 * so we cannot count on the tx_waitq to contain even one cmd.
8536 * And when the m_tx_waitq_mutex is released and run
8537 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8538 */
8539 cmd = mpt->m_tx_waitq;
8540 for (;;) {
8541 if ((cmd = mpt->m_tx_waitq) == NULL) {
8542 mpt->m_tx_draining = 0;
8543 break;
8544 }
8545 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8546 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8547 }
8548 cmd->cmd_linkp = NULL;
8549 mutex_exit(&mpt->m_tx_waitq_mutex);
8550 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8551 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8552 "to accept cmd on queue\n");
8553 mutex_enter(&mpt->m_tx_waitq_mutex);
8554 }
8555 }
8556
8557
8558 /*
8559 * mpt tag type lookup
8560 */
8561 static char mptsas_tag_lookup[] =
8562 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8563
8564 static int
8565 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8566 {
8567 struct scsi_pkt *pkt = CMD2PKT(cmd);
8568 uint32_t control = 0;
8569 caddr_t mem, arsbuf;
8570 pMpi2SCSIIORequest_t io_request;
8571 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
8572 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
8618 }
8619 return (DDI_FAILURE);
8620 }
8621
8622 /*
8623 * Set correct tag bits.
8624 */
8625 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8626 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8627 FLAG_TAGMASK) >> 12)]) {
8628 case MSG_SIMPLE_QTAG:
8629 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8630 break;
8631 case MSG_HEAD_QTAG:
8632 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8633 break;
8634 case MSG_ORDERED_QTAG:
8635 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8636 break;
8637 default:
8638 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8639 break;
8640 }
8641 } else {
8642 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8643 ptgt->m_t_throttle = 1;
8644 }
8645 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8646 }
8647
8648 if (cmd->cmd_pkt_flags & FLAG_TLR) {
8649 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8650 }
8651
8652 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8653 io_request = (pMpi2SCSIIORequest_t)mem;
8654 if (cmd->cmd_extrqslen != 0) {
8655 /*
8656 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
8657 * Calculate the DMA address with the same offset.
8658 */
8699 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8700 MPI2_SGE_FLAGS_END_OF_BUFFER |
8701 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8702 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8703 }
8704
8705 /*
8706 * save ARQ information
8707 */
8708 ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
8709 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);
8710
8711 ddi_put32(acc_hdl, &io_request->Control, control);
8712
8713 NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
8714 SMID, (void *)io_request, (void *)cmd));
8715
8716 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8717 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
8718 DDI_DMA_SYNC_FORDEV);
8719
8720 /*
8721 * Build request descriptor and write it to the request desc post reg.
8722 */
8723 request_desc |= (SMID << 16);
8724 request_desc |= (uint64_t)ptgt->m_devhdl << 48;
8725 MPTSAS_START_CMD(mpt, request_desc);
8726
8727 /*
8728 * Start timeout.
8729 */
8730 cmd->cmd_active_expiration =
8731 gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
8732 #ifdef MPTSAS_TEST
8733 /*
8734 * Force timeouts to happen immediately.
8735 */
8736 if (mptsas_test_timeouts)
8737 cmd->cmd_active_expiration = gethrtime();
8738 #endif
8739 c = TAILQ_FIRST(&ptgt->m_active_cmdq);
8740 if (c == NULL ||
8741 c->cmd_active_expiration < cmd->cmd_active_expiration) {
8742 /*
8743 * Common case is that this is the last pending expiration
8744 * (or queue is empty). Insert at head of the queue.
8745 */
8746 TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
8747 } else {
8748 /*
8749 * Queue is not empty and first element expires later than
8750 * this command. Search for element expiring sooner.
8751 */
9233 }
9234
9235 void
9236 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9237 {
9238
9239 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9240
9241 /*
9242 * if the bus is draining/quiesced, no changes to the throttles
9243 * are allowed. Not allowing change of throttles during draining
9244 * limits error recovery but will reduce draining time
9245 *
9246 * all throttles should have been set to HOLD_THROTTLE
9247 */
9248 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9249 return;
9250 }
9251
9252 if (what == HOLD_THROTTLE) {
9253 ptgt->m_t_throttle = HOLD_THROTTLE;
9254 } else if (ptgt->m_reset_delay == 0) {
9255 ptgt->m_t_throttle = what;
9256 }
9257 }
9258
9259 /*
9260 * Clean up from a device reset.
9261 * For the case of target reset, this function clears the waitq of all
9262 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
9264 */
9265 static void
9266 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9267 {
9268 mptsas_slots_t *slots = mpt->m_active;
9269 mptsas_cmd_t *cmd, *next_cmd;
9270 int slot;
9271 uchar_t reason;
9272 uint_t stat;
9273 hrtime_t timestamp;
9274
9385 cmd = mpt->m_tx_waitq;
9386 while (cmd != NULL) {
9387 next_cmd = cmd->cmd_linkp;
9388 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9389 mptsas_tx_waitq_delete(mpt, cmd);
9390 mutex_exit(&mpt->m_tx_waitq_mutex);
9391 mptsas_set_pkt_reason(mpt, cmd,
9392 reason, stat);
9393 mptsas_doneq_add(mpt, cmd);
9394 mutex_enter(&mpt->m_tx_waitq_mutex);
9395 }
9396 cmd = next_cmd;
9397 }
9398 mutex_exit(&mpt->m_tx_waitq_mutex);
9399 break;
9400 default:
9401 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9402 tasktype);
9403 break;
9404 }
9405 }
9406
9407 /*
 * Clean up hba state, abort all outstanding commands and commands in waitq,
 * reset timeout of all targets.
9410 */
9411 static void
9412 mptsas_flush_hba(mptsas_t *mpt)
9413 {
9414 mptsas_slots_t *slots = mpt->m_active;
9415 mptsas_cmd_t *cmd;
9416 int slot;
9417
9418 NDBG25(("mptsas_flush_hba"));
9419
9420 /*
9421 * The I/O Controller should have already sent back
9422 * all commands via the scsi I/O reply frame. Make
9423 * sure all commands have been flushed.
9424 * Account for TM request, which use the last SMID.
9676 mptsas_slots_t *slots = mpt->m_active;
9677 int rval = FALSE;
9678
9679 ASSERT(mutex_owned(&mpt->m_mutex));
9680
9681 /*
9682 * Abort the command pkt on the target/lun in ap. If pkt is
9683 * NULL, abort all outstanding commands on that target/lun.
9684 * If you can abort them, return 1, else return 0.
9685 * Each packet that's aborted should be sent back to the target
9686 * driver through the callback routine, with pkt_reason set to
9687 * CMD_ABORTED.
9688 *
9689 * abort cmd pkt on HBA hardware; clean out of outstanding
9690 * command lists, etc.
9691 */
9692 if (pkt != NULL) {
9693 /* abort the specified packet */
9694 sp = PKT2CMD(pkt);
9695
9696 if (sp->cmd_queued) {
9697 NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9698 (void *)sp));
9699 mptsas_waitq_delete(mpt, sp);
9700 mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
9701 STAT_ABORTED);
9702 mptsas_doneq_add(mpt, sp);
9703 rval = TRUE;
9704 goto done;
9705 }
9706
9707 /*
9708 * Have mpt firmware abort this command
9709 */
9710
9711 if (slots->m_slot[sp->cmd_slot] != NULL) {
9712 rval = mptsas_ioc_task_management(mpt,
9713 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
9714 lun, NULL, 0, 0);
9715
9966
9967 /*PRINTFLIKE3*/
9968 void
9969 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9970 {
9971 dev_info_t *dev;
9972 va_list ap;
9973
9974 if (mpt) {
9975 dev = mpt->m_dip;
9976 } else {
9977 dev = 0;
9978 }
9979
9980 mutex_enter(&mptsas_log_mutex);
9981
9982 va_start(ap, fmt);
9983 (void) vsprintf(mptsas_log_buf, fmt, ap);
9984 va_end(ap);
9985
9986 if (level == CE_CONT) {
9987 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9988 } else {
9989 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9990 }
9991
9992 mutex_exit(&mptsas_log_mutex);
9993 }
9994
9995 #ifdef MPTSAS_DEBUG
9996 /*
9997 * Use a circular buffer to log messages to private memory.
9998 * Increment idx atomically to minimize risk to miss lines.
9999 * It's fast and does not hold up the proceedings too much.
10000 */
10001 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
10002 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
10003 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
10004 static uint32_t mptsas_dbglog_idx = 0;
10005
10006 /*PRINTFLIKE1*/
10007 void
10008 mptsas_debug_log(char *fmt, ...)
10009 {
10037 #else
10038 scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
10039 #endif
10040 mutex_exit(&mptsas_log_mutex);
10041 }
10042 #endif
10043
10044 /*
10045 * timeout handling
10046 */
10047 static void
10048 mptsas_watch(void *arg)
10049 {
10050 #ifndef __lock_lint
10051 _NOTE(ARGUNUSED(arg))
10052 #endif
10053
10054 mptsas_t *mpt;
10055 uint32_t doorbell;
10056
10057 NDBG30(("mptsas_watch"));
10058
10059 rw_enter(&mptsas_global_rwlock, RW_READER);
10060 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
10061
10062 mutex_enter(&mpt->m_mutex);
10063
10064 /* Skip device if not powered on */
10065 if (mpt->m_options & MPTSAS_OPT_PM) {
10066 if (mpt->m_power_level == PM_LEVEL_D0) {
10067 (void) pm_busy_component(mpt->m_dip, 0);
10068 mpt->m_busy = 1;
10069 } else {
10070 mutex_exit(&mpt->m_mutex);
10071 continue;
10072 }
10073 }
10074
10075 /*
10076 * Check if controller is in a FAULT state. If so, reset it.
10080 doorbell &= MPI2_DOORBELL_DATA_MASK;
10081 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
10082 "code: %04x", doorbell);
10083 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
10084 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10085 mptsas_log(mpt, CE_WARN, "Reset failed"
10086 "after fault was detected");
10087 }
10088 }
10089
10090 /*
10091 * For now, always call mptsas_watchsubr.
10092 */
10093 mptsas_watchsubr(mpt);
10094
10095 if (mpt->m_options & MPTSAS_OPT_PM) {
10096 mpt->m_busy = 0;
10097 (void) pm_idle_component(mpt->m_dip, 0);
10098 }
10099
10100 mutex_exit(&mpt->m_mutex);
10101 }
10102 rw_exit(&mptsas_global_rwlock);
10103
10104 mutex_enter(&mptsas_global_mutex);
10105 if (mptsas_timeouts_enabled)
10106 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
10107 mutex_exit(&mptsas_global_mutex);
10108 }
10109
10110 static void
10111 mptsas_watchsubr_tgt(mptsas_t *mpt, mptsas_target_t *ptgt, hrtime_t timestamp)
10112 {
10113 mptsas_cmd_t *cmd;
10114
10115 /*
10116 * If we were draining due to a qfull condition,
10117 * go back to full throttle.
10118 */
10119 if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
10120 (ptgt->m_t_throttle > HOLD_THROTTLE) &&
10121 (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
10122 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10123 mptsas_restart_hba(mpt);
10124 }
10125
10126 cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
10127 if (cmd == NULL)
10654 mpi_pre_fw_25_download(mpt, pt);
10655 return;
10656 }
10657
10658 /*
10659 * User requests should come in with the Transaction
10660 * context element where the SGL will go. Putting the
10661 * SGL after that seems to work, but don't really know
10662 * why. Other drivers tend to create an extra SGL and
10663 * refer to the TCE through that.
10664 */
10665 req = (pMpi2FWDownloadRequest)pt->request;
10666 tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10667 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10668 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10669 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10670 }
10671
10672 pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10673 sizeof (*tcsge);
10674 if (pt->request_size != pt->sgl_offset)
10675 NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10676 "0x%x, should be 0x%x, dataoutsz 0x%x",
10677 (int)pt->request_size, (int)pt->sgl_offset,
10678 (int)pt->dataout_size));
10679 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10680 NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10681 "0x%x, should be 0x%x", pt->data_size,
10682 (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10683 }
10684
10685 /*
10686 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10687 */
10688 static void
10689 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10690 {
10691 pMpi2FWDownloadTCSGE_t tcsge;
10692 pMpi2FWDownloadRequest req2;
10693 pMpi25FWDownloadRequest req25;
10694
10695 /*
10696 * User requests should come in with the Transaction
10697 * context element where the SGL will go. The new firmware
10698 * Doesn't use TCE and has space in the main request for
10699 * this information. So move to the right place.
10700 */
10701 req2 = (pMpi2FWDownloadRequest)pt->request;
10702 req25 = (pMpi25FWDownloadRequest)pt->request;
10703 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10704 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10705 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10706 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10707 }
10708 req25->ImageOffset = tcsge->ImageOffset;
10709 req25->ImageSize = tcsge->ImageSize;
10710
10711 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10712 if (pt->request_size != pt->sgl_offset)
10713 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10714 "0x%x, should be 0x%x, dataoutsz 0x%x",
10715 pt->request_size, pt->sgl_offset,
10716 pt->dataout_size));
10717 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10718 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10719 "0x%x, should be 0x%x", pt->data_size,
10720 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10721 }
10722
10723 /*
10724 * Prepare the pt for a SAS2 FW_UPLOAD request.
10725 */
10726 static void
10727 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10728 {
10729 pMpi2FWUploadTCSGE_t tcsge;
10730 pMpi2FWUploadRequest_t req;
10731
10732 /*
10733 * If SAS3, call separate function.
10734 */
10735 if (mpt->m_MPI25) {
10736 mpi_pre_fw_25_upload(mpt, pt);
10737 return;
10738 }
10739
10740 /*
10741 * User requests should come in with the Transaction
10742 * context element where the SGL will go. Putting the
10743 * SGL after that seems to work, but don't really know
10744 * why. Other drivers tend to create an extra SGL and
10745 * refer to the TCE through that.
10746 */
10747 req = (pMpi2FWUploadRequest_t)pt->request;
10748 tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10749 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10750 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10751 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10752 }
10753
10754 pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10755 sizeof (*tcsge);
10756 if (pt->request_size != pt->sgl_offset)
10757 NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10758 "0x%x, should be 0x%x, dataoutsz 0x%x",
10759 pt->request_size, pt->sgl_offset,
10760 pt->dataout_size));
10761 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10762 NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10763 "0x%x, should be 0x%x", pt->data_size,
10764 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10765 }
10766
10767 /*
10768 * Prepare the pt a SAS3 FW_UPLOAD request.
10769 */
10770 static void
10771 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10772 {
10773 pMpi2FWUploadTCSGE_t tcsge;
10774 pMpi2FWUploadRequest_t req2;
10775 pMpi25FWUploadRequest_t req25;
10776
10777 /*
10778 * User requests should come in with the Transaction
10779 * context element where the SGL will go. The new firmware
10780 * Doesn't use TCE and has space in the main request for
10781 * this information. So move to the right place.
10782 */
10783 req2 = (pMpi2FWUploadRequest_t)pt->request;
10784 req25 = (pMpi25FWUploadRequest_t)pt->request;
10785 tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10786 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10787 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10788 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10789 }
10790 req25->ImageOffset = tcsge->ImageOffset;
10791 req25->ImageSize = tcsge->ImageSize;
10792
10793 pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10794 if (pt->request_size != pt->sgl_offset)
10795 NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10796 "0x%x, should be 0x%x, dataoutsz 0x%x",
10797 pt->request_size, pt->sgl_offset,
10798 pt->dataout_size));
10799 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10800 NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10801 "0x%x, should be 0x%x", pt->data_size,
10802 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10803 }
10804
10805 /*
10806 * Prepare the pt for an IOC_FACTS request.
10807 */
10808 static void
10809 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10810 {
10811 #ifndef __lock_lint
10812 _NOTE(ARGUNUSED(mpt))
10813 #endif
10814 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST))
10815 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10816 "0x%x, should be 0x%x, dataoutsz 0x%x",
10817 pt->request_size,
10818 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10819 pt->dataout_size));
10820 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY))
10821 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10822 "0x%x, should be 0x%x", pt->data_size,
10823 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10824 pt->sgl_offset = (uint16_t)pt->request_size;
10825 }
10826
10827 /*
10828 * Prepare the pt for a PORT_FACTS request.
10829 */
10830 static void
10831 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10832 {
10833 #ifndef __lock_lint
10834 _NOTE(ARGUNUSED(mpt))
10835 #endif
10836 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST))
10837 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10838 "0x%x, should be 0x%x, dataoutsz 0x%x",
10839 pt->request_size,
10840 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10841 pt->dataout_size));
10842 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY))
10843 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10844 "0x%x, should be 0x%x", pt->data_size,
10845 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10846 pt->sgl_offset = (uint16_t)pt->request_size;
10847 }
10848
10849 /*
10850 * Prepare pt for a SATA_PASSTHROUGH request.
10851 */
10852 static void
10853 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10854 {
10855 #ifndef __lock_lint
10856 _NOTE(ARGUNUSED(mpt))
10857 #endif
10858 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10859 if (pt->request_size != pt->sgl_offset)
10860 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10861 "0x%x, should be 0x%x, dataoutsz 0x%x",
10862 pt->request_size, pt->sgl_offset,
10863 pt->dataout_size));
10864 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY))
10865 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10866 "0x%x, should be 0x%x", pt->data_size,
10867 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10868 }
10869
10870 static void
10871 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10872 {
10873 #ifndef __lock_lint
10874 _NOTE(ARGUNUSED(mpt))
10875 #endif
10876 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10877 if (pt->request_size != pt->sgl_offset)
10878 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10879 "0x%x, should be 0x%x, dataoutsz 0x%x",
10880 pt->request_size, pt->sgl_offset,
10881 pt->dataout_size));
10882 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY))
10883 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10884 "0x%x, should be 0x%x", pt->data_size,
10885 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10886 }
10887
10888 /*
10889 * Prepare pt for a CONFIG request.
10890 */
10891 static void
10892 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10893 {
10894 #ifndef __lock_lint
10895 _NOTE(ARGUNUSED(mpt))
10896 #endif
10897 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10898 if (pt->request_size != pt->sgl_offset)
10899 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10900 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10901 pt->sgl_offset, pt->dataout_size));
10902 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY))
10903 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10904 "should be 0x%x", pt->data_size,
10905 (int)sizeof (MPI2_CONFIG_REPLY)));
10906 pt->simple = 1;
10907 }
10908
10909 /*
10910 * Prepare pt for a SCSI_IO_REQ request.
10911 */
10912 static void
10913 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10914 {
10915 #ifndef __lock_lint
10916 _NOTE(ARGUNUSED(mpt))
10917 #endif
10918 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10919 if (pt->request_size != pt->sgl_offset)
10920 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10921 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10922 pt->sgl_offset,
10923 pt->dataout_size));
10924 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY))
10925 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10926 "should be 0x%x", pt->data_size,
10927 (int)sizeof (MPI2_SCSI_IO_REPLY)));
10928 }
10929
10930 /*
10931 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
10932 */
10933 static void
10934 mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
10935 {
10936 #ifndef __lock_lint
10937 _NOTE(ARGUNUSED(mpt))
10938 #endif
10939 pt->sgl_offset = (uint16_t)pt->request_size;
10940 }
10941
10942 /*
10943 * A set of functions to prepare an mptsas_cmd for the various
10944 * supported requests.
10945 */
10946 static struct mptsas_func {
10947 U8 Function;
11112 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
11113
11114 pt.request = (uint8_t *)request_msg;
11115 pt.direction = direction;
11116 pt.simple = 0;
11117 pt.request_size = request_size;
11118 pt.data_size = data_size;
11119 pt.dataout_size = dataout_size;
11120 pt.data_cookie = data_dma_state.cookie;
11121 pt.dataout_cookie = dataout_dma_state.cookie;
11122 mptsas_prep_sgl_offset(mpt, &pt);
11123
11124 /*
11125 * Form a blank cmd/pkt to store the acknowledgement message
11126 */
11127 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
11128 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
11129 pkt->pkt_ha_private = (opaque_t)&pt;
11130 pkt->pkt_flags = FLAG_HEAD;
11131 pkt->pkt_time = timeout;
11132 cmd->cmd_pkt = pkt;
11133 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
11134
11135 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
11136 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
11137 uint8_t com, cdb_group_id;
11138 boolean_t ret;
11139
11140 pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
11141 com = pkt->pkt_cdbp[0];
11142 cdb_group_id = CDB_GROUPID(com);
11143 switch (cdb_group_id) {
11144 case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
11145 case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
11146 case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
11147 case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
11148 case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
11149 default:
11150 NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
11151 "CDBGROUP 0x%x requested!", cdb_group_id));
12564 driverdata.RegData);
12565 break;
12566
12567 default:
12568 status = EINVAL;
12569 break;
12570 }
12571 } else {
12572 status = EFAULT;
12573 }
12574
12575 mutex_exit(&mpt->m_mutex);
12576 return (status);
12577 }
12578
12579 static int
12580 led_control(mptsas_t *mpt, intptr_t data, int mode)
12581 {
12582 int ret = 0;
12583 mptsas_led_control_t lc;
12584 mptsas_target_t *ptgt;
12585
12586 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12587 return (EFAULT);
12588 }
12589
12590 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12591 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12592 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12593 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12594 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12595 lc.LedStatus != 1)) {
12596 return (EINVAL);
12597 }
12598
12599 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12600 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12601 return (EACCES);
12602
12603 /* Locate the target we're interrogating... */
12604 mutex_enter(&mpt->m_mutex);
12605 ptgt = refhash_linear_search(mpt->m_targets,
12606 mptsas_target_eval_slot, &lc);
12607 if (ptgt == NULL) {
12608 /* We could not find a target for that enclosure/slot. */
12609 mutex_exit(&mpt->m_mutex);
12610 return (ENOENT);
12611 }
12612
12613 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12614 /* Update our internal LED state. */
12615 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
12616 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
12617
12618 /* Flush it to the controller. */
12619 ret = mptsas_flush_led_status(mpt, ptgt);
12620 mutex_exit(&mpt->m_mutex);
12621 return (ret);
12622 }
12623
12624 /* Return our internal LED state. */
12625 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
12626 mutex_exit(&mpt->m_mutex);
12627
12628 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12629 return (EFAULT);
12630 }
12631
12632 return (0);
12633 }
12634
12635 static int
12636 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
12637 {
12638 uint16_t i = 0;
12639 uint16_t count = 0;
12640 int ret = 0;
12641 mptsas_target_t *ptgt;
12642 mptsas_disk_info_t *di;
12643 STRUCT_DECL(mptsas_get_disk_info, gdi);
12644
12645 if ((mode & FREAD) == 0)
12747 if (mpt == NULL) {
12748 /*
12749 * Called from iport node, get the states
12750 */
12751 iport_flag = 1;
12752 dip = mptsas_get_dip_from_dev(dev, &phymask);
12753 if (dip == NULL) {
12754 return (ENXIO);
12755 }
12756 mpt = DIP2MPT(dip);
12757 }
12758 /* Make sure power level is D0 before accessing registers */
12759 mutex_enter(&mpt->m_mutex);
12760 if (mpt->m_options & MPTSAS_OPT_PM) {
12761 (void) pm_busy_component(mpt->m_dip, 0);
12762 if (mpt->m_power_level != PM_LEVEL_D0) {
12763 mutex_exit(&mpt->m_mutex);
12764 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
12765 DDI_SUCCESS) {
12766 mptsas_log(mpt, CE_WARN,
12767 "mptsas%d: mptsas_ioctl: Raise power "
12768 "request failed.", mpt->m_instance);
12769 (void) pm_idle_component(mpt->m_dip, 0);
12770 return (ENXIO);
12771 }
12772 } else {
12773 mutex_exit(&mpt->m_mutex);
12774 }
12775 } else {
12776 mutex_exit(&mpt->m_mutex);
12777 }
12778
12779 if (iport_flag) {
12780 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12781 if (status != 0) {
12782 goto out;
12783 }
12784 /*
12785 * The following code control the OK2RM LED, it doesn't affect
12786 * the ioctl return status.
12787 */
12788 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12789 (cmd == DEVCTL_DEVICE_OFFLINE)) {
12790 if (ndi_dc_allochdl((void *)data, &dcp) !=
12791 NDI_SUCCESS) {
12792 goto out;
12793 }
12794 addr = ndi_dc_getaddr(dcp);
12795 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12796 if (ptgt == NULL) {
12797 NDBG14(("mptsas_ioctl led control: tgt %s not "
12798 "found", addr));
12799 ndi_dc_freehdl(dcp);
12800 goto out;
12801 }
12802 mutex_enter(&mpt->m_mutex);
12803 if (cmd == DEVCTL_DEVICE_ONLINE) {
12804 ptgt->m_tgt_unconfigured = 0;
12805 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
12806 ptgt->m_tgt_unconfigured = 1;
12807 }
12808 if (cmd == DEVCTL_DEVICE_OFFLINE) {
12809 ptgt->m_led_status |=
12810 (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12811 } else {
12812 ptgt->m_led_status &=
12813 ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12814 }
12815 (void) mptsas_flush_led_status(mpt, ptgt);
12816 mutex_exit(&mpt->m_mutex);
12817 ndi_dc_freehdl(dcp);
12818 }
12819 goto out;
12820 }
12821 switch (cmd) {
12822 case MPTIOCTL_GET_DISK_INFO:
12823 status = get_disk_info(mpt, data, mode);
12824 break;
12825 case MPTIOCTL_LED_CONTROL:
12826 status = led_control(mpt, data, mode);
12827 break;
12828 case MPTIOCTL_UPDATE_FLASH:
12829 if (ddi_copyin((void *)data, &flashdata,
12830 sizeof (struct mptsas_update_flash), mode)) {
12831 status = EFAULT;
12832 break;
12833 }
12834
12835 mutex_enter(&mpt->m_mutex);
12836 if (mptsas_update_flash(mpt,
12983 }
12984
12985 int
12986 mptsas_restart_ioc(mptsas_t *mpt)
12987 {
12988 int rval = DDI_SUCCESS;
12989 mptsas_target_t *ptgt = NULL;
12990
12991 ASSERT(mutex_owned(&mpt->m_mutex));
12992
12993 /*
12994 * Set a flag telling I/O path that we're processing a reset. This is
12995 * needed because after the reset is complete, the hash table still
12996 * needs to be rebuilt. If I/Os are started before the hash table is
12997 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
12998 * so that they can be retried.
12999 */
13000 mpt->m_in_reset = TRUE;
13001
13002 /*
13003 * Wait until all the allocated sense data buffers for DMA are freed.
13004 */
13005 while (mpt->m_extreq_sense_refcount > 0)
13006 cv_wait(&mpt->m_extreq_sense_refcount_cv, &mpt->m_mutex);
13007
13008 /*
13009 * Set all throttles to HOLD
13010 */
13011 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13012 ptgt = refhash_next(mpt->m_targets, ptgt)) {
13013 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
13014 }
13015
13016 /*
13017 * Disable interrupts
13018 */
13019 MPTSAS_DISABLE_INTR(mpt);
13020
13021 /*
13022 * Abort all commands: outstanding commands, commands in waitq and
13023 * tx_waitq.
13024 */
13025 mptsas_flush_hba(mpt);
13026
13027 /*
13028 * Reinitialize the chip.
13097 }
13098 /*
13099 * Setup configuration space
13100 */
13101 if (mptsas_config_space_init(mpt) == FALSE) {
13102 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
13103 "failed!");
13104 goto fail;
13105 }
13106
13107 /*
13108 * IOC facts can change after a diag reset so all buffers that are
13109 * based on these numbers must be de-allocated and re-allocated. Get
13110 * new IOC facts each time chip is initialized.
13111 */
13112 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
13113 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
13114 goto fail;
13115 }
13116
13117 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
13118 goto fail;
13119 }
13120 /*
13121 * Allocate request message frames, reply free queue, reply descriptor
13122 * post queue, and reply message frames using latest IOC facts.
13123 */
13124 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
13125 mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
13126 goto fail;
13127 }
13128 if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
13129 mptsas_log(mpt, CE_WARN, "mptsas_alloc_sense_bufs failed");
13130 goto fail;
13131 }
13132 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
13133 mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
13134 goto fail;
13135 }
13136 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
13137 mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
13138 goto fail;
13139 }
13140 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
13141 mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
13142 goto fail;
13143 }
13144
13145 mur:
13146 /*
13147 * Re-Initialize ioc to operational state
13148 */
13149 if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
13150 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
13151 goto fail;
13152 }
13153
13154 mptsas_alloc_reply_args(mpt);
13155
13156 /*
13157 * Initialize reply post index. Reply free index is initialized after
13158 * the next loop.
13159 */
13160 mpt->m_post_index = 0;
13161
13162 /*
13163 * Initialize the Reply Free Queue with the physical addresses of our
13164 * reply frames.
13268 */
13269 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13270 & PCI_STAT_CAP) {
13271 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13272 PCI_CONF_CAP_PTR), 4);
13273 } else {
13274 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13275 }
13276
13277 /*
13278 * Walk capabilities if supported.
13279 */
13280 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13281
13282 /*
13283 * Check that we haven't exceeded the maximum number of
13284 * capabilities and that the pointer is in a valid range.
13285 */
13286 if (++cap_count > 48) {
13287 mptsas_log(mpt, CE_WARN,
13288 "too many device capabilities.\n");
13289 break;
13290 }
13291 if (caps_ptr < 64) {
13292 mptsas_log(mpt, CE_WARN,
13293 "capabilities pointer 0x%x out of range.\n",
13294 caps_ptr);
13295 break;
13296 }
13297
13298 /*
13299 * Get next capability and check that it is valid.
13300 * For now, we only support power management.
13301 */
13302 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13303 switch (cap) {
13304 case PCI_CAP_ID_PM:
13305 mptsas_log(mpt, CE_NOTE,
13306 "?mptsas%d supports power management.\n",
13307 mpt->m_instance);
13308 mpt->m_options |= MPTSAS_OPT_PM;
13309
13310 /* Save PMCSR offset */
13311 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13312 break;
13313 /*
13314 * The following capabilities are valid. Any others
13315 * will cause a message to be logged.
13316 */
13317 case PCI_CAP_ID_VPD:
13318 case PCI_CAP_ID_MSI:
13319 case PCI_CAP_ID_PCIX:
13320 case PCI_CAP_ID_PCI_E:
13321 case PCI_CAP_ID_MSI_X:
13322 break;
13323 default:
13324 mptsas_log(mpt, CE_NOTE,
13325 "?mptsas%d unrecognized capability "
13326 "0x%x.\n", mpt->m_instance, cap);
13327 break;
13328 }
13329
13330 /*
13331 * Get next capabilities pointer and clear bits 0,1.
13332 */
13333 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13334 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13335 }
13336 return (TRUE);
13337 }
13338
13339 static int
13340 mptsas_init_pm(mptsas_t *mpt)
13341 {
13342 char pmc_name[16];
13343 char *pmc[] = {
13344 NULL,
13345 "0=Off (PCI D3 State)",
13346 "3=On (PCI D0 State)",
13347 NULL
13348 };
13349 uint16_t pmcsr_stat;
13350
13351 if (mptsas_get_pci_cap(mpt) == FALSE) {
13352 return (DDI_FAILURE);
13353 }
13354 /*
13355 * If PCI's capability does not support PM, then don't need
13356 * to registe the pm-components
13357 */
13358 if (!(mpt->m_options & MPTSAS_OPT_PM))
13359 return (DDI_SUCCESS);
13360 /*
13361 * If power management is supported by this chip, create
13362 * pm-components property for the power management framework
13363 */
13364 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
13365 pmc[0] = pmc_name;
13366 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13367 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13368 mpt->m_options &= ~MPTSAS_OPT_PM;
13369 mptsas_log(mpt, CE_WARN,
13370 "mptsas%d: pm-component property creation failed.",
13371 mpt->m_instance);
13372 return (DDI_FAILURE);
13373 }
13374
13375 /*
13376 * Power on device.
13377 */
13378 (void) pm_busy_component(mpt->m_dip, 0);
13379 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13380 mpt->m_pmcsr_offset);
13381 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13382 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
13383 mpt->m_instance);
13384 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13385 PCI_PMCSR_D0);
13386 }
13387 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13388 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13389 return (DDI_FAILURE);
13390 }
13391 mpt->m_power_level = PM_LEVEL_D0;
13392 /*
13393 * Set pm idle delay.
13394 */
13395 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13396 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13397
13398 return (DDI_SUCCESS);
13399 }
13400
13401 static int
13402 mptsas_register_intrs(mptsas_t *mpt)
13403 {
13404 dev_info_t *dip;
13405 int intr_types;
13406
13407 dip = mpt->m_dip;
13408
13409 /* Get supported interrupt types */
13410 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13411 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13412 "failed\n");
13413 return (FALSE);
13414 }
13415
13416 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13417
13418 /*
13419 * Try MSI, but fall back to FIXED
13420 */
13421 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13422 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13423 NDBG0(("Using MSI interrupt type"));
13424 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13425 return (TRUE);
13426 }
13427 }
13428 if (intr_types & DDI_INTR_TYPE_FIXED) {
13429 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13430 NDBG0(("Using FIXED interrupt type"));
13431 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13432 return (TRUE);
13446 }
13447
13448 /*
13449 * mptsas_add_intrs:
13450 *
13451 * Register FIXED or MSI interrupts.
13452 */
13453 static int
13454 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13455 {
13456 dev_info_t *dip = mpt->m_dip;
13457 int avail, actual, count = 0;
13458 int i, flag, ret;
13459
13460 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13461
13462 /* Get number of interrupts */
13463 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13464 if ((ret != DDI_SUCCESS) || (count <= 0)) {
13465 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13466 "ret %d count %d\n", ret, count);
13467
13468 return (DDI_FAILURE);
13469 }
13470
13471 /* Get number of available interrupts */
13472 ret = ddi_intr_get_navail(dip, intr_type, &avail);
13473 if ((ret != DDI_SUCCESS) || (avail == 0)) {
13474 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13475 "ret %d avail %d\n", ret, avail);
13476
13477 return (DDI_FAILURE);
13478 }
13479
13480 if (avail < count) {
13481 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
13482 "navail() returned %d", count, avail);
13483 }
13484
	/* The mpt driver has only one interrupt routine. */
13486 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13487 count = 1;
13488 }
13489
13490 /* Allocate an array of interrupt handles */
13491 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13492 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13493
13494 flag = DDI_INTR_ALLOC_NORMAL;
13495
13496 /* call ddi_intr_alloc() */
13497 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13498 count, &actual, flag);
13499
13500 if ((ret != DDI_SUCCESS) || (actual == 0)) {
13501 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13502 ret);
13503 kmem_free(mpt->m_htable, mpt->m_intr_size);
13504 return (DDI_FAILURE);
13505 }
13506
13507 /* use interrupt count returned or abort? */
13508 if (actual < count) {
13509 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13510 count, actual);
13511 }
13512
13513 mpt->m_intr_cnt = actual;
13514
13515 /*
13516 * Get priority for first msi, assume remaining are all the same
13517 */
13518 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13519 &mpt->m_intr_pri)) != DDI_SUCCESS) {
13520 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13521
13522 /* Free already allocated intr */
13523 for (i = 0; i < actual; i++) {
13524 (void) ddi_intr_free(mpt->m_htable[i]);
13525 }
13526
13527 kmem_free(mpt->m_htable, mpt->m_intr_size);
13528 return (DDI_FAILURE);
13529 }
13530
13531 /* Test for high level mutex */
13532 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13533 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13534 "Hi level interrupt not supported\n");
13535
13536 /* Free already allocated intr */
13537 for (i = 0; i < actual; i++) {
13538 (void) ddi_intr_free(mpt->m_htable[i]);
13539 }
13540
13541 kmem_free(mpt->m_htable, mpt->m_intr_size);
13542 return (DDI_FAILURE);
13543 }
13544
13545 /* Call ddi_intr_add_handler() */
13546 for (i = 0; i < actual; i++) {
13547 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13548 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13549 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13550 "failed %d\n", ret);
13551
13552 /* Free already allocated intr */
13553 for (i = 0; i < actual; i++) {
13554 (void) ddi_intr_free(mpt->m_htable[i]);
13555 }
13556
13557 kmem_free(mpt->m_htable, mpt->m_intr_size);
13558 return (DDI_FAILURE);
13559 }
13560 }
13561
13562 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13563 != DDI_SUCCESS) {
13564 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13565
13566 /* Free already allocated intr */
13567 for (i = 0; i < actual; i++) {
13568 (void) ddi_intr_free(mpt->m_htable[i]);
13569 }
13570
13571 kmem_free(mpt->m_htable, mpt->m_intr_size);
13572 return (DDI_FAILURE);
13573 }
13574
13575 /*
13576 * Enable interrupts
13577 */
13578 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13579 /* Call ddi_intr_block_enable() for MSI interrupts */
13580 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13581 } else {
13582 /* Call ddi_intr_enable for MSI or FIXED interrupts */
13583 for (i = 0; i < mpt->m_intr_cnt; i++) {
13584 (void) ddi_intr_enable(mpt->m_htable[i]);
13845 return (DEV_INFO_SUCCESS);
13846 }
13847
13848 uint64_t
13849 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
13850 {
13851 uint64_t sata_guid = 0, *pwwn = NULL;
13852 int target = ptgt->m_devhdl;
13853 uchar_t *inq83 = NULL;
13854 int inq83_len = 0xFF;
13855 uchar_t *dblk = NULL;
13856 int inq83_retry = 3;
13857 int rval = DDI_FAILURE;
13858
13859 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
13860
13861 inq83_retry:
13862 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
13863 inq83_len, NULL, 1);
13864 if (rval != DDI_SUCCESS) {
13865 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
13866 "0x83 for target:%x, lun:%x failed!", target, lun);
13867 sata_guid = -1;
13868 goto out;
13869 }
	/* According to SAT2, the first descriptor is the logical unit name */
13871 dblk = &inq83[4];
13872 if ((dblk[1] & 0x30) != 0) {
13873 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
13874 goto out;
13875 }
13876 pwwn = (uint64_t *)(void *)(&dblk[4]);
13877 if ((dblk[4] & 0xf0) == 0x50) {
13878 sata_guid = BE_64(*pwwn);
13879 goto out;
13880 } else if (dblk[4] == 'A') {
13881 NDBG20(("SATA drive has no NAA format GUID."));
13882 goto out;
13883 } else {
13884 /* The data is not ready, wait and retry */
13885 inq83_retry--;
13886 if (inq83_retry <= 0) {
13887 goto out;
13888 }
13889 NDBG20(("The GUID is not ready, retry..."));
13890 delay(1 * drv_usectohz(1000000));
13891 goto inq83_retry;
13892 }
13893 out:
13961 bcopy((caddr_t)mpt->m_tran,
13962 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
13963 tgt_private = kmem_alloc(
13964 sizeof (mptsas_tgt_private_t), KM_SLEEP);
13965 if (tgt_private == NULL) {
13966 goto out;
13967 }
13968 tgt_private->t_lun = ap->a_lun;
13969 tgt_private->t_private = ptgt;
13970 tran_clone->tran_tgt_private = tgt_private;
13971 ap->a_hba_tran = tran_clone;
13972
13973 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
13974 data_bp, cdblen, sizeof (struct scsi_arq_status),
13975 0, PKT_CONSISTENT, NULL, NULL);
13976 if (pktp == NULL) {
13977 goto out;
13978 }
13979 bcopy(cdb, pktp->pkt_cdbp, cdblen);
13980 pktp->pkt_flags = FLAG_NOPARITY;
13981 if (scsi_poll(pktp) < 0) {
13982 goto out;
13983 }
13984 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
13985 goto out;
13986 }
13987 if (resid != NULL) {
13988 *resid = pktp->pkt_resid;
13989 }
13990
13991 ret = DDI_SUCCESS;
13992 out:
13993 if (pktp) {
13994 scsi_destroy_pkt(pktp);
13995 }
13996 if (tran_clone) {
13997 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
13998 }
13999 if (tgt_private) {
14000 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
14171 (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
14172 (strncmp((char *)arg, "disk", 4) == 0)) {
14173 bconfig = FALSE;
14174 ndi_hold_devi(*childp);
14175 }
14176 } else {
14177 ret = NDI_FAILURE;
14178 break;
14179 }
14180
14181 /*
14182 * DDI group instructed us to use this flag.
14183 */
14184 mflags |= NDI_MDI_FALLBACK;
14185 break;
14186 case BUS_CONFIG_DRIVER:
14187 case BUS_CONFIG_ALL:
14188 mptsas_config_all(pdip);
14189 ret = NDI_SUCCESS;
14190 break;
14191 }
14192
14193 if ((ret == NDI_SUCCESS) && bconfig) {
14194 ret = ndi_busop_bus_config(pdip, mflags, op,
14195 (devnm == NULL) ? arg : devnm, childp, 0);
14196 }
14197
14198 ndi_devi_exit(pdip, circ1);
14199 ndi_devi_exit(scsi_vhci_dip, circ);
14200 if (devnm != NULL)
14201 kmem_free(devnm, SCSI_MAXNAMELEN);
14202 return (ret);
14203 }
14204
14205 static int
14206 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
14207 mptsas_target_t *ptgt)
14208 {
14209 int rval = DDI_FAILURE;
14210 struct scsi_inquiry *sd_inq = NULL;
14218 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14219 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
14220 } else {
14221 rval = DDI_FAILURE;
14222 }
14223
14224 kmem_free(sd_inq, SUN_INQSIZE);
14225 return (rval);
14226 }
14227
14228 static int
14229 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
14230 dev_info_t **lundip)
14231 {
14232 int rval;
14233 mptsas_t *mpt = DIP2MPT(pdip);
14234 int phymask;
14235 mptsas_target_t *ptgt = NULL;
14236
14237 /*
14238 * Get the physical port associated to the iport
14239 */
14240 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14241 "phymask", 0);
14242
14243 ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
14244 if (ptgt == NULL) {
14245 /*
14246 * didn't match any device by searching
14247 */
14248 return (DDI_FAILURE);
14249 }
14250 /*
14251 * If the LUN already exists and the status is online,
14252 * we just return the pointer to dev_info_t directly.
14253 * For the mdi_pathinfo node, we'll handle it in
14254 * mptsas_create_virt_lun()
14255 * TODO should be also in mptsas_handle_dr
14256 */
14257
14258 *lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
14259 if (*lundip != NULL) {
14260 /*
14261 * TODO Another senario is, we hotplug the same disk
14262 * on the same slot, the devhdl changed, is this
14263 * possible?
14264 * tgt_private->t_private != ptgt
14265 */
14266 if (sasaddr != ptgt->m_addr.mta_wwn) {
14267 /*
14268 * The device has changed although the devhdl is the
14269 * same (Enclosure mapping mode, change drive on the
14270 * same slot)
14271 */
14272 return (DDI_FAILURE);
14273 }
14274 return (DDI_SUCCESS);
14275 }
14276
14277 if (phymask == 0) {
14278 /*
14279 * Configure IR volume
14280 */
14281 rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
14282 return (rval);
14283 }
14284 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14285
14286 return (rval);
14287 }
14288
14289 static int
14290 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
14291 dev_info_t **lundip)
14292 {
14293 int rval;
14294 mptsas_t *mpt = DIP2MPT(pdip);
14295 mptsas_phymask_t phymask;
14296 mptsas_target_t *ptgt = NULL;
14297
14298 /*
14299 * Get the physical port associated to the iport
14300 */
14301 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14302 "phymask", 0);
14303
14304 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
14305 if (ptgt == NULL) {
14306 /*
14307 * didn't match any device by searching
14308 */
14309 return (DDI_FAILURE);
14310 }
14311
14312 /*
14313 * If the LUN already exists and the status is online,
14314 * we just return the pointer to dev_info_t directly.
14315 * For the mdi_pathinfo node, we'll handle it in
14316 * mptsas_create_virt_lun().
14317 */
14318
14458
14459 if (ret != DDI_SUCCESS)
14460 return (ret);
14461 buffer = (char *)repluns_bp->b_un.b_addr;
14462 /*
14463 * find out the number of luns returned by the SCSI ReportLun call
14464 * and allocate buffer space
14465 */
14466 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14467 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
14468 if (saved_repluns == NULL) {
14469 scsi_free_consistent_buf(repluns_bp);
14470 return (DDI_FAILURE);
14471 }
14472 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
14473 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
14474 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
14475 continue;
14476 }
14477 saved_repluns[lun_cnt] = lun_num;
14478 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
14479 ret = DDI_SUCCESS;
14480 else
14481 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
14482 ptgt);
14483 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
14484 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
14485 MPTSAS_DEV_GONE);
14486 }
14487 }
14488 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
14489 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
14490 scsi_free_consistent_buf(repluns_bp);
14491 return (DDI_SUCCESS);
14492 }
14493
14494 static int
14495 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14496 {
14497 int rval = DDI_FAILURE;
14498 struct scsi_inquiry *sd_inq = NULL;
14499 mptsas_t *mpt = DIP2MPT(pdip);
14500 mptsas_target_t *ptgt = NULL;
14501
14502 mutex_enter(&mpt->m_mutex);
14593
14594 if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
14595 DDI_SUCCESS) {
14596 continue;
14597 }
14598
14599 if (wwid == sas_wwn) {
14600 for (i = 0; i < lun_cnt; i++) {
14601 if (repluns[i] == lun) {
14602 find = 1;
14603 break;
14604 }
14605 }
14606 } else {
14607 continue;
14608 }
14609 if (find == 0) {
14610 /*
14611 * The lun has not been there already
14612 */
14613 (void) mptsas_offline_lun(pdip, savechild, NULL,
14614 NDI_DEVI_REMOVE);
14615 }
14616 }
14617
14618 pip = mdi_get_next_client_path(pdip, NULL);
14619 while (pip) {
14620 find = 0;
14621 savepip = pip;
14622 addr = MDI_PI(pip)->pi_addr;
14623
14624 pip = mdi_get_next_client_path(pdip, pip);
14625
14626 if (addr == NULL) {
14627 continue;
14628 }
14629
14630 if (mptsas_parse_address(addr, &sas_wwn, &phy,
14631 &lun) != DDI_SUCCESS) {
14632 continue;
14633 }
14634
14635 if (sas_wwn == wwid) {
14636 for (i = 0; i < lun_cnt; i++) {
14637 if (repluns[i] == lun) {
14638 find = 1;
14639 break;
14640 }
14641 }
14642 } else {
14643 continue;
14644 }
14645
14646 if (find == 0) {
14647 /*
14648 * The lun has not been there already
14649 */
14650 (void) mptsas_offline_lun(pdip, NULL, savepip,
14651 NDI_DEVI_REMOVE);
14652 }
14653 }
14654 }
14655
14656 /*
14657 * If this enclosure doesn't exist in the enclosure list, add it. If it does,
14658 * update it.
14659 */
14660 static void
14661 mptsas_enclosure_update(mptsas_t *mpt, mptsas_enclosure_t *mep)
14662 {
14663 mptsas_enclosure_t *m;
14664
14665 ASSERT(MUTEX_HELD(&mpt->m_mutex));
14666 m = mptsas_enc_lookup(mpt, mep->me_enchdl);
14667 if (m != NULL) {
14668 m->me_flags = mep->me_flags;
14669 return;
14670 }
14671
14672 m = kmem_zalloc(sizeof (*m), KM_SLEEP);
14673 m->me_enchdl = mep->me_enchdl;
14674 m->me_flags = mep->me_flags;
14675 list_insert_tail(&mpt->m_enclosures, m);
14676 }
14677
14678 static void
14679 mptsas_update_hashtab(struct mptsas *mpt)
14680 {
14681 uint32_t page_address;
14682 int rval = 0;
14683 uint16_t dev_handle;
14684 mptsas_target_t *ptgt = NULL;
14685 mptsas_smp_t smp_node;
14686
14687 /*
14688 * Get latest RAID info.
14689 */
14690 (void) mptsas_get_raid_info(mpt);
14691
14692 dev_handle = mpt->m_smp_devhdl;
14693 while (mpt->m_done_traverse_smp == 0) {
14694 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
14785 sp->m_deviceinfo = 0;
14786 }
14787 mpt->m_done_traverse_dev = 0;
14788 mpt->m_done_traverse_smp = 0;
14789 mpt->m_done_traverse_enc = 0;
14790 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
14791 mptsas_update_hashtab(mpt);
14792 }
14793
14794 static void
14795 mptsas_config_all(dev_info_t *pdip)
14796 {
14797 dev_info_t *smpdip = NULL;
14798 mptsas_t *mpt = DIP2MPT(pdip);
14799 int phymask = 0;
14800 mptsas_phymask_t phy_mask;
14801 mptsas_target_t *ptgt = NULL;
14802 mptsas_smp_t *psmp;
14803
14804 /*
14805 * Get the phymask associated to the iport
14806 */
14807 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14808 "phymask", 0);
14809
14810 /*
14811 * Enumerate RAID volumes here (phymask == 0).
14812 */
14813 if (phymask == 0) {
14814 mptsas_config_all_viport(pdip);
14815 return;
14816 }
14817
14818 mutex_enter(&mpt->m_mutex);
14819
14820 if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp ||
14821 !mpt->m_done_traverse_enc) {
14822 mptsas_update_hashtab(mpt);
14823 }
14824
14825 for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
14826 psmp = refhash_next(mpt->m_smp_targets, psmp)) {
14827 phy_mask = psmp->m_addr.mta_phymask;
14828 if (phy_mask == phymask) {
14829 smpdip = NULL;
14830 mutex_exit(&mpt->m_mutex);
14831 (void) mptsas_online_smp(pdip, psmp, &smpdip);
14832 mutex_enter(&mpt->m_mutex);
14833 }
14880
14881 child = ddi_get_child(pdip);
14882 while (child) {
14883 addr = ddi_get_name_addr(child);
14884 prechild = child;
14885 child = ddi_get_next_sibling(child);
14886
14887 if (addr == NULL) {
14888 continue;
14889 }
14890 if ((cp = strchr(addr, ',')) == NULL) {
14891 continue;
14892 }
14893
14894 s = (uintptr_t)cp - (uintptr_t)addr;
14895
14896 if (strncmp(addr, name, s) != 0) {
14897 continue;
14898 }
14899
14900 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
14901 NDI_DEVI_REMOVE);
14902 if (tmp_rval != DDI_SUCCESS) {
14903 rval = DDI_FAILURE;
14904 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14905 prechild, MPTSAS_DEV_GONE) !=
14906 DDI_PROP_SUCCESS) {
14907 mptsas_log(mpt, CE_WARN, "mptsas driver "
14908 "unable to create property for "
14909 "SAS %s (MPTSAS_DEV_GONE)", addr);
14910 }
14911 }
14912 }
14913
14914 pip = mdi_get_next_client_path(pdip, NULL);
14915 while (pip) {
14916 addr = MDI_PI(pip)->pi_addr;
14917 savepip = pip;
14918 pip = mdi_get_next_client_path(pdip, pip);
14919 if (addr == NULL) {
14920 continue;
14921 }
14922
14923 if ((cp = strchr(addr, ',')) == NULL) {
14924 continue;
14925 }
14926
14927 s = (uintptr_t)cp - (uintptr_t)addr;
14928
14929 if (strncmp(addr, name, s) != 0) {
14930 continue;
14931 }
14932
14933 (void) mptsas_offline_lun(pdip, NULL, savepip,
14934 NDI_DEVI_REMOVE);
14935 /*
14936 * driver will not invoke mdi_pi_free, so path will not
14937 * be freed forever, return DDI_FAILURE.
14938 */
14939 rval = DDI_FAILURE;
14940 }
14941 return (rval);
14942 }
14943
14944 static int
14945 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
14946 mdi_pathinfo_t *rpip, uint_t flags)
14947 {
14948 int rval = DDI_FAILURE;
14949 char *devname;
14950 dev_info_t *cdip, *parent;
14951
14952 if (rpip != NULL) {
14953 parent = scsi_vhci_dip;
14954 cdip = mdi_pi_get_client(rpip);
14955 } else if (rdip != NULL) {
14956 parent = pdip;
14957 cdip = rdip;
14958 } else {
14959 return (DDI_FAILURE);
14960 }
14961
14962 /*
14963 * Make sure node is attached otherwise
14964 * it won't have related cache nodes to
14965 * clean up. i_ddi_devi_attached is
14966 * similiar to i_ddi_node_state(cdip) >=
14967 * DS_ATTACHED.
14968 */
14969 if (i_ddi_devi_attached(cdip)) {
14970
14971 /* Get full devname */
14972 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14973 (void) ddi_deviname(cdip, devname);
14974 /* Clean cache */
14975 (void) devfs_clean(parent, devname + 1,
14976 DV_CLEAN_FORCE);
14977 kmem_free(devname, MAXNAMELEN + 1);
14978 }
14979 if (rpip != NULL) {
14980 if (MDI_PI_IS_OFFLINE(rpip)) {
14981 rval = DDI_SUCCESS;
14982 } else {
14983 rval = mdi_pi_offline(rpip, 0);
14984 }
14985 } else {
14986 rval = ndi_devi_offline(cdip, flags);
14987 }
14988
14989 return (rval);
14990 }
14991
14992 static dev_info_t *
14993 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14994 {
14995 dev_info_t *child = NULL;
14996 char *smp_wwn = NULL;
14997
14998 child = ddi_get_child(parent);
14999 while (child) {
15000 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
15001 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
15002 != DDI_SUCCESS) {
15003 child = ddi_get_next_sibling(child);
15004 continue;
15005 }
15006
15007 if (strcmp(smp_wwn, str_wwn) == 0) {
15008 ddi_prop_free(smp_wwn);
15009 break;
15010 }
15011 child = ddi_get_next_sibling(child);
15012 ddi_prop_free(smp_wwn);
15013 }
15014 return (child);
15015 }
15016
15017 static int
15018 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
15019 {
15020 int rval = DDI_FAILURE;
15021 char *devname;
15022 char wwn_str[MPTSAS_WWN_STRLEN];
15023 dev_info_t *cdip;
15024
15025 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15026
15027 cdip = mptsas_find_smp_child(pdip, wwn_str);
15028
15029 if (cdip == NULL)
15030 return (DDI_SUCCESS);
15031
15032 /*
15033 * Make sure node is attached otherwise
15034 * it won't have related cache nodes to
15035 * clean up. i_ddi_devi_attached is
15036 * similiar to i_ddi_node_state(cdip) >=
15037 * DS_ATTACHED.
15038 */
15039 if (i_ddi_devi_attached(cdip)) {
15040
15041 /* Get full devname */
15042 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
15043 (void) ddi_deviname(cdip, devname);
15044 /* Clean cache */
15045 (void) devfs_clean(pdip, devname + 1,
15046 DV_CLEAN_FORCE);
15047 kmem_free(devname, MAXNAMELEN + 1);
15048 }
15049
15050 rval = ndi_devi_offline(cdip, flags);
15051
15052 return (rval);
15053 }
15054
15055 static dev_info_t *
15056 mptsas_find_child(dev_info_t *pdip, char *name)
15057 {
15058 dev_info_t *child = NULL;
15059 char *rname = NULL;
15060 int rval = DDI_FAILURE;
15061
15062 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15063
15064 child = ddi_get_child(pdip);
15065 while (child) {
15066 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
15067 if (rval != DDI_SUCCESS) {
15068 child = ddi_get_next_sibling(child);
15069 bzero(rname, SCSI_MAXNAMELEN);
15070 continue;
15071 }
15166 */
15167 if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
15168 sd_inq->inq_dtype == DTYPE_OPTICAL ||
15169 sd_inq->inq_dtype == DTYPE_ESI))
15170 goto create_lun;
15171
15172 /*
15173 * The LCA returns good SCSI status, but corrupt page 83 data the first
15174 * time it is queried. The solution is to keep trying to request page83
 * and verify the GUID is not (DDI_NOT_WELL_FORMED) within
 * mptsas_inq83_retry_timeout seconds.  If the timeout expires, the driver
 * gives up on getting the VPD page at this stage and fails the enumeration.
15178 */
15179
15180 inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);
15181
15182 for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
15183 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
15184 inq83_len1, &inq83_len, 1);
15185 if (rval != 0) {
15186 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
15187 "0x83 for target:%x, lun:%x failed!", target, lun);
15188 if (mptsas_physical_bind_failed_page_83 != B_FALSE)
15189 goto create_lun;
15190 goto out;
15191 }
15192 /*
15193 * create DEVID from inquiry data
15194 */
15195 if ((rval = ddi_devid_scsi_encode(
15196 DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
15197 sizeof (struct scsi_inquiry), NULL, 0, inq83,
15198 (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
15199 /*
15200 * extract GUID from DEVID
15201 */
15202 guid = ddi_devid_to_guid(devid);
15203
15204 /*
15205 * Do not enable MPXIO if the strlen(guid) is greater
15206 * than MPTSAS_MAX_GUID_LEN, this constrain would be
15207 * handled by framework later.
15208 */
15209 if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
15210 ddi_devid_free_guid(guid);
15211 guid = NULL;
15212 if (mpt->m_mpxio_enable == TRUE) {
15213 mptsas_log(mpt, CE_NOTE, "!Target:%x, "
15214 "lun:%x doesn't have a valid GUID, "
15215 "multipathing for this drive is "
15216 "not enabled", target, lun);
15217 }
15218 }
15219
15220 /*
15221 * devid no longer needed
15222 */
15223 ddi_devid_free(devid);
15224 break;
15225 } else if (rval == DDI_NOT_WELL_FORMED) {
15226 /*
 * A return value of DDI_NOT_WELL_FORMED from
 * ddi_devid_scsi_encode means DEVID_RETRY: it is worth
 * retrying inquiry page 0x83 to get the GUID.
15230 */
15231 NDBG20(("Not well formed devid, retry..."));
15232 delay(1 * drv_usectohz(1000000));
15233 continue;
15234 } else {
15235 mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
15236 "path target:%x, lun:%x", target, lun);
15237 rval = DDI_FAILURE;
15238 goto create_lun;
15239 }
15240 }
15241
15242 if (i == mptsas_inq83_retry_timeout) {
15243 mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
15244 "for path target:%x, lun:%x", target, lun);
15245 }
15246
15247 rval = DDI_FAILURE;
15248
15249 create_lun:
15250 if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
15251 rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
15252 ptgt, lun);
15253 }
15254 if (rval != DDI_SUCCESS) {
15255 rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
15256 ptgt, lun);
15257
15258 }
15259 out:
15260 if (guid != NULL) {
15261 /*
15262 * guid no longer needed
15263 */
15311 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15312 } else {
15313 *pip = mptsas_find_path_phy(pdip, phy);
15314 }
15315
15316 if (*pip != NULL) {
15317 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15318 ASSERT(*lun_dip != NULL);
15319 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15320 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15321 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15322 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15323 /*
15324 * Same path back online again.
15325 */
15326 (void) ddi_prop_free(old_guid);
15327 if ((!MDI_PI_IS_ONLINE(*pip)) &&
15328 (!MDI_PI_IS_STANDBY(*pip)) &&
15329 (ptgt->m_tgt_unconfigured == 0)) {
15330 rval = mdi_pi_online(*pip, 0);
15331 mutex_enter(&mpt->m_mutex);
15332 ptgt->m_led_status = 0;
15333 (void) mptsas_flush_led_status(mpt,
15334 ptgt);
15335 mutex_exit(&mpt->m_mutex);
15336 } else {
15337 rval = DDI_SUCCESS;
15338 }
15339 if (rval != DDI_SUCCESS) {
15340 mptsas_log(mpt, CE_WARN, "path:target: "
15341 "%x, lun:%x online failed!", target,
15342 lun);
15343 *pip = NULL;
15344 *lun_dip = NULL;
15345 }
15346 return (rval);
15347 } else {
15348 /*
15349 * The GUID of the LUN has changed which maybe
15350 * because customer mapped another volume to the
15351 * same LUN.
15352 */
15353 mptsas_log(mpt, CE_WARN, "The GUID of the "
15354 "target:%x, lun:%x was changed, maybe "
15355 "because someone mapped another volume "
15356 "to the same LUN", target, lun);
15357 (void) ddi_prop_free(old_guid);
15358 if (!MDI_PI_IS_OFFLINE(*pip)) {
15359 rval = mdi_pi_offline(*pip, 0);
15360 if (rval != MDI_SUCCESS) {
15361 mptsas_log(mpt, CE_WARN, "path:"
15362 "target:%x, lun:%x offline "
15363 "failed!", target, lun);
15364 *pip = NULL;
15365 *lun_dip = NULL;
15366 return (DDI_FAILURE);
15367 }
15368 }
15369 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
15370 mptsas_log(mpt, CE_WARN, "path:target:"
15371 "%x, lun:%x free failed!", target,
15372 lun);
15373 *pip = NULL;
15374 *lun_dip = NULL;
15375 return (DDI_FAILURE);
15376 }
15377 }
15378 } else {
15379 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15380 "property for path:target:%x, lun:%x", target, lun);
15381 *pip = NULL;
15382 *lun_dip = NULL;
15383 return (DDI_FAILURE);
15384 }
15385 }
15386 scsi_hba_nodename_compatible_get(inq, NULL,
15387 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
15388
15389 /*
15590 mptsas_log(mpt, CE_WARN, "mptsas driver"
15591 "failed to create pm-capable "
15592 "property, target %d", target);
15593 mdi_rtn = MDI_FAILURE;
15594 goto virt_create_done;
15595 }
15596 }
15597 /*
15598 * Create the phy-num property
15599 */
15600 if (mdi_prop_update_int(*pip, "phy-num",
15601 ptgt->m_phynum) != DDI_SUCCESS) {
15602 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15603 "create phy-num property for target %d lun %d",
15604 target, lun);
15605 mdi_rtn = MDI_FAILURE;
15606 goto virt_create_done;
15607 }
15608 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15609 mdi_rtn = mdi_pi_online(*pip, 0);
15610 if (mdi_rtn == MDI_SUCCESS) {
15611 mutex_enter(&mpt->m_mutex);
15612 ptgt->m_led_status = 0;
15613 (void) mptsas_flush_led_status(mpt, ptgt);
15614 mutex_exit(&mpt->m_mutex);
15615 }
15616 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15617 mdi_rtn = MDI_FAILURE;
15618 }
15619 virt_create_done:
15620 if (*pip && mdi_rtn != MDI_SUCCESS) {
15621 (void) mdi_pi_free(*pip, 0);
15622 *pip = NULL;
15623 *lun_dip = NULL;
15624 }
15625 }
15626
15627 scsi_hba_nodename_compatible_free(nodename, compatible);
15628 if (lun_addr != NULL) {
15629 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15630 }
15631 if (wwn_str != NULL) {
15632 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15633 }
15634 if (component != NULL) {
15635 kmem_free(component, MAXPATHLEN);
15636 }
15637
15638 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15639 }
15640
15641 static int
15968 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15969 *lun_dip, "phy-num", ptgt->m_phynum) !=
15970 DDI_PROP_SUCCESS) {
15971 mptsas_log(mpt, CE_WARN, "mptsas driver "
15972 "failed to create phy-num property for "
15973 "target %d", target);
15974 ndi_rtn = NDI_FAILURE;
15975 goto phys_create_done;
15976 }
15977 }
15978 phys_create_done:
15979 /*
15980 * If props were setup ok, online the lun
15981 */
15982 if (ndi_rtn == NDI_SUCCESS) {
15983 /*
15984 * Try to online the new node
15985 */
15986 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15987 }
15988 if (ndi_rtn == NDI_SUCCESS) {
15989 mutex_enter(&mpt->m_mutex);
15990 ptgt->m_led_status = 0;
15991 (void) mptsas_flush_led_status(mpt, ptgt);
15992 mutex_exit(&mpt->m_mutex);
15993 }
15994
15995 /*
15996 * If success set rtn flag, else unwire alloc'd lun
15997 */
15998 if (ndi_rtn != NDI_SUCCESS) {
15999 NDBG12(("mptsas driver unable to online "
16000 "target %d lun %d", target, lun));
16001 ndi_prop_remove_all(*lun_dip);
16002 (void) ndi_devi_free(*lun_dip);
16003 *lun_dip = NULL;
16004 }
16005 }
16006
16007 scsi_hba_nodename_compatible_free(nodename, compatible);
16008
16009 if (wwn_str != NULL) {
16010 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
16011 }
16012 if (component != NULL) {
16013 kmem_free(component, MAXPATHLEN);
16025
16026 /* XXX An HBA driver should not be allocating an smp_device. */
16027 bzero(&smp_sd, sizeof (struct smp_device));
16028 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
16029 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
16030
16031 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
16032 return (NDI_FAILURE);
16033 return (NDI_SUCCESS);
16034 }
16035
16036 static int
16037 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
16038 {
16039 mptsas_t *mpt = DIP2MPT(pdip);
16040 mptsas_smp_t *psmp = NULL;
16041 int rval;
16042 int phymask;
16043
16044 /*
16045 * Get the physical port associated to the iport
16046 * PHYMASK TODO
16047 */
16048 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
16049 "phymask", 0);
16050 /*
16051 * Find the smp node in hash table with specified sas address and
16052 * physical port
16053 */
16054 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
16055 if (psmp == NULL) {
16056 return (DDI_FAILURE);
16057 }
16058
16059 rval = mptsas_online_smp(pdip, psmp, smp_dip);
16060
16061 return (rval);
16062 }
16063
16064 static int
16065 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
16066 dev_info_t **smp_dip)
16067 {
16660 */
16661 if (ret_data != NULL) {
16662 mptsas_smp_target_copy(data, ret_data);
16663 return (ret_data);
16664 }
16665
16666 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16667 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16668 refhash_insert(mpt->m_smp_targets, ret_data);
16669 return (ret_data);
16670 }
16671
16672 /*
16673 * Functions for SGPIO LED support
16674 */
16675 static dev_info_t *
16676 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16677 {
16678 dev_info_t *dip;
16679 int prop;
16680 dip = e_ddi_hold_devi_by_dev(dev, 0);
16681 if (dip == NULL)
16682 return (dip);
16683 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16684 "phymask", 0);
16685 *phymask = (mptsas_phymask_t)prop;
16686 ddi_release_devi(dip);
16687 return (dip);
16688 }
16689 static mptsas_target_t *
16690 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16691 {
16692 uint8_t phynum;
16693 uint64_t wwn;
16694 int lun;
16695 mptsas_target_t *ptgt = NULL;
16696
16697 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16698 return (NULL);
16699 }
16700 if (addr[0] == 'w') {
16701 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16702 } else {
16703 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16704 }
16705 return (ptgt);
16706 }
16707
16708 static int
16709 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16710 {
16711 uint32_t slotstatus = 0;
16712
16713 /* Build an MPI2 Slot Status based on our view of the world */
16714 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16715 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16716 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16717 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16718 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16719 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16720
16721 /* Write it to the controller */
16722 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16723 slotstatus, ptgt->m_slot_num));
16724 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16725 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16726 }
16727
16728 /*
16729 * send sep request, use enclosure/slot addressing
16730 */
16731 static int
16732 mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
16733 uint32_t *status, uint8_t act)
16734 {
16735 Mpi2SepRequest_t req;
16736 Mpi2SepReply_t rep;
16737 int ret;
16738 mptsas_enclosure_t *mep;
16739 uint16_t enctype;
16740
16741 ASSERT(mutex_owned(&mpt->m_mutex));
16742
16743 /*
16744 * We only support SEP control of directly-attached targets, in which
16745 * case the "SEP" we're talking to is a virtual one contained within
16746 * the HBA itself. This is necessary because DA targets typically have
16747 * no other mechanism for LED control. Targets for which a separate
16748 * enclosure service processor exists should be controlled via ses(7d)
16749 * or sgen(7d). Furthermore, since such requests can time out, they
16750 * should be made in user context rather than in response to
16751 * asynchronous fabric changes.
16752 *
16753 * In addition, we do not support this operation for RAID volumes,
16754 * since there is no slot associated with them.
16755 */
16756 if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
16757 ptgt->m_addr.mta_phymask == 0) {
16758 return (ENOTTY);
16759 }
16760
16761 /*
16762 * Look through the enclosures and make sure that this enclosure is
16763 * something that is directly attached device. If we didn't find an
16764 * enclosure for this device, don't send the ioctl.
16765 */
16766 mep = mptsas_enc_lookup(mpt, ptgt->m_enclosure);
16767 if (mep == NULL)
16768 return (ENOTTY);
16769 enctype = mep->me_flags & MPI2_SAS_ENCLS0_FLAGS_MNG_MASK;
16770 if (enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES &&
16771 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO &&
16772 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO) {
16773 return (ENOTTY);
16774 }
16775
16776 bzero(&req, sizeof (req));
16777 bzero(&rep, sizeof (rep));
16778
16779 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16780 req.Action = act;
16781 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16782 req.EnclosureHandle = LE_16(ptgt->m_enclosure);
16783 req.Slot = LE_16(ptgt->m_slot_num);
16784 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16785 req.SlotStatus = LE_32(*status);
16786 }
16787 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16788 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
16789 if (ret != 0) {
16790 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16791 "Processor Request message error %d", ret);
16792 return (ret);
16793 }
16794 /* do passthrough success, check the ioc status */
16795 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16796 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16797 "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
16798 LE_32(rep.IOCLogInfo));
16799 switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
16800 case MPI2_IOCSTATUS_INVALID_FUNCTION:
16801 case MPI2_IOCSTATUS_INVALID_VPID:
16802 case MPI2_IOCSTATUS_INVALID_FIELD:
16803 case MPI2_IOCSTATUS_INVALID_STATE:
|
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2019 Nexenta Systems, Inc.
25 * Copyright (c) 2017, Joyent, Inc.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms of all code within
35 * this file that is exclusively owned by LSI, with or without
36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 * License requirements, the following conditions are met:
38 *
39 * Neither the name of the author nor the names of its contributors may be
40 * used to endorse or promote products derived from this software without
41 * specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
62 #if defined(lint) || defined(DEBUG)
63 #define MPTSAS_DEBUG
64 #endif
65
66 /*
67 * standard header files.
68 */
69 #include <sys/note.h>
70 #include <sys/scsi/scsi.h>
71 #include <sys/pci.h>
72 #include <sys/file.h>
73 #include <sys/policy.h>
74 #include <sys/model.h>
75 #include <sys/sysevent.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dr.h>
78 #include <sys/sata/sata_defs.h>
79 #include <sys/sata/sata_hba.h>
80 #include <sys/scsi/generic/sas.h>
81 #include <sys/scsi/impl/scsi_sas.h>
82 #include <sys/sdt.h>
83 #include <sys/mdi_impldefs.h>
84
85 #pragma pack(1)
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
91 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
92 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
93 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
94 #pragma pack()
95
96 /*
97 * private header files.
98 *
99 */
100 #include <sys/scsi/impl/scsi_reset_notify.h>
101 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
102 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
103 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
104 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
105 #include <sys/raidioctl.h>
106
107 /*
108 * FMA header files
109 */
110 #include <sys/ddifm.h>
111 #include <sys/fm/protocol.h>
112 #include <sys/fm/util.h>
113 #include <sys/fm/io/ddi.h>
114
115 /*
116 * autoconfiguration data and routines.
117 */
118 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
119 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
120 static int mptsas_power(dev_info_t *dip, int component, int level);
121
122 /*
123 * cb_ops function
124 */
125 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
126 cred_t *credp, int *rval);
340 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
341
342 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
343 int *lun);
344 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
345
346 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
347 mptsas_phymask_t phymask, uint8_t phy);
348 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
349 mptsas_phymask_t phymask, uint64_t wwid);
350 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
351 mptsas_phymask_t phymask, uint64_t wwid);
352
353 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
354 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
355
356 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
357 uint16_t *handle, mptsas_target_t **pptgt);
358 static void mptsas_update_phymask(mptsas_t *mpt);
359
360 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep,
361 uint16_t idx);
362 static int mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
363 uint32_t *status, uint8_t cmd);
364 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
365 mptsas_phymask_t *phymask);
366 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
367 mptsas_phymask_t phymask);
368
369
370 /*
371 * Enumeration / DR functions
372 */
373 static void mptsas_config_all(dev_info_t *pdip);
374 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
375 dev_info_t **lundip);
376 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
377 dev_info_t **lundip);
378
379 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
380 static int mptsas_offline_target(dev_info_t *pdip, char *name);
381
382 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
383 dev_info_t **dip);
384
385 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
386 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
387 dev_info_t **dip, mptsas_target_t *ptgt);
388
389 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
390 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
391
392 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
393 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
394 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
395 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
396 int lun);
397
398 static void mptsas_offline_missed_luns(dev_info_t *pdip,
399 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
400 static int mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip);
401
402 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
403 dev_info_t **smp_dip);
404 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node);
405
406 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
407 int mode, int *rval);
408 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
409 int mode, int *rval);
410 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
411 int mode, int *rval);
412 static void mptsas_record_event(void *args);
413 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
414 int mode);
415
416 mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t, uint64_t,
417 uint32_t, mptsas_phymask_t, uint8_t);
418 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
419 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
420 dev_info_t **smp_dip);
421
422 /*
423 * Power management functions
424 */
451 static void mptsas_fm_init(mptsas_t *mpt);
452 static void mptsas_fm_fini(mptsas_t *mpt);
453 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
454
455 extern pri_t minclsyspri, maxclsyspri;
456
457 /*
458 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
459 * under this device that the paths to a physical device are created when
460 * MPxIO is used.
461 */
462 extern dev_info_t *scsi_vhci_dip;
463
464 /*
465 * Tunable timeout value for Inquiry VPD page 0x83
466 * By default the value is 30 seconds.
467 */
468 int mptsas_inq83_retry_timeout = 30;
469
470 /*
471 * Tunable for default SCSI pkt timeout. Defaults to 5 seconds, which should
472 * be plenty for INQUIRY and REPORT_LUNS, which are the only commands currently
473 * issued by mptsas directly.
474 */
475 int mptsas_scsi_pkt_time = 5;
476
/*
 * This is used to allocate memory for message frame storage, not for
 * data I/O DMA. All message frames must be stored in the first 4G of
 * physical memory, hence the 32-bit high address limit below.
 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,	/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,	/* allocation alignment requirements */
	0x78,	/* burstsizes - binary encoded values */
	1,	/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,	/* granularity - device transfer size */
	0	/* flags, set to 0 */
};
496
497 /*
721 int
722 _info(struct modinfo *modinfop)
723 {
724 /* CONSTCOND */
725 ASSERT(NO_COMPETING_THREADS);
726 NDBG0(("mptsas _info"));
727
728 return (mod_info(&modlinkage, modinfop));
729 }
730
731 static int
732 mptsas_target_eval_devhdl(const void *op, void *arg)
733 {
734 uint16_t dh = *(uint16_t *)arg;
735 const mptsas_target_t *tp = op;
736
737 return ((int)tp->m_devhdl - (int)dh);
738 }
739
740 static int
741 mptsas_target_eval_nowwn(const void *op, void *arg)
742 {
743 uint8_t phy = *(uint8_t *)arg;
744 const mptsas_target_t *tp = op;
745
746 if (tp->m_addr.mta_wwn != 0)
747 return (-1);
748
749 return ((int)tp->m_phynum - (int)phy);
750 }
751
752 static int
753 mptsas_smp_eval_devhdl(const void *op, void *arg)
754 {
755 uint16_t dh = *(uint16_t *)arg;
756 const mptsas_smp_t *sp = op;
757
758 return ((int)sp->m_devhdl - (int)dh);
759 }
760
1181 mutex_exit(&mptsas_global_mutex);
1182
1183 /* report idle status to pm framework */
1184 if (mpt->m_options & MPTSAS_OPT_PM) {
1185 (void) pm_idle_component(dip, 0);
1186 }
1187
1188 return (DDI_SUCCESS);
1189
1190 default:
1191 return (DDI_FAILURE);
1192
1193 }
1194
1195 instance = ddi_get_instance(dip);
1196
1197 /*
1198 * Allocate softc information.
1199 */
1200 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1201 mptsas_log(NULL, CE_WARN, "cannot allocate soft state");
1202 goto fail;
1203 }
1204
1205 mpt = ddi_get_soft_state(mptsas_state, instance);
1206
1207 if (mpt == NULL) {
1208 mptsas_log(NULL, CE_WARN, "cannot get soft state");
1209 goto fail;
1210 }
1211
1212 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1213 scsi_size_clean(dip);
1214
1215 mpt->m_dip = dip;
1216 mpt->m_instance = instance;
1217
1218 /* Make a per-instance copy of the structures */
1219 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1220 if (mptsas_use_64bit_msgaddr) {
1221 mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1222 } else {
1223 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1224 }
1225 mpt->m_reg_acc_attr = mptsas_dev_attr;
1226 mpt->m_dev_acc_attr = mptsas_dev_attr;
1227
1228 /*
1273 event_taskq_create++;
1274
1275 /*
1276 * A taskq is created for dealing with dr events
1277 */
1278 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1279 "mptsas_dr_taskq",
1280 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1281 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1282 "failed");
1283 goto fail;
1284 }
1285 dr_taskq_create++;
1286
1287 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1288 0, "mptsas_doneq_thread_threshold_prop", 10);
1289 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1290 0, "mptsas_doneq_length_threshold_prop", 8);
1291 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1292 0, "mptsas_doneq_thread_n_prop", 8);
1293 mpt->m_max_tune_throttle = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1294 0, "mptsas_max_throttle", MAX_THROTTLE);
1295
1296 /*
	 * Error check to make sure the value is within range.  If nothing
	 * is set, default to the original design value.
1299 */
1300 if (mpt->m_max_tune_throttle < THROTTLE_LO) {
1301 mpt->m_max_tune_throttle = MAX_THROTTLE;
1302 } else if (mpt->m_max_tune_throttle > THROTTLE_HI) {
1303 mpt->m_max_tune_throttle = THROTTLE_HI;
1304 }
1305
1306 if (mpt->m_doneq_thread_n) {
1307 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1308 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1309
1310 mutex_enter(&mpt->m_doneq_mutex);
1311 mpt->m_doneq_thread_id =
1312 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1313 * mpt->m_doneq_thread_n, KM_SLEEP);
1314
1315 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1316 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1317 CV_DRIVER, NULL);
1318 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1319 MUTEX_DRIVER, NULL);
1320 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1321 mpt->m_doneq_thread_id[j].flag |=
1322 MPTSAS_DONEQ_THREAD_ACTIVE;
1323 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1324 mpt->m_doneq_thread_id[j].arg.t = j;
1325 mpt->m_doneq_thread_id[j].threadp =
1343 goto fail;
1344 intr_added++;
1345
1346 /* Initialize mutex used in interrupt handler */
1347 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1348 DDI_INTR_PRI(mpt->m_intr_pri));
1349 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1350 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1351 DDI_INTR_PRI(mpt->m_intr_pri));
1352 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1353 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1354 NULL, MUTEX_DRIVER,
1355 DDI_INTR_PRI(mpt->m_intr_pri));
1356 }
1357
1358 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1359 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1360 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1361 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1362 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1363 mutex_init_done++;
1364
1365 #ifdef MPTSAS_FAULTINJECTION
1366 TAILQ_INIT(&mpt->m_fminj_cmdq);
1367 #endif
1368
1369 mutex_enter(&mpt->m_mutex);
1370 /*
1371 * Initialize power management component
1372 */
1373 if (mpt->m_options & MPTSAS_OPT_PM) {
1374 if (mptsas_init_pm(mpt)) {
1375 mutex_exit(&mpt->m_mutex);
1376 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1377 "failed");
1378 goto fail;
1379 }
1380 }
1381
1382 /*
1383 * Initialize chip using Message Unit Reset, if allowed
1384 */
1385 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1386 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1387 mutex_exit(&mpt->m_mutex);
1388 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1439 if (mptsas_hba_setup(mpt) == FALSE)
1440 goto fail;
1441 hba_attach_setup++;
1442
1443 if (mptsas_smp_setup(mpt) == FALSE)
1444 goto fail;
1445 smp_attach_setup++;
1446
1447 if (mptsas_enc_setup(mpt) == FALSE)
1448 goto fail;
1449 enc_attach_setup++;
1450
1451 if (mptsas_cache_create(mpt) == FALSE)
1452 goto fail;
1453
1454 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1455 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1456 if (mpt->m_scsi_reset_delay == 0) {
1457 mptsas_log(mpt, CE_NOTE,
1458 "scsi_reset_delay of 0 is not recommended,"
1459 " resetting to SCSI_DEFAULT_RESET_DELAY");
1460 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1461 }
1462
1463 /*
1464 * Initialize the wait and done FIFO queue
1465 */
1466 mpt->m_donetail = &mpt->m_doneq;
1467 mpt->m_waitqtail = &mpt->m_waitq;
1468 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1469 mpt->m_tx_draining = 0;
1470
1471 /*
1472 * ioc cmd queue initialize
1473 */
1474 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1475 mpt->m_dev_handle = 0xFFFF;
1476
1477 MPTSAS_ENABLE_INTR(mpt);
1478
1479 /*
1639 }
1640 if (event_taskq_create) {
1641 ddi_taskq_destroy(mpt->m_event_taskq);
1642 }
1643 if (dr_taskq_create) {
1644 ddi_taskq_destroy(mpt->m_dr_taskq);
1645 }
1646 if (mutex_init_done) {
1647 mutex_destroy(&mpt->m_tx_waitq_mutex);
1648 mutex_destroy(&mpt->m_passthru_mutex);
1649 mutex_destroy(&mpt->m_mutex);
1650 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1651 mutex_destroy(
1652 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1653 }
1654 cv_destroy(&mpt->m_cv);
1655 cv_destroy(&mpt->m_passthru_cv);
1656 cv_destroy(&mpt->m_fw_cv);
1657 cv_destroy(&mpt->m_config_cv);
1658 cv_destroy(&mpt->m_fw_diag_cv);
1659 }
1660
1661 if (map_setup) {
1662 mptsas_cfg_fini(mpt);
1663 }
1664 if (config_setup) {
1665 mptsas_config_space_fini(mpt);
1666 }
1667 mptsas_free_handshake_msg(mpt);
1668 mptsas_hba_fini(mpt);
1669
1670 mptsas_fm_fini(mpt);
1671 ddi_soft_state_free(mptsas_state, instance);
1672 ddi_prop_remove_all(dip);
1673 }
1674 return (DDI_FAILURE);
1675 }
1676
1677 static int
1678 mptsas_suspend(dev_info_t *devi)
1886
1887 NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1888
1889 if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1890 return (DDI_FAILURE);
1891
1892 mpt = TRAN2MPT(tran);
1893 if (!mpt) {
1894 return (DDI_FAILURE);
1895 }
1896 /*
1897 * Still have pathinfo child, should not detach mpt driver
1898 */
1899 if (scsi_hba_iport_unit_address(dip)) {
1900 if (mpt->m_mpxio_enable) {
1901 /*
1902 * MPxIO enabled for the iport
1903 */
1904 ndi_devi_enter(scsi_vhci_dip, &circ1);
1905 ndi_devi_enter(dip, &circ);
1906 while ((pip = mdi_get_next_client_path(dip, NULL)) !=
1907 NULL) {
1908 if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1909 continue;
1910 }
1911 ndi_devi_exit(dip, circ);
1912 ndi_devi_exit(scsi_vhci_dip, circ1);
1913 NDBG12(("detach failed because of "
1914 "outstanding path info"));
1915 return (DDI_FAILURE);
1916 }
1917 ndi_devi_exit(dip, circ);
1918 ndi_devi_exit(scsi_vhci_dip, circ1);
1919 (void) mdi_phci_unregister(dip, 0);
1920 }
1921
1922 ddi_prop_remove_all(dip);
1923
1924 return (DDI_SUCCESS);
1925 }
1926
1927 /* Make sure power level is D0 before accessing registers */
1928 if (mpt->m_options & MPTSAS_OPT_PM) {
1929 (void) pm_busy_component(dip, 0);
1930 if (mpt->m_power_level != PM_LEVEL_D0) {
1931 if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1932 DDI_SUCCESS) {
1933 mptsas_log(mpt, CE_WARN,
1934 "raise power request failed");
1935 (void) pm_idle_component(dip, 0);
1936 return (DDI_FAILURE);
1937 }
1938 }
1939 }
1940
1941 /*
1942 * Send RAID action system shutdown to sync IR. After action, send a
1943 * Message Unit Reset. Since after that DMA resource will be freed,
1944 * set ioc to READY state will avoid HBA initiated DMA operation.
1945 */
1946 mutex_enter(&mpt->m_mutex);
1947 MPTSAS_DISABLE_INTR(mpt);
1948 mptsas_raid_action_system_shutdown(mpt);
1949 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1950 (void) mptsas_ioc_reset(mpt, FALSE);
1951 mutex_exit(&mpt->m_mutex);
1952 mptsas_rem_intrs(mpt);
1953 ddi_taskq_destroy(mpt->m_event_taskq);
1954 ddi_taskq_destroy(mpt->m_dr_taskq);
2040
2041 mptsas_destroy_hashes(mpt);
2042
2043 /*
2044 * Delete nt_active.
2045 */
2046 mutex_enter(&mpt->m_mutex);
2047 mptsas_free_active_slots(mpt);
2048 mutex_exit(&mpt->m_mutex);
2049
2050 /* deallocate everything that was allocated in mptsas_attach */
2051 mptsas_cache_destroy(mpt);
2052
2053 mptsas_hba_fini(mpt);
2054 mptsas_cfg_fini(mpt);
2055
2056 /* Lower the power informing PM Framework */
2057 if (mpt->m_options & MPTSAS_OPT_PM) {
2058 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2059 mptsas_log(mpt, CE_WARN,
2060 "lower power request failed during detach, "
2061 "ignoring");
2062 }
2063
2064 mutex_destroy(&mpt->m_tx_waitq_mutex);
2065 mutex_destroy(&mpt->m_passthru_mutex);
2066 mutex_destroy(&mpt->m_mutex);
2067 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2068 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2069 }
2070 cv_destroy(&mpt->m_cv);
2071 cv_destroy(&mpt->m_passthru_cv);
2072 cv_destroy(&mpt->m_fw_cv);
2073 cv_destroy(&mpt->m_config_cv);
2074 cv_destroy(&mpt->m_fw_diag_cv);
2075
2076 #ifdef MPTSAS_FAULTINJECTION
2077 ASSERT(TAILQ_EMPTY(&mpt->m_fminj_cmdq));
2078 #endif
2079
2080 mptsas_smp_teardown(mpt);
2081 mptsas_enc_teardown(mpt);
2082 mptsas_hba_teardown(mpt);
2083
2084 mptsas_config_space_fini(mpt);
2085
2086 mptsas_free_handshake_msg(mpt);
2087
2088 mptsas_fm_fini(mpt);
2089 ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2090 ddi_prop_remove_all(dip);
2091
2092 return (DDI_SUCCESS);
2093 }
2094
2095 static void
2096 mptsas_list_add(mptsas_t *mpt)
2097 {
2098 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2099
2320 static void
2321 mptsas_smp_teardown(mptsas_t *mpt)
2322 {
2323 (void) smp_hba_detach(mpt->m_dip);
2324 if (mpt->m_smptran != NULL) {
2325 smp_hba_tran_free(mpt->m_smptran);
2326 mpt->m_smptran = NULL;
2327 }
2328 mpt->m_smp_devhdl = 0;
2329 }
2330
2331 static int
2332 mptsas_enc_setup(mptsas_t *mpt)
2333 {
2334 list_create(&mpt->m_enclosures, sizeof (mptsas_enclosure_t),
2335 offsetof(mptsas_enclosure_t, me_link));
2336 return (TRUE);
2337 }
2338
2339 static void
2340 mptsas_enc_free(mptsas_enclosure_t *mep)
2341 {
2342 if (mep == NULL)
2343 return;
2344 if (mep->me_slotleds != NULL) {
2345 VERIFY3U(mep->me_nslots, >, 0);
2346 kmem_free(mep->me_slotleds, sizeof (uint8_t) * mep->me_nslots);
2347 }
2348 kmem_free(mep, sizeof (mptsas_enclosure_t));
2349 }
2350
2351 static void
2352 mptsas_enc_teardown(mptsas_t *mpt)
2353 {
2354 mptsas_enclosure_t *mep;
2355
2356 while ((mep = list_remove_head(&mpt->m_enclosures)) != NULL) {
2357 mptsas_enc_free(mep);
2358 }
2359 list_destroy(&mpt->m_enclosures);
2360 }
2361
2362 static mptsas_enclosure_t *
2363 mptsas_enc_lookup(mptsas_t *mpt, uint16_t hdl)
2364 {
2365 mptsas_enclosure_t *mep;
2366
2367 ASSERT(MUTEX_HELD(&mpt->m_mutex));
2368
2369 for (mep = list_head(&mpt->m_enclosures); mep != NULL;
2370 mep = list_next(&mpt->m_enclosures, mep)) {
2371 if (hdl == mep->me_enchdl) {
2372 return (mep);
2373 }
2374 }
2375
2376 return (NULL);
2377 }
2474 /*
2475 * If IOC is not in operational state, try to hard reset it.
2476 */
2477 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2478 MPI2_IOC_STATE_OPERATIONAL) {
2479 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2480 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2481 mptsas_log(mpt, CE_WARN,
2482 "mptsas_power: hard reset failed");
2483 mutex_exit(&mpt->m_mutex);
2484 return (DDI_FAILURE);
2485 }
2486 }
2487 mpt->m_power_level = PM_LEVEL_D0;
2488 break;
2489 case PM_LEVEL_D3:
2490 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2491 MPTSAS_POWER_OFF(mpt);
2492 break;
2493 default:
2494 mptsas_log(mpt, CE_WARN, "unknown power level <%x>", level);
2495 rval = DDI_FAILURE;
2496 break;
2497 }
2498 mutex_exit(&mpt->m_mutex);
2499 return (rval);
2500 }
2501
2502 /*
2503 * Initialize configuration space and figure out which
 * chip and revision of the chip the mpt driver is using.
2505 */
2506 static int
2507 mptsas_config_space_init(mptsas_t *mpt)
2508 {
2509 NDBG0(("mptsas_config_space_init"));
2510
2511 if (mpt->m_config_handle != NULL)
2512 return (TRUE);
2513
2514 if (pci_config_setup(mpt->m_dip,
2703 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2704 mpt->m_req_frame = memp;
2705
2706 /*
2707 * Clear the request frame pool.
2708 */
2709 bzero(mpt->m_req_frame, mem_size);
2710
2711 return (DDI_SUCCESS);
2712 }
2713
2714 static int
2715 mptsas_alloc_sense_bufs(mptsas_t *mpt)
2716 {
2717 ddi_dma_attr_t sense_dma_attrs;
2718 caddr_t memp;
2719 ddi_dma_cookie_t cookie;
2720 size_t mem_size;
2721 int num_extrqsense_bufs;
2722
2723 /*
2724 * re-alloc when it has already alloced
2725 */
2726 if (mpt->m_dma_req_sense_hdl) {
2727 rmfreemap(mpt->m_erqsense_map);
2728 mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
2729 &mpt->m_acc_req_sense_hdl);
2730 }
2731
2732 /*
2733 * The size of the request sense pool is:
2734 * (Number of Request Frames - 2 ) * Request Sense Size +
2735 * extra memory for extended sense requests.
2736 */
2737 mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
2738 mptsas_extreq_sense_bufsize;
2739
2740 /*
2741 * set the DMA attributes. ARQ buffers
2742 * aligned on a 16-byte boundry.
3045 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
3046 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
3047 /*
3048 * Stick in the address of form "pPHY,LUN"
3049 */
3050 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
3051 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
3052 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
3053 == DDI_PROP_SUCCESS) {
3054 /*
3055 * Stick in the address of the form "wWWN,LUN"
3056 */
3057 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
3058 ddi_prop_free(sas_wwn);
3059 } else {
3060 return (DDI_FAILURE);
3061 }
3062
3063 ASSERT(reallen < len);
3064 if (reallen >= len) {
3065 mptsas_log(0, CE_WARN, "mptsas_get_name: name parameter "
3066 "length too small, it needs to be %d bytes", reallen + 1);
3067 }
3068 return (DDI_SUCCESS);
3069 }
3070
3071 /*
3072 * tran_tgt_init(9E) - target device instance initialization
3073 */
3074 static int
3075 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3076 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3077 {
3078 #ifndef __lock_lint
3079 _NOTE(ARGUNUSED(hba_tran))
3080 #endif
3081
3082 /*
3083 * At this point, the scsi_device structure already exists
3084 * and has been initialized.
3085 *
3092 int lun = sd->sd_address.a_lun;
3093 mdi_pathinfo_t *pip = NULL;
3094 mptsas_tgt_private_t *tgt_private = NULL;
3095 mptsas_target_t *ptgt = NULL;
3096 char *psas_wwn = NULL;
3097 mptsas_phymask_t phymask = 0;
3098 uint64_t sas_wwn = 0;
3099 mptsas_target_addr_t addr;
3100 mpt = SDEV2MPT(sd);
3101
3102 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
3103
3104 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
3105 (void *)hba_dip, (void *)tgt_dip, lun));
3106
3107 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3108 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
3109 ddi_set_name_addr(tgt_dip, NULL);
3110 return (DDI_FAILURE);
3111 }
3112
3113 /*
3114 * The phymask exists if the port is active, otherwise
3115 * nothing to do.
3116 */
3117 if (ddi_prop_exists(DDI_DEV_T_ANY, hba_dip,
3118 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
3119 return (DDI_FAILURE);
3120
3121 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
3122 "phymask", 0);
3123
3124 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3125 if ((pip = (void *)(sd->sd_private)) == NULL) {
3126 /*
3127 * Very bad news if this occurs. Somehow scsi_vhci has
3128 * lost the pathinfo node for this target.
3129 */
3130 return (DDI_NOT_WELL_FORMED);
3131 }
3132
3133 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
3134 DDI_PROP_SUCCESS) {
3135 mptsas_log(mpt, CE_WARN, "Get lun property failed");
3136 return (DDI_FAILURE);
3137 }
3138
3139 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
3140 &psas_wwn) == MDI_SUCCESS) {
3141 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3142 sas_wwn = 0;
3143 }
3144 (void) mdi_prop_free(psas_wwn);
3145 }
3146 } else {
3147 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
3148 DDI_PROP_DONTPASS, LUN_PROP, 0);
3149 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
3150 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
3151 DDI_PROP_SUCCESS) {
3152 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3153 sas_wwn = 0;
3154 }
3155 ddi_prop_free(psas_wwn);
3156 } else {
3157 sas_wwn = 0;
3158 }
3159 }
3160
3161 ASSERT((sas_wwn != 0) || (phymask != 0));
3162 addr.mta_wwn = sas_wwn;
3163 addr.mta_phymask = phymask;
3164 mutex_enter(&mpt->m_mutex);
3165 ptgt = refhash_lookup(mpt->m_targets, &addr);
3166 mutex_exit(&mpt->m_mutex);
3167 if (ptgt == NULL) {
3168 mptsas_log(mpt, CE_WARN, "tgt_init: target doesn't exist or "
3169 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
3170 sas_wwn);
3171 return (DDI_FAILURE);
3172 }
3173 if (hba_tran->tran_tgt_private == NULL) {
3174 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
3175 KM_SLEEP);
3176 tgt_private->t_lun = lun;
3177 tgt_private->t_private = ptgt;
3178 hba_tran->tran_tgt_private = tgt_private;
3179 }
3180
3181 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3182 return (DDI_SUCCESS);
3183 }
3184 mutex_enter(&mpt->m_mutex);
3185
3186 if (ptgt->m_deviceinfo &
3187 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
3188 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
3193 struct sata_id *sid = NULL;
3194 char model[SATA_ID_MODEL_LEN + 1];
3195 char fw[SATA_ID_FW_LEN + 1];
3196 char *vid, *pid;
3197
3198 mutex_exit(&mpt->m_mutex);
3199 /*
3200 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
3201 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
3202 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
3203 */
3204 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
3205 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
3206 inq89, inq89_len, &reallen, 1);
3207
3208 if (rval != 0) {
3209 if (inq89 != NULL) {
3210 kmem_free(inq89, inq89_len);
3211 }
3212
3213 mptsas_log(mpt, CE_WARN, "mptsas request inquiry page "
3214 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
3215 return (DDI_SUCCESS);
3216 }
3217 sid = (void *)(&inq89[60]);
3218
3219 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
3220 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
3221
3222 model[SATA_ID_MODEL_LEN] = 0;
3223 fw[SATA_ID_FW_LEN] = 0;
3224
3225 sata_split_model(model, &vid, &pid);
3226
3227 /*
3228 * override SCSA "inquiry-*" properties
3229 */
3230 if (vid)
3231 (void) scsi_device_prop_update_inqstring(sd,
3232 INQUIRY_VENDOR_ID, vid, strlen(vid));
3233 if (pid)
3446 /*
3447 * The command should be allowed to retry by returning
3448 * TRAN_BUSY to stall the I/O's which come from
3449 * scsi_vhci since the device/path is in unstable state
3450 * now.
3451 */
3452 return (TRAN_BUSY);
3453 } else {
3454 /*
3455 * The device is offline, just fail the command by
3456 * return TRAN_FATAL_ERROR.
3457 */
3458 return (TRAN_FATAL_ERROR);
3459 }
3460 }
3461 rval = mptsas_accept_pkt(mpt, cmd);
3462
3463 return (rval);
3464 }
3465
3466 #ifdef MPTSAS_FAULTINJECTION
3467 static void
3468 mptsas_fminj_move_cmd_to_doneq(mptsas_t *mpt, mptsas_cmd_t *cmd,
3469 uchar_t reason, uint_t stat)
3470 {
3471 struct scsi_pkt *pkt = cmd->cmd_pkt;
3472
3473 TAILQ_REMOVE(&mpt->m_fminj_cmdq, cmd, cmd_active_link);
3474
3475 /* Setup reason/statistics. */
3476 pkt->pkt_reason = reason;
3477 pkt->pkt_statistics = stat;
3478
3479 cmd->cmd_active_expiration = 0;
3480
3481 /* Move command to doneque. */
3482 cmd->cmd_linkp = NULL;
3483 cmd->cmd_flags |= CFLAG_FINISHED;
3484 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
3485
3486 *mpt->m_donetail = cmd;
3487 mpt->m_donetail = &cmd->cmd_linkp;
3488 mpt->m_doneq_len++;
3489 }
3490
3491 static void
3492 mptsas_fminj_move_tgt_to_doneq(mptsas_t *mpt, ushort_t target,
3493 uchar_t reason, uint_t stat)
3494 {
3495 mptsas_cmd_t *cmd;
3496
3497 ASSERT(mutex_owned(&mpt->m_mutex));
3498
3499 if (!TAILQ_EMPTY(&mpt->m_fminj_cmdq)) {
3500 cmd = TAILQ_FIRST(&mpt->m_fminj_cmdq);
3501 ASSERT(cmd != NULL);
3502
3503 while (cmd != NULL) {
3504 mptsas_cmd_t *next = TAILQ_NEXT(cmd, cmd_active_link);
3505
3506 if (Tgt(cmd) == target) {
3507 mptsas_fminj_move_cmd_to_doneq(mpt, cmd,
3508 reason, stat);
3509 }
3510 cmd = next;
3511 }
3512 }
3513 }
3514
3515 static void
3516 mptsas_fminj_watchsubr(mptsas_t *mpt,
3517 struct mptsas_active_cmdq *expired)
3518 {
3519 mptsas_cmd_t *cmd;
3520
3521 ASSERT(mutex_owned(&mpt->m_mutex));
3522
3523 if (!TAILQ_EMPTY(&mpt->m_fminj_cmdq)) {
3524 hrtime_t timestamp = gethrtime();
3525
3526 cmd = TAILQ_FIRST(&mpt->m_fminj_cmdq);
3527 ASSERT(cmd != NULL);
3528
3529 while (cmd != NULL) {
3530 mptsas_cmd_t *next = TAILQ_NEXT(cmd, cmd_active_link);
3531
3532 if (cmd->cmd_active_expiration <= timestamp) {
3533 struct scsi_pkt *pkt = cmd->cmd_pkt;
3534
3535 DTRACE_PROBE1(mptsas__command__timeout,
3536 struct scsi_pkt *, pkt);
3537
3538 /* Setup proper flags. */
3539 pkt->pkt_reason = CMD_TIMEOUT;
3540 pkt->pkt_statistics = (STAT_TIMEOUT |
3541 STAT_DEV_RESET);
3542 cmd->cmd_active_expiration = 0;
3543
3544 TAILQ_REMOVE(&mpt->m_fminj_cmdq, cmd,
3545 cmd_active_link);
3546 TAILQ_INSERT_TAIL(expired, cmd,
3547 cmd_active_link);
3548 }
3549 cmd = next;
3550 }
3551 }
3552 }
3553
3554 static int
3555 mptsas_fminject(mptsas_t *mpt, mptsas_cmd_t *cmd)
3556 {
3557 struct scsi_pkt *pkt = cmd->cmd_pkt;
3558
3559 ASSERT(mutex_owned(&mpt->m_mutex));
3560
3561 if (pkt->pkt_flags & FLAG_PKT_TIMEOUT) {
3562 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
3563 (pkt->pkt_comp != NULL)) {
3564 pkt->pkt_state = (STATE_GOT_BUS|STATE_GOT_TARGET|
3565 STATE_SENT_CMD);
3566 cmd->cmd_active_expiration =
3567 gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
3568 TAILQ_INSERT_TAIL(&mpt->m_fminj_cmdq,
3569 cmd, cmd_active_link);
3570 return (0);
3571 }
3572 }
3573 return (-1);
3574 }
3575 #endif /* MPTSAS_FAULTINJECTION */
3576
3577 static int
3578 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3579 {
3580 int rval = TRAN_ACCEPT;
3581 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3582
3583 NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3584
3585 ASSERT(mutex_owned(&mpt->m_mutex));
3586
3587 if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3588 rval = mptsas_prepare_pkt(cmd);
3589 if (rval != TRAN_ACCEPT) {
3590 cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3591 return (rval);
3592 }
3593 }
3594
3595 /*
3596 * reset the throttle if we were draining
3597 */
3598 if ((ptgt->m_t_ncmds == 0) &&
3599 (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3600 NDBG23(("reset throttle"));
3601 ASSERT(ptgt->m_reset_delay == 0);
3602 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3603 }
3604
3605 /*
3606 * If HBA is being reset, the device handles will be invalidated.
3607 * This is temporary and, if target is still attached, the device
3608 * handles will be re-assigned when firmware reset completes.
3609 * Then, if command was already waiting, complete the command
3610 * otherwise return BUSY and expect transport retry.
3611 */
3612 if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3613 NDBG20(("retry command, invalid devhdl, during FW reset."));
3614 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3615 if (cmd->cmd_flags & CFLAG_TXQ) {
3616 mptsas_doneq_add(mpt, cmd);
3617 mptsas_doneq_empty(mpt);
3618 return (rval);
3619 } else {
3620 return (TRAN_BUSY);
3621 }
3622 }
3623
3624 /*
3625 * If the device handle has been invalidated, set the response
3626 * reason to indicate the device is gone. Then add the
3627 * command to the done queue and run the completion routine
3628 * so the initiator of the command can clean up.
3629 */
3630 if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3631 NDBG20(("rejecting command, invalid devhdl because "
3632 "device gone."));
3633 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3634 if (cmd->cmd_flags & CFLAG_TXQ) {
3635 mptsas_doneq_add(mpt, cmd);
3636 mptsas_doneq_empty(mpt);
3637 return (rval);
3638 } else {
3639 return (TRAN_FATAL_ERROR);
3640 }
3641 }
3642
3643 /*
	 * Do fault injection before transmitting command.
3645 * FLAG_NOINTR commands are skipped.
3646 */
3647 #ifdef MPTSAS_FAULTINJECTION
3648 if (!mptsas_fminject(mpt, cmd)) {
3649 return (TRAN_ACCEPT);
3650 }
3651 #endif
3652
3653 /*
3654 * The first case is the normal case. mpt gets a command from the
3655 * target driver and starts it.
3656 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
3657 * commands is m_max_requests - 2.
3658 */
3659 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3660 (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3661 (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3662 (ptgt->m_reset_delay == 0) &&
3663 (ptgt->m_t_nwait == 0) &&
3664 ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3665 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3666 (void) mptsas_start_cmd(mpt, cmd);
3667 } else {
3668 mptsas_waitq_add(mpt, cmd);
3669 }
3670 } else {
3671 /*
3672 * Add this pkt to the work queue
3673 */
3746 * Initialize expiration time for passthrough commands,
3747 */
3748 cmd->cmd_active_expiration = gethrtime() +
3749 (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
3750 }
3751 return (TRUE);
3752 }
3753
3754 /*
3755 * prepare the pkt:
3756 * the pkt may have been resubmitted or just reused so
3757 * initialize some fields and do some checks.
3758 */
3759 static int
3760 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3761 {
3762 struct scsi_pkt *pkt = CMD2PKT(cmd);
3763
3764 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3765
3766 #ifdef MPTSAS_FAULTINJECTION
3767 /* Check for fault flags prior to perform actual initialization. */
3768 if (pkt->pkt_flags & FLAG_PKT_BUSY) {
3769 return (TRAN_BUSY);
3770 }
3771 #endif
3772
3773 /*
3774 * Reinitialize some fields that need it; the packet may
3775 * have been resubmitted
3776 */
3777 pkt->pkt_reason = CMD_CMPLT;
3778 pkt->pkt_state = 0;
3779 pkt->pkt_statistics = 0;
3780 pkt->pkt_resid = 0;
3781 cmd->cmd_age = 0;
3782 cmd->cmd_pkt_flags = pkt->pkt_flags;
3783
3784 /*
3785 * zero status byte.
3786 */
3787 *(pkt->pkt_scbp) = 0;
3788
3789 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3790 pkt->pkt_resid = cmd->cmd_dmacount;
3791
3792 /*
3865 save_dma_handle = cmd->cmd_dmahandle;
3866 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3867 cmd->cmd_dmahandle = save_dma_handle;
3868
3869 pkt = (void *)((uchar_t *)cmd +
3870 sizeof (struct mptsas_cmd));
3871 pkt->pkt_ha_private = (opaque_t)cmd;
3872 pkt->pkt_address = *ap;
3873 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3874 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3875 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3876 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3877 cmd->cmd_cdblen = (uchar_t)cmdlen;
3878 cmd->cmd_scblen = statuslen;
3879 cmd->cmd_rqslen = SENSE_LENGTH;
3880 cmd->cmd_tgt_addr = ptgt;
3881
3882 if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
3883 (tgtlen > PKT_PRIV_LEN) ||
3884 (statuslen > EXTCMDS_STATUS_SIZE)) {
3885 if (mptsas_pkt_alloc_extern(mpt, cmd,
3886 cmdlen, tgtlen, statuslen, kf)) {
3887 /*
3888 * if extern allocation fails, it will
3889 * deallocate the new pkt as well
3890 */
3891 return (NULL);
3892 }
3893 }
3894 new_cmd = cmd;
3895
3896 } else {
3897 cmd = PKT2CMD(pkt);
3898 pkt->pkt_start = 0;
3899 pkt->pkt_stop = 0;
3900 new_cmd = NULL;
3901 }
3902
3903
3904 /* grab cmd->cmd_cookiec here as oldcookiec */
3905
3906 oldcookiec = cmd->cmd_cookiec;
3907
3908 /*
3909 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3910 * greater than 0 and we'll need to grab the next dma window
3911 */
3912 /*
3913 * SLM-not doing extra command frame right now; may add later
3914 */
3915
3916 if (cmd->cmd_nwin > 0) {
3917
3918 /*
		 * Make sure we haven't gone past the total number
3998 case DDI_DMA_NOMAPPING:
3999 bioerror(bp, EFAULT);
4000 break;
4001 case DDI_DMA_TOOBIG:
4002 default:
4003 bioerror(bp, EINVAL);
4004 break;
4005 }
4006 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4007 if (new_cmd) {
4008 mptsas_scsi_destroy_pkt(ap, pkt);
4009 }
4010 return ((struct scsi_pkt *)NULL);
4011 }
4012
4013 get_dma_cookies:
4014 cmd->cmd_flags |= CFLAG_DMAVALID;
4015 ASSERT(cmd->cmd_cookiec > 0);
4016
4017 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
4018 mptsas_log(mpt, CE_NOTE, "large cookiec received %d",
4019 cmd->cmd_cookiec);
4020 bioerror(bp, EINVAL);
4021 if (new_cmd) {
4022 mptsas_scsi_destroy_pkt(ap, pkt);
4023 }
4024 return ((struct scsi_pkt *)NULL);
4025 }
4026
4027 /*
4028 * Allocate extra SGL buffer if needed.
4029 */
4030 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
4031 (cmd->cmd_extra_frames == NULL)) {
4032 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
4033 DDI_FAILURE) {
4034 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
4035 "failed");
4036 bioerror(bp, ENOMEM);
4037 if (new_cmd) {
4038 mptsas_scsi_destroy_pkt(ap, pkt);
4160 ap->a_target, (void *)pkt));
4161
4162 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4163 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4164 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4165 }
4166
4167 if (cmd->cmd_sg) {
4168 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
4169 cmd->cmd_sg = NULL;
4170 }
4171
4172 mptsas_free_extra_sgl_frame(mpt, cmd);
4173
4174 if ((cmd->cmd_flags &
4175 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
4176 CFLAG_SCBEXTERN)) == 0) {
4177 cmd->cmd_flags = CFLAG_FREE;
4178 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4179 } else {
4180 mptsas_pkt_destroy_extern(mpt, cmd);
4181 }
4182 }
4183
4184 /*
4185 * kmem cache constructor and destructor:
4186 * When constructing, we bzero the cmd and allocate the dma handle
4187 * When destructing, just free the dma handle
4188 */
4189 static int
4190 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4191 {
4192 mptsas_cmd_t *cmd = buf;
4193 mptsas_t *mpt = cdrarg;
4194 int (*callback)(caddr_t);
4195
4196 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4197
4198 NDBG4(("mptsas_kmem_cache_constructor"));
4199
4200 /*
4201 * allocate a dma handle
5320 mptsas_handle_scsi_io_success(mptsas_t *mpt,
5321 pMpi2ReplyDescriptorsUnion_t reply_desc)
5322 {
5323 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
5324 uint16_t SMID;
5325 mptsas_slots_t *slots = mpt->m_active;
5326 mptsas_cmd_t *cmd = NULL;
5327 struct scsi_pkt *pkt;
5328
5329 ASSERT(mutex_owned(&mpt->m_mutex));
5330
5331 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
5332 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
5333
5334 /*
5335 * This is a success reply so just complete the IO. First, do a sanity
5336 * check on the SMID. The final slot is used for TM requests, which
5337 * would not come into this reply handler.
5338 */
5339 if ((SMID == 0) || (SMID > slots->m_n_normal)) {
5340 mptsas_log(mpt, CE_WARN, "received invalid SMID of %d", SMID);
5341 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5342 return;
5343 }
5344
5345 cmd = slots->m_slot[SMID];
5346
5347 /*
5348 * print warning and return if the slot is empty
5349 */
5350 if (cmd == NULL) {
5351 mptsas_log(mpt, CE_WARN, "NULL command for successful SCSI IO "
5352 "in slot %d", SMID);
5353 return;
5354 }
5355
5356 pkt = CMD2PKT(cmd);
5357 ASSERT(pkt->pkt_start != 0);
5358 pkt->pkt_stop = gethrtime();
5359 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
5360 STATE_GOT_STATUS);
5361 if (cmd->cmd_flags & CFLAG_DMAVALID) {
5362 pkt->pkt_state |= STATE_XFERRED_DATA;
5363 }
5364 pkt->pkt_resid = 0;
5365
5366 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
5367 cmd->cmd_flags |= CFLAG_FINISHED;
5368 cv_broadcast(&mpt->m_passthru_cv);
5369 return;
5370 } else {
5371 mptsas_remove_cmd(mpt, cmd);
5372 }
5373
5374 if (cmd->cmd_flags & CFLAG_RETRY) {
5375 /*
		 * The target returned QFULL or busy, do not add this
5377 * pkt to the doneq since the hba will retry
5378 * this cmd.
5402 m_replyh_arg_t *args;
5403 int reply_frame_no;
5404
5405 ASSERT(mutex_owned(&mpt->m_mutex));
5406
5407 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5408 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5409 &address_reply->ReplyFrameAddress);
5410 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5411
5412 /*
5413 * If reply frame is not in the proper range we should ignore this
5414 * message and exit the interrupt handler.
5415 */
5416 reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
5417 if ((reply_addr < reply_frame_dma_baseaddr) ||
5418 (reply_addr >= (reply_frame_dma_baseaddr +
5419 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5420 ((reply_addr - reply_frame_dma_baseaddr) %
5421 mpt->m_reply_frame_size != 0)) {
5422 mptsas_log(mpt, CE_WARN, "received invalid reply frame "
5423 "address 0x%x", reply_addr);
5424 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5425 return;
5426 }
5427
5428 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5429 DDI_DMA_SYNC_FORCPU);
5430 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5431 reply_frame_dma_baseaddr));
5432 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5433
5434 NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5435 function, reply_addr));
5436
5437 /*
5438 * don't get slot information and command for events since these values
5439 * don't exist
5440 */
5441 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5442 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5443 /*
5444 * This could be a TM reply, which use the last allocated SMID,
5445 * so allow for that.
5446 */
5447 if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5448 mptsas_log(mpt, CE_WARN, "received invalid SMID of "
5449 "%d", SMID);
5450 ddi_fm_service_impact(mpt->m_dip,
5451 DDI_SERVICE_UNAFFECTED);
5452 return;
5453 }
5454
5455 cmd = slots->m_slot[SMID];
5456
5457 /*
5458 * print warning and return if the slot is empty
5459 */
5460 if (cmd == NULL) {
5461 mptsas_log(mpt, CE_WARN, "NULL command for address "
5462 "reply in slot %d", SMID);
5463 return;
5464 }
5465 if ((cmd->cmd_flags &
5466 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5467 cmd->cmd_rfm = reply_addr;
5468 cmd->cmd_flags |= CFLAG_FINISHED;
5469 cv_broadcast(&mpt->m_passthru_cv);
5470 cv_broadcast(&mpt->m_config_cv);
5471 cv_broadcast(&mpt->m_fw_diag_cv);
5472 return;
5473 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5474 mptsas_remove_cmd(mpt, cmd);
5475 }
5476 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5477 }
5478 /*
5479 * Depending on the function, we need to handle
5480 * the reply frame (and cmd) differently.
5481 */
5554 &reply->IOCStatus);
5555 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5556 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5557 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5558 pBuffer =
5559 &mpt->m_fw_diag_buffer_list[buffer_type];
5560 pBuffer->valid_data = TRUE;
5561 pBuffer->owned_by_firmware = FALSE;
5562 pBuffer->immediate = FALSE;
5563 }
5564 } else {
5565 /*
5566 * Normal handling of diag post reply with SMID.
5567 */
5568 cmd = slots->m_slot[SMID];
5569
5570 /*
5571 * print warning and return if the slot is empty
5572 */
5573 if (cmd == NULL) {
5574 mptsas_log(mpt, CE_WARN, "NULL command for "
5575 "address reply in slot %d", SMID);
5576 return;
5577 }
5578 cmd->cmd_rfm = reply_addr;
5579 cmd->cmd_flags |= CFLAG_FINISHED;
5580 cv_broadcast(&mpt->m_fw_diag_cv);
5581 }
5582 return;
5583 default:
5584 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5585 break;
5586 }
5587
5588 /*
5589 * Return the reply frame to the free queue.
5590 */
5591 ddi_put32(mpt->m_acc_free_queue_hdl,
5592 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5593 reply_addr);
5594 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5639
5640 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5641 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5642 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5643 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5644 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5645 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5646 &reply->ResponseInfo);
5647
5648 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5649 sas_wwn = ptgt->m_addr.mta_wwn;
5650 phy = ptgt->m_phynum;
5651 if (sas_wwn == 0) {
5652 (void) sprintf(wwn_str, "p%x", phy);
5653 } else {
5654 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5655 }
5656 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5657 &reply->IOCLogInfo);
5658 mptsas_log(mpt, CE_NOTE,
5659 "log info 0x%x received for target %d %s, "
5660 "scsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5661 loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5662 scsi_state);
5663 }
5664
5665 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5666 scsi_status, ioc_status, scsi_state));
5667
5668 pkt = CMD2PKT(cmd);
5669 ASSERT(pkt->pkt_start != 0);
5670 pkt->pkt_stop = gethrtime();
5671 *(pkt->pkt_scbp) = scsi_status;
5672
5673 if (loginfo == 0x31170000) {
5674 /*
5675 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5676 * 0x31170000 comes, that means the device missing delay
5677 * is in progressing, the command need retry later.
5678 */
5679 *(pkt->pkt_scbp) = STATUS_BUSY;
5680 return;
5681 }
5682
5683 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5684 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5685 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5686 pkt->pkt_reason = CMD_INCOMPLETE;
5687 pkt->pkt_state |= STATE_GOT_BUS;
5688 if (ptgt->m_reset_delay == 0) {
5689 mptsas_set_throttle(mpt, ptgt,
5690 DRAIN_THROTTLE);
5691 }
5692 return;
5693 }
5694
5695 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5696 responsedata &= 0x000000FF;
5697 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5698 mptsas_log(mpt, CE_NOTE, "TLR not supported");
5699 pkt->pkt_reason = CMD_TLR_OFF;
5700 return;
5701 }
5702 }
5703
5704
5705 switch (scsi_status) {
5706 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5707 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5708 arqstat = (void*)(pkt->pkt_scbp);
5709 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5710 (pkt->pkt_scbp));
5711 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5712 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5713 if (cmd->cmd_flags & CFLAG_XARQ) {
5714 pkt->pkt_state |= STATE_XARQ_DONE;
5715 }
5716 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5717 pkt->pkt_state |= STATE_XFERRED_DATA;
5718 }
5754 }
5755
5756 /*
5757 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5758 * ASC/ASCQ=0x25/0x00 means invalid lun
5759 */
5760 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5761 (scsi_sense_asc(sensedata) == 0x3F) &&
5762 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5763 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5764 (scsi_sense_asc(sensedata) == 0x25) &&
5765 (scsi_sense_ascq(sensedata) == 0x00))) {
5766 mptsas_topo_change_list_t *topo_node = NULL;
5767
5768 topo_node = kmem_zalloc(
5769 sizeof (mptsas_topo_change_list_t),
5770 KM_NOSLEEP);
5771 if (topo_node == NULL) {
5772 mptsas_log(mpt, CE_NOTE, "No memory"
5773 "resource for handle SAS dynamic"
5774 "reconfigure");
5775 break;
5776 }
5777 topo_node->mpt = mpt;
5778 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5779 topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5780 topo_node->devhdl = ptgt->m_devhdl;
5781 topo_node->object = (void *)ptgt;
5782 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5783
5784 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5785 mptsas_handle_dr,
5786 (void *)topo_node,
5787 DDI_NOSLEEP)) != DDI_SUCCESS) {
5788 kmem_free(topo_node,
5789 sizeof (mptsas_topo_change_list_t));
5790 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5791 "for handle SAS dynamic reconfigure"
5792 "failed");
5793 }
5794 }
5795 break;
5796 case MPI2_SCSI_STATUS_GOOD:
5797 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5798 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5799 pkt->pkt_reason = CMD_DEV_GONE;
5800 pkt->pkt_state |= STATE_GOT_BUS;
5801 if (ptgt->m_reset_delay == 0) {
5802 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5803 }
5804 NDBG31(("lost disk for target%d, command:%x",
5805 Tgt(cmd), pkt->pkt_cdbp[0]));
5806 break;
5807 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5808 NDBG31(("data overrun: xferred=%d", xferred));
5809 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5810 pkt->pkt_reason = CMD_DATA_OVR;
5811 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5812 | STATE_SENT_CMD | STATE_GOT_STATUS
5852 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5853 case MPI2_IOCSTATUS_BUSY:
5854 /*
5855 * set throttles to drain
5856 */
5857 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5858 ptgt = refhash_next(mpt->m_targets, ptgt)) {
5859 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5860 }
5861
5862 /*
5863 * retry command
5864 */
5865 cmd->cmd_flags |= CFLAG_RETRY;
5866 cmd->cmd_pkt_flags |= FLAG_HEAD;
5867
5868 (void) mptsas_accept_pkt(mpt, cmd);
5869 break;
5870 default:
5871 mptsas_log(mpt, CE_WARN,
5872 "unknown ioc_status = %x", ioc_status);
5873 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5874 "count = %x, scsi_status = %x", scsi_state,
5875 xferred, scsi_status);
5876 break;
5877 }
5878 break;
5879 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5880 mptsas_handle_qfull(mpt, cmd);
5881 break;
5882 case MPI2_SCSI_STATUS_BUSY:
5883 NDBG31(("scsi_status busy received"));
5884 break;
5885 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5886 NDBG31(("scsi_status reservation conflict received"));
5887 break;
5888 default:
5889 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x",
5890 scsi_status, ioc_status);
5891 mptsas_log(mpt, CE_WARN,
5892 "mptsas_process_intr: invalid scsi status");
5893 break;
5894 }
5895 }
5896
5897 static void
5898 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5899 mptsas_cmd_t *cmd)
5900 {
5901 uint8_t task_type;
5902 uint16_t ioc_status;
5903 uint32_t log_info;
5904 uint16_t dev_handle;
5905 struct scsi_pkt *pkt = CMD2PKT(cmd);
5906
5907 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5908 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5909 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5910 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5911
5912 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5913 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5914 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d",
5915 task_type, ioc_status, log_info, dev_handle);
5916 pkt->pkt_reason = CMD_INCOMPLETE;
5917 return;
5918 }
5919
5920 switch (task_type) {
5921 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5922 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5923 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5924 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5925 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5926 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5927 break;
5928 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5929 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5930 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5931 /*
5932 * Check for invalid DevHandle of 0 in case application
5933 * sends bad command. DevHandle of 0 could cause problems.
5934 */
5935 if (dev_handle == 0) {
5936 mptsas_log(mpt, CE_WARN, "Can't flush target with"
5937 " DevHandle of 0.");
5938 } else {
5939 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5940 task_type);
5941 }
5942 break;
5943 default:
5944 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5945 task_type);
5946 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5947 break;
5948 }
5949 }
5950
5951 static void
5952 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
5953 {
5954 mptsas_t *mpt = arg->mpt;
5955 uint64_t t = arg->t;
5956 mptsas_cmd_t *cmd;
6103 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
6104 {
6105 uint8_t reply_type;
6106
6107 ASSERT(mutex_owned(&mpt->m_mutex));
6108
6109 /*
6110 * The reply is valid, process it according to its
6111 * type. Also, set a flag for updated the reply index
6112 * after they've all been processed.
6113 */
6114 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
6115 &reply_desc_union->Default.ReplyFlags);
6116 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
6117 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
6118 reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
6119 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
6120 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
6121 mptsas_handle_address_reply(mpt, reply_desc_union);
6122 } else {
6123 mptsas_log(mpt, CE_WARN, "bad reply type %x", reply_type);
6124 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
6125 }
6126
6127 /*
6128 * Clear the reply descriptor for re-use and increment
6129 * index.
6130 */
6131 ddi_put64(mpt->m_acc_post_queue_hdl,
6132 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
6133 0xFFFFFFFFFFFFFFFF);
6134 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
6135 DDI_DMA_SYNC_FORDEV);
6136 }
6137
6138 /*
6139 * handle qfull condition
6140 */
6141 static void
6142 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
6143 {
6547 }
6548 (void) sprintf(phy_mask_name, "%x", phymask);
6549 }
6550 parent = scsi_hba_iport_find(mpt->m_dip,
6551 phy_mask_name);
6552 if (parent == NULL) {
6553 mptsas_log(mpt, CE_WARN, "Failed to find an "
6554 "iport, should not happen!");
6555 goto out;
6556 }
6557
6558 }
6559 ASSERT(parent);
6560 handle_topo_change:
6561
6562 mutex_enter(&mpt->m_mutex);
6563 /*
6564 * If HBA is being reset, don't perform operations depending
6565 * on the IOC. We must free the topo list, however.
6566 */
6567 if (!mpt->m_in_reset) {
6568 mptsas_handle_topo_change(topo_node, parent);
6569 } else {
6570 NDBG20(("skipping topo change received during reset"));
6571 }
6572 save_node = topo_node;
6573 topo_node = topo_node->next;
6574 ASSERT(save_node);
6575 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6576 mutex_exit(&mpt->m_mutex);
6577
6578 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6579 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6580 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6581 /*
6582 * If direct attached device associated, make sure
6583 * reset the parent before start the next one. But
6584 * all devices associated with expander shares the
6585 * parent. Also, reset parent if this is for RAID.
6586 */
6587 parent = NULL;
6588 }
6589 }
6590 out:
6591 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6637 if (ptgt == NULL) {
6638 /*
6639 * If a Phys Disk was deleted, RAID info needs to be
6640 * updated to reflect the new topology.
6641 */
6642 (void) mptsas_get_raid_info(mpt);
6643
6644 /*
6645 * Get sas device page 0 by DevHandle to make sure if
6646 * SSP/SATA end device exist.
6647 */
6648 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6649 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6650 topo_node->devhdl;
6651
6652 rval = mptsas_get_target_device_info(mpt, page_address,
6653 &devhdl, &ptgt);
6654 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6655 mptsas_log(mpt, CE_NOTE,
6656 "mptsas_handle_topo_change: target %d is "
6657 "not a SAS/SATA device",
6658 topo_node->devhdl);
6659 } else if (rval == DEV_INFO_FAIL_ALLOC) {
6660 mptsas_log(mpt, CE_NOTE,
6661 "mptsas_handle_topo_change: could not "
6662 "allocate memory");
6663 } else if (rval == DEV_INFO_FAIL_GUID) {
6664 mptsas_log(mpt, CE_NOTE,
6665 "mptsas_handle_topo_change: could not "
6666 "get SATA GUID for target %d",
6667 topo_node->devhdl);
6668 }
6669 /*
6670 * If rval is DEV_INFO_PHYS_DISK or indicates failure
6671 * then there is nothing else to do, just leave.
6672 */
6673 if (rval != DEV_INFO_SUCCESS) {
6674 return;
6675 }
6676 }
6677
6678 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6679
6680 mutex_exit(&mpt->m_mutex);
6681 flags = topo_node->flags;
6682
6683 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6684 phymask = ptgt->m_addr.mta_phymask;
6685 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6686 (void) sprintf(phy_mask_name, "%x", phymask);
6873 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6874 MPTSAS_NUM_PHYS);
6875 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6876 "prop update failed");
6877 mutex_enter(&mpt->m_mutex);
6878 break;
6879 }
6880 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6881 MPTSAS_VIRTUAL_PORT, 1) !=
6882 DDI_PROP_SUCCESS) {
6883 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6884 MPTSAS_VIRTUAL_PORT);
6885 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6886 "prop update failed");
6887 mutex_enter(&mpt->m_mutex);
6888 break;
6889 }
6890 }
6891
6892 mutex_enter(&mpt->m_mutex);
6893 if (rval == DDI_SUCCESS) {
6894 refhash_remove(mpt->m_targets, ptgt);
6895 ptgt = NULL;
6896 } else {
6897 /*
6898 * clean DR_INTRANSITION flag to allow I/O down to
6899 * PHCI driver since failover finished.
6900 * Invalidate the devhdl
6901 */
6902 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6903 ptgt->m_tgt_unconfigured = 0;
6904 mutex_enter(&mpt->m_tx_waitq_mutex);
6905 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6906 mutex_exit(&mpt->m_tx_waitq_mutex);
6907 }
6908
6909 /*
6910 * Send SAS IO Unit Control to free the dev handle
6911 */
6912 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6972
6973 mutex_enter(&mpt->m_mutex);
6974 break;
6975 }
6976 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6977 {
6978 devhdl = topo_node->devhdl;
6979 uint32_t dev_info;
6980
6981 psmp = refhash_linear_search(mpt->m_smp_targets,
6982 mptsas_smp_eval_devhdl, &devhdl);
6983 if (psmp == NULL)
6984 break;
6985 /*
6986 * The mptsas_smp_t data is released only if the dip is offlined
6987 * successfully.
6988 */
6989 mutex_exit(&mpt->m_mutex);
6990
6991 ndi_devi_enter(parent, &circ1);
6992 rval = mptsas_offline_smp(parent, psmp);
6993 ndi_devi_exit(parent, circ1);
6994
6995 dev_info = psmp->m_deviceinfo;
6996 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6997 DEVINFO_DIRECT_ATTACHED) {
6998 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6999 MPTSAS_VIRTUAL_PORT, 1) !=
7000 DDI_PROP_SUCCESS) {
7001 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
7002 MPTSAS_VIRTUAL_PORT);
7003 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
7004 "prop update failed");
7005 mutex_enter(&mpt->m_mutex);
7006 return;
7007 }
7008 /*
7009 * Check whether the smp connected to the iport,
7010 */
7011 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
7012 MPTSAS_NUM_PHYS, 0) !=
7013 DDI_PROP_SUCCESS) {
7014 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
7015 MPTSAS_NUM_PHYS);
7016 mptsas_log(mpt, CE_WARN, "mptsas num phys"
7017 "prop update failed");
7018 mutex_enter(&mpt->m_mutex);
7019 return;
7020 }
7021 /*
7022 * Clear parent's attached-port props
7023 */
7024 bzero(attached_wwnstr, sizeof (attached_wwnstr));
7025 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
7026 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
7027 DDI_PROP_SUCCESS) {
7028 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
7029 SCSI_ADDR_PROP_ATTACHED_PORT);
7030 mptsas_log(mpt, CE_WARN, "mptsas attached port "
7031 "prop update failed");
7032 mutex_enter(&mpt->m_mutex);
7033 return;
7034 }
7035 }
7036
7037 mutex_enter(&mpt->m_mutex);
7038 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
7039 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
7040 if (rval == DDI_SUCCESS) {
7041 refhash_remove(mpt->m_smp_targets, psmp);
7042 } else {
7043 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
7044 }
7045
7046 bzero(attached_wwnstr, sizeof (attached_wwnstr));
7047
7048 break;
7049 }
7050 default:
7051 return;
7052 }
7147 static int
7148 mptsas_handle_event_sync(void *args)
7149 {
7150 m_replyh_arg_t *replyh_arg;
7151 pMpi2EventNotificationReply_t eventreply;
7152 uint32_t event, rfm;
7153 mptsas_t *mpt;
7154 uint_t iocstatus;
7155
7156 replyh_arg = (m_replyh_arg_t *)args;
7157 rfm = replyh_arg->rfm;
7158 mpt = replyh_arg->mpt;
7159
7160 ASSERT(mutex_owned(&mpt->m_mutex));
7161
7162 eventreply = (pMpi2EventNotificationReply_t)
7163 (mpt->m_reply_frame + (rfm -
7164 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7165 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7166
7167 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7168 &eventreply->IOCStatus)) != 0) {
7169 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7170 mptsas_log(mpt, CE_WARN,
7171 "mptsas_handle_event_sync: event 0x%x, "
7172 "IOCStatus=0x%x, "
7173 "IOCLogInfo=0x%x", event, iocstatus,
7174 ddi_get32(mpt->m_acc_reply_frame_hdl,
7175 &eventreply->IOCLogInfo));
7176 } else {
7177 mptsas_log(mpt, CE_WARN,
7178 "mptsas_handle_event_sync: event 0x%x, "
7179 "IOCStatus=0x%x, "
7180 "(IOCLogInfo=0x%x)", event, iocstatus,
7181 ddi_get32(mpt->m_acc_reply_frame_hdl,
7182 &eventreply->IOCLogInfo));
7183 }
7184 }
7185
7186 /*
7187 * figure out what kind of event we got and handle accordingly
7188 */
7189 switch (event) {
7190 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7191 {
7623 break;
7624 }
7625 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7626 mpt->m_instance, phy, dev_handle, string, curr,
7627 prev));
7628 }
7629 if (topo_head != NULL) {
7630 /*
7631 * Launch DR taskq to handle topology change
7632 */
7633 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7634 mptsas_handle_dr, (void *)topo_head,
7635 DDI_NOSLEEP)) != DDI_SUCCESS) {
7636 while (topo_head != NULL) {
7637 topo_node = topo_head;
7638 topo_head = topo_head->next;
7639 kmem_free(topo_node,
7640 sizeof (mptsas_topo_change_list_t));
7641 }
7642 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7643 "for handle SAS DR event failed");
7644 }
7645 }
7646 break;
7647 }
7648 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7649 {
7650 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7651 mptsas_topo_change_list_t *topo_head = NULL;
7652 mptsas_topo_change_list_t *topo_tail = NULL;
7653 mptsas_topo_change_list_t *topo_node = NULL;
7654 mptsas_target_t *ptgt;
7655 uint8_t num_entries, i, reason;
7656 uint16_t volhandle, diskhandle;
7657
7658 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7659 eventreply->EventData;
7660 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7661 &irChangeList->NumElements);
7662
7663 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7807 }
7808 default:
7809 break;
7810 }
7811 }
7812
7813 if (topo_head != NULL) {
7814 /*
7815 * Launch DR taskq to handle topology change
7816 */
7817 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7818 mptsas_handle_dr, (void *)topo_head,
7819 DDI_NOSLEEP)) != DDI_SUCCESS) {
7820 while (topo_head != NULL) {
7821 topo_node = topo_head;
7822 topo_head = topo_head->next;
7823 kmem_free(topo_node,
7824 sizeof (mptsas_topo_change_list_t));
7825 }
7826 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7827 "for handle SAS DR event failed");
7828 }
7829 }
7830 break;
7831 }
7832 default:
7833 return (DDI_FAILURE);
7834 }
7835
7836 return (DDI_SUCCESS);
7837 }
7838
7839 /*
7840 * handle events from ioc
7841 */
7842 static void
7843 mptsas_handle_event(void *args)
7844 {
7845 m_replyh_arg_t *replyh_arg;
7846 pMpi2EventNotificationReply_t eventreply;
7847 uint32_t event, iocloginfo, rfm;
7852
7853 replyh_arg = (m_replyh_arg_t *)args;
7854 rfm = replyh_arg->rfm;
7855 mpt = replyh_arg->mpt;
7856
7857 mutex_enter(&mpt->m_mutex);
7858 /*
7859 * If HBA is being reset, drop incoming event.
7860 */
7861 if (mpt->m_in_reset) {
7862 NDBG20(("dropping event received prior to reset"));
7863 mutex_exit(&mpt->m_mutex);
7864 return;
7865 }
7866
7867 eventreply = (pMpi2EventNotificationReply_t)
7868 (mpt->m_reply_frame + (rfm -
7869 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7870 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7871
7872 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7873 &eventreply->IOCStatus)) != 0) {
7874 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7875 mptsas_log(mpt, CE_WARN,
7876 "mptsas_handle_event: IOCStatus=0x%x, "
7877 "IOCLogInfo=0x%x", iocstatus,
7878 ddi_get32(mpt->m_acc_reply_frame_hdl,
7879 &eventreply->IOCLogInfo));
7880 } else {
7881 mptsas_log(mpt, CE_WARN,
7882 "mptsas_handle_event: IOCStatus=0x%x, "
7883 "IOCLogInfo=0x%x", iocstatus,
7884 ddi_get32(mpt->m_acc_reply_frame_hdl,
7885 &eventreply->IOCLogInfo));
7886 }
7887 }
7888
7889 /*
7890 * figure out what kind of event we got and handle accordingly
7891 */
7892 switch (event) {
7893 case MPI2_EVENT_LOG_ENTRY_ADDED:
7894 break;
7895 case MPI2_EVENT_LOG_DATA:
7896 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7967 uint16_t enchdl;
7968 char string[80];
7969 mptsas_enclosure_t *mep;
7970
7971 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7972 eventreply->EventData;
7973
7974 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7975 &encstatus->ReasonCode);
7976 enchdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7977 &encstatus->EnclosureHandle);
7978
7979 switch (rc) {
7980 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7981 (void) sprintf(string, "added");
7982 break;
7983 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7984 mep = mptsas_enc_lookup(mpt, enchdl);
7985 if (mep != NULL) {
7986 list_remove(&mpt->m_enclosures, mep);
7987 mptsas_enc_free(mep);
7988 mep = NULL;
7989 }
7990 (void) sprintf(string, ", not responding");
7991 break;
7992 default:
7993 break;
7994 }
7995 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7996 "%x%s\n", mpt->m_instance,
7997 ddi_get16(mpt->m_acc_reply_frame_hdl,
7998 &encstatus->EnclosureHandle), string));
7999
8000 /*
8001 * No matter what has happened, update all of our device state
8002 * for enclosures, by retriggering an evaluation.
8003 */
8004 mpt->m_done_traverse_enc = 0;
8005 mptsas_update_hashtab(mpt);
8006 break;
8007 }
8008
8235 found = TRUE;
8236 break;
8237 }
8238 }
8239 }
8240 if (!found) {
8241 break;
8242 }
8243
8244 switch (irVolume->ReasonCode) {
8245 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
8246 {
8247 uint32_t i;
8248 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
8249 state;
8250
8251 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
8252 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
8253 ", auto-config of hot-swap drives is %s"
8254 ", write caching is %s"
8255 ", hot-spare pool mask is %02x",
8256 vol, state &
8257 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
8258 ? "disabled" : "enabled",
8259 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
8260 ? "controlled by member disks" :
8261 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
8262 ? "disabled" :
8263 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
8264 ? "enabled" :
8265 "incorrectly set",
8266 (state >> 16) & 0xff);
8267 break;
8268 }
8269 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
8270 {
8271 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
8272 (uint8_t)state;
8273
8274 mptsas_log(mpt, CE_NOTE,
8275 "Volume %d is now %s", vol,
8276 state == MPI2_RAID_VOL_STATE_OPTIMAL
8277 ? "optimal" :
8278 state == MPI2_RAID_VOL_STATE_DEGRADED
8279 ? "degraded" :
8280 state == MPI2_RAID_VOL_STATE_ONLINE
8281 ? "online" :
8282 state == MPI2_RAID_VOL_STATE_INITIALIZING
8283 ? "initializing" :
8284 state == MPI2_RAID_VOL_STATE_FAILED
8285 ? "failed" :
8286 state == MPI2_RAID_VOL_STATE_MISSING
8287 ? "missing" :
8288 "state unknown");
8289 break;
8290 }
8291 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
8292 {
8293 mpt->m_raidconfig[config].m_raidvol[vol].
8294 m_statusflags = state;
8295
8296 mptsas_log(mpt, CE_NOTE,
8297 " Volume %d is now %s%s%s%s%s%s%s%s%s",
8298 vol,
8299 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
8300 ? ", enabled" : ", disabled",
8301 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
8302 ? ", quiesced" : "",
8303 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
8304 ? ", inactive" : ", active",
8305 state &
8306 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
8307 ? ", bad block table is full" : "",
8308 state &
8309 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
8310 ? ", resync in progress" : "",
8311 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8312 ? ", background initialization in progress" : "",
8313 state &
8314 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8315 ? ", capacity expansion in progress" : "",
8316 state &
8317 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8347 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8348 &irPhysDisk->ReasonCode);
8349
8350 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8351
8352 switch (reason) {
8353 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8354 mptsas_log(mpt, CE_NOTE,
8355 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8356 "for enclosure with handle 0x%x is now in hot "
8357 "spare pool %d",
8358 physdisknum, devhandle, slot, enchandle,
8359 (state >> 16) & 0xff);
8360 break;
8361
8362 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8363 status = state;
8364 mptsas_log(mpt, CE_NOTE,
8365 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8366 "for enclosure with handle 0x%x is now "
8367 "%s%s%s%s%s", physdisknum, devhandle, slot,
8368 enchandle,
8369 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8370 ? ", inactive" : ", active",
8371 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8372 ? ", out of sync" : "",
8373 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8374 ? ", quiesced" : "",
8375 status &
8376 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8377 ? ", write cache enabled" : "",
8378 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8379 ? ", capacity expansion target" : "");
8380 break;
8381
8382 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8383 mptsas_log(mpt, CE_NOTE,
8384 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8385 "for enclosure with handle 0x%x is now %s",
8386 physdisknum, devhandle, slot, enchandle,
8387 state == MPI2_RAID_PD_STATE_OPTIMAL
8388 ? "optimal" :
8389 state == MPI2_RAID_PD_STATE_REBUILDING
8390 ? "rebuilding" :
8391 state == MPI2_RAID_PD_STATE_DEGRADED
8392 ? "degraded" :
8393 state == MPI2_RAID_PD_STATE_HOT_SPARE
8394 ? "a hot spare" :
8395 state == MPI2_RAID_PD_STATE_ONLINE
8396 ? "online" :
8397 state == MPI2_RAID_PD_STATE_OFFLINE
8398 ? "offline" :
8399 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8400 ? "not compatible" :
8401 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8402 ? "not configured" :
8403 "state unknown");
8404 break;
8405 }
8406 break;
8407 }
8408 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
8409 {
8410 pMpi26EventDataActiveCableExcept_t actcable;
8411 uint32_t power;
8412 uint8_t reason, id;
8413
8414 actcable = (pMpi26EventDataActiveCableExcept_t)
8415 eventreply->EventData;
8416 power = ddi_get32(mpt->m_acc_reply_frame_hdl,
8417 &actcable->ActiveCablePowerRequirement);
8418 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8419 &actcable->ReasonCode);
8420 id = ddi_get8(mpt->m_acc_reply_frame_hdl,
8421 &actcable->ReceptacleID);
8422
8423 /*
8424 * It'd be nice if this weren't just logging to the system but
8425 * were telling FMA about the active cable problem and FMA was
8426 * aware of the cable topology and state.
8427 */
8428 switch (reason) {
8429 case MPI26_EVENT_ACTIVE_CABLE_PRESENT:
8430 /* Don't log anything if it's fine */
8431 break;
8432 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8433 mptsas_log(mpt, CE_WARN, "An active cable (id %u) does "
8434 "not have sufficient power to be enabled. "
8435 "Devices connected to this cable will not be "
8436 "visible to the system.", id);
8437 if (power == UINT32_MAX) {
8438 mptsas_log(mpt, CE_CONT, "The cable's power "
8439 "requirements are unknown.\n");
8440 } else {
8441 mptsas_log(mpt, CE_CONT, "The cable requires "
8442 "%u mW of power to function.\n", power);
8443 }
8444 break;
8445 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
8446 mptsas_log(mpt, CE_WARN, "An active cable (id %u) is "
8447 "degraded and not running at its full speed. "
8448 "Some devices might not appear.", id);
8449 break;
8450 default:
8451 break;
8452 }
8453 break;
8454 }
8455 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
8456 case MPI2_EVENT_PCIE_ENUMERATION:
8457 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
8458 case MPI2_EVENT_PCIE_LINK_COUNTER:
8459 mptsas_log(mpt, CE_NOTE, "Unhandled mpt_sas PCIe device "
8460 "event received (0x%x)", event);
8461 break;
8462 default:
8463 NDBG20(("mptsas%d: unknown event %x received",
8464 mpt->m_instance, event));
8465 break;
8466 }
8467
8468 /*
8469 * Return the reply frame to the free queue.
8470 */
8471 ddi_put32(mpt->m_acc_free_queue_hdl,
8472 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8473 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8474 DDI_DMA_SYNC_FORDEV);
8475 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8476 mpt->m_free_index = 0;
8477 }
8478 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8479 mpt->m_free_index);
8480 mutex_exit(&mpt->m_mutex);
8481 }
8482
8689
8690 /*
8691 * A Bus Reset could occur at any time and flush the tx_waitq,
8692 * so we cannot count on the tx_waitq to contain even one cmd.
8693 * And when the m_tx_waitq_mutex is released and run
8694 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8695 */
8696 cmd = mpt->m_tx_waitq;
8697 for (;;) {
8698 if ((cmd = mpt->m_tx_waitq) == NULL) {
8699 mpt->m_tx_draining = 0;
8700 break;
8701 }
8702 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8703 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8704 }
8705 cmd->cmd_linkp = NULL;
8706 mutex_exit(&mpt->m_tx_waitq_mutex);
8707 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8708 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8709 "to accept cmd on queue");
8710 mutex_enter(&mpt->m_tx_waitq_mutex);
8711 }
8712 }
8713
8714
8715 /*
8716 * mpt tag type lookup
8717 */
8718 static char mptsas_tag_lookup[] =
8719 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8720
8721 static int
8722 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8723 {
8724 struct scsi_pkt *pkt = CMD2PKT(cmd);
8725 uint32_t control = 0;
8726 caddr_t mem, arsbuf;
8727 pMpi2SCSIIORequest_t io_request;
8728 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
8729 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
8775 }
8776 return (DDI_FAILURE);
8777 }
8778
8779 /*
8780 * Set correct tag bits.
8781 */
8782 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8783 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8784 FLAG_TAGMASK) >> 12)]) {
8785 case MSG_SIMPLE_QTAG:
8786 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8787 break;
8788 case MSG_HEAD_QTAG:
8789 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8790 break;
8791 case MSG_ORDERED_QTAG:
8792 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8793 break;
8794 default:
8795 mptsas_log(mpt, CE_WARN, "invalid tag type");
8796 break;
8797 }
8798 } else {
8799 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8800 ptgt->m_t_throttle = 1;
8801 }
8802 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8803 }
8804
8805 if (cmd->cmd_pkt_flags & FLAG_TLR) {
8806 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8807 }
8808
8809 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8810 io_request = (pMpi2SCSIIORequest_t)mem;
8811 if (cmd->cmd_extrqslen != 0) {
8812 /*
8813 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
8814 * Calculate the DMA address with the same offset.
8815 */
8856 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8857 MPI2_SGE_FLAGS_END_OF_BUFFER |
8858 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8859 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8860 }
8861
8862 /*
8863 * save ARQ information
8864 */
8865 ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
8866 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);
8867
8868 ddi_put32(acc_hdl, &io_request->Control, control);
8869
8870 NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
8871 SMID, (void *)io_request, (void *)cmd));
8872
8873 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8874 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
8875 DDI_DMA_SYNC_FORDEV);
8876 pkt->pkt_start = gethrtime();
8877
8878 /*
8879 * Build request descriptor and write it to the request desc post reg.
8880 */
8881 request_desc |= (SMID << 16);
8882 request_desc |= (uint64_t)ptgt->m_devhdl << 48;
8883 MPTSAS_START_CMD(mpt, request_desc);
8884
8885 /*
8886 * Start timeout.
8887 */
8888 cmd->cmd_active_expiration = pkt->pkt_start +
8889 (hrtime_t)pkt->pkt_time * (hrtime_t)NANOSEC;
8890
8891 #ifdef MPTSAS_TEST
8892 /*
8893 * Force timeouts to happen immediately.
8894 */
8895 if (mptsas_test_timeouts)
8896 cmd->cmd_active_expiration = gethrtime();
8897 #endif
8898 c = TAILQ_FIRST(&ptgt->m_active_cmdq);
8899 if (c == NULL ||
8900 c->cmd_active_expiration < cmd->cmd_active_expiration) {
8901 /*
8902 * Common case is that this is the last pending expiration
8903 * (or queue is empty). Insert at head of the queue.
8904 */
8905 TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
8906 } else {
8907 /*
8908 * Queue is not empty and first element expires later than
8909 * this command. Search for element expiring sooner.
8910 */
9392 }
9393
9394 void
9395 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9396 {
9397
9398 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9399
9400 /*
9401 * if the bus is draining/quiesced, no changes to the throttles
9402 * are allowed. Not allowing change of throttles during draining
9403 * limits error recovery but will reduce draining time
9404 *
9405 * all throttles should have been set to HOLD_THROTTLE
9406 */
9407 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9408 return;
9409 }
9410
9411 if (what == HOLD_THROTTLE) {
9412 ptgt->m_t_throttle = what;
9413 } else if (ptgt->m_reset_delay == 0) {
9414 if (what == MAX_THROTTLE)
9415 ptgt->m_t_throttle = mpt->m_max_tune_throttle;
9416 else
9417 ptgt->m_t_throttle = what;
9418 }
9419 }
9420
9421 /*
9422 * Clean up from a device reset.
9423 * For the case of target reset, this function clears the waitq of all
9424 * commands for a particular target. For the case of abort task set, this
9425 * function clears the waitq of all commonds for a particular target/lun.
9426 */
9427 static void
9428 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9429 {
9430 mptsas_slots_t *slots = mpt->m_active;
9431 mptsas_cmd_t *cmd, *next_cmd;
9432 int slot;
9433 uchar_t reason;
9434 uint_t stat;
9435 hrtime_t timestamp;
9436
9547 cmd = mpt->m_tx_waitq;
9548 while (cmd != NULL) {
9549 next_cmd = cmd->cmd_linkp;
9550 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9551 mptsas_tx_waitq_delete(mpt, cmd);
9552 mutex_exit(&mpt->m_tx_waitq_mutex);
9553 mptsas_set_pkt_reason(mpt, cmd,
9554 reason, stat);
9555 mptsas_doneq_add(mpt, cmd);
9556 mutex_enter(&mpt->m_tx_waitq_mutex);
9557 }
9558 cmd = next_cmd;
9559 }
9560 mutex_exit(&mpt->m_tx_waitq_mutex);
9561 break;
9562 default:
9563 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9564 tasktype);
9565 break;
9566 }
9567
9568 #ifdef MPTSAS_FAULTINJECTION
9569 mptsas_fminj_move_tgt_to_doneq(mpt, target, reason, stat);
9570 #endif
9571 }
9572
9573 /*
9574 * Clean up hba state, abort all outstanding command and commands in waitq
9575 * reset timeout of all targets.
9576 */
9577 static void
9578 mptsas_flush_hba(mptsas_t *mpt)
9579 {
9580 mptsas_slots_t *slots = mpt->m_active;
9581 mptsas_cmd_t *cmd;
9582 int slot;
9583
9584 NDBG25(("mptsas_flush_hba"));
9585
9586 /*
9587 * The I/O Controller should have already sent back
9588 * all commands via the scsi I/O reply frame. Make
9589 * sure all commands have been flushed.
9590 * Account for TM request, which use the last SMID.
9842 mptsas_slots_t *slots = mpt->m_active;
9843 int rval = FALSE;
9844
9845 ASSERT(mutex_owned(&mpt->m_mutex));
9846
9847 /*
9848 * Abort the command pkt on the target/lun in ap. If pkt is
9849 * NULL, abort all outstanding commands on that target/lun.
9850 * If you can abort them, return 1, else return 0.
9851 * Each packet that's aborted should be sent back to the target
9852 * driver through the callback routine, with pkt_reason set to
9853 * CMD_ABORTED.
9854 *
9855 * abort cmd pkt on HBA hardware; clean out of outstanding
9856 * command lists, etc.
9857 */
9858 if (pkt != NULL) {
9859 /* abort the specified packet */
9860 sp = PKT2CMD(pkt);
9861
9862 #ifdef MPTSAS_FAULTINJECTION
9863 /* Command already on the list. */
9864 if (((pkt->pkt_flags & FLAG_PKT_TIMEOUT) != 0) &&
9865 (sp->cmd_active_expiration != 0)) {
9866 mptsas_fminj_move_cmd_to_doneq(mpt, sp, CMD_ABORTED,
9867 STAT_ABORTED);
9868 rval = TRUE;
9869 goto done;
9870 }
9871 #endif
9872
9873 if (sp->cmd_queued) {
9874 NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9875 (void *)sp));
9876 mptsas_waitq_delete(mpt, sp);
9877 mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
9878 STAT_ABORTED);
9879 mptsas_doneq_add(mpt, sp);
9880 rval = TRUE;
9881 goto done;
9882 }
9883
9884 /*
9885 * Have mpt firmware abort this command
9886 */
9887
9888 if (slots->m_slot[sp->cmd_slot] != NULL) {
9889 rval = mptsas_ioc_task_management(mpt,
9890 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
9891 lun, NULL, 0, 0);
9892
10143
10144 /*PRINTFLIKE3*/
10145 void
10146 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
10147 {
10148 dev_info_t *dev;
10149 va_list ap;
10150
10151 if (mpt) {
10152 dev = mpt->m_dip;
10153 } else {
10154 dev = 0;
10155 }
10156
10157 mutex_enter(&mptsas_log_mutex);
10158
10159 va_start(ap, fmt);
10160 (void) vsprintf(mptsas_log_buf, fmt, ap);
10161 va_end(ap);
10162
10163 if (level == CE_CONT || level == CE_NOTE) {
10164 scsi_log(dev, mptsas_label, level, "!%s\n", mptsas_log_buf);
10165 } else {
10166 scsi_log(dev, mptsas_label, level, "!%s", mptsas_log_buf);
10167 }
10168
10169 mutex_exit(&mptsas_log_mutex);
10170 }
10171
10172 #ifdef MPTSAS_DEBUG
10173 /*
10174 * Use a circular buffer to log messages to private memory.
10175 * Increment idx atomically to minimize risk to miss lines.
10176 * It's fast and does not hold up the proceedings too much.
10177 */
10178 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
10179 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
10180 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
10181 static uint32_t mptsas_dbglog_idx = 0;
10182
10183 /*PRINTFLIKE1*/
10184 void
10185 mptsas_debug_log(char *fmt, ...)
10186 {
10214 #else
10215 scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
10216 #endif
10217 mutex_exit(&mptsas_log_mutex);
10218 }
10219 #endif
10220
10221 /*
10222 * timeout handling
10223 */
10224 static void
10225 mptsas_watch(void *arg)
10226 {
10227 #ifndef __lock_lint
10228 _NOTE(ARGUNUSED(arg))
10229 #endif
10230
10231 mptsas_t *mpt;
10232 uint32_t doorbell;
10233
10234 #ifdef MPTSAS_FAULTINJECTION
10235 struct mptsas_active_cmdq finj_cmds;
10236
10237 TAILQ_INIT(&finj_cmds);
10238 #endif
10239
10240 NDBG30(("mptsas_watch"));
10241
10242 rw_enter(&mptsas_global_rwlock, RW_READER);
10243 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
10244
10245 mutex_enter(&mpt->m_mutex);
10246
10247 /* Skip device if not powered on */
10248 if (mpt->m_options & MPTSAS_OPT_PM) {
10249 if (mpt->m_power_level == PM_LEVEL_D0) {
10250 (void) pm_busy_component(mpt->m_dip, 0);
10251 mpt->m_busy = 1;
10252 } else {
10253 mutex_exit(&mpt->m_mutex);
10254 continue;
10255 }
10256 }
10257
10258 /*
10259 * Check if controller is in a FAULT state. If so, reset it.
10263 doorbell &= MPI2_DOORBELL_DATA_MASK;
10264 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
10265 "code: %04x", doorbell);
10266 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
10267 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10268 mptsas_log(mpt, CE_WARN, "Reset failed"
10269 "after fault was detected");
10270 }
10271 }
10272
10273 /*
10274 * For now, always call mptsas_watchsubr.
10275 */
10276 mptsas_watchsubr(mpt);
10277
10278 if (mpt->m_options & MPTSAS_OPT_PM) {
10279 mpt->m_busy = 0;
10280 (void) pm_idle_component(mpt->m_dip, 0);
10281 }
10282
10283 #ifdef MPTSAS_FAULTINJECTION
10284 mptsas_fminj_watchsubr(mpt, &finj_cmds);
10285 #endif
10286
10287 mutex_exit(&mpt->m_mutex);
10288 }
10289 rw_exit(&mptsas_global_rwlock);
10290
10291 mutex_enter(&mptsas_global_mutex);
10292 if (mptsas_timeouts_enabled)
10293 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
10294 mutex_exit(&mptsas_global_mutex);
10295
10296 #ifdef MPTSAS_FAULTINJECTION
10297 /* Complete all completed commands. */
10298 if (!TAILQ_EMPTY(&finj_cmds)) {
10299 mptsas_cmd_t *cmd;
10300
10301 while ((cmd = TAILQ_FIRST(&finj_cmds)) != NULL) {
10302 TAILQ_REMOVE(&finj_cmds, cmd, cmd_active_link);
10303 struct scsi_pkt *pkt = cmd->cmd_pkt;
10304
10305 if (pkt->pkt_comp != NULL) {
10306 (*pkt->pkt_comp)(pkt);
10307 }
10308 }
10309 }
10310 #endif
10311 }
10312
10313 static void
10314 mptsas_watchsubr_tgt(mptsas_t *mpt, mptsas_target_t *ptgt, hrtime_t timestamp)
10315 {
10316 mptsas_cmd_t *cmd;
10317
10318 /*
10319 * If we were draining due to a qfull condition,
10320 * go back to full throttle.
10321 */
10322 if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
10323 (ptgt->m_t_throttle > HOLD_THROTTLE) &&
10324 (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
10325 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10326 mptsas_restart_hba(mpt);
10327 }
10328
10329 cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
10330 if (cmd == NULL)
10857 mpi_pre_fw_25_download(mpt, pt);
10858 return;
10859 }
10860
10861 /*
10862 * User requests should come in with the Transaction
10863 * context element where the SGL will go. Putting the
10864 * SGL after that seems to work, but don't really know
10865 * why. Other drivers tend to create an extra SGL and
10866 * refer to the TCE through that.
10867 */
10868 req = (pMpi2FWDownloadRequest)pt->request;
10869 tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10870 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10871 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10872 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10873 }
10874
10875 pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10876 sizeof (*tcsge);
10877 if (pt->request_size != pt->sgl_offset) {
10878 NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10879 "0x%x, should be 0x%x, dataoutsz 0x%x",
10880 (int)pt->request_size, (int)pt->sgl_offset,
10881 (int)pt->dataout_size));
10882 }
10883 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10884 NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10885 "0x%x, should be 0x%x", pt->data_size,
10886 (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10887 }
10888 }
10889
10890 /*
10891 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10892 */
10893 static void
10894 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10895 {
10896 pMpi2FWDownloadTCSGE_t tcsge;
10897 pMpi2FWDownloadRequest req2;
10898 pMpi25FWDownloadRequest req25;
10899
10900 /*
10901 * User requests should come in with the Transaction
10902 * context element where the SGL will go. The new firmware
10903 * Doesn't use TCE and has space in the main request for
10904 * this information. So move to the right place.
10905 */
10906 req2 = (pMpi2FWDownloadRequest)pt->request;
10907 req25 = (pMpi25FWDownloadRequest)pt->request;
10908 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10909 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10910 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10911 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10912 }
10913 req25->ImageOffset = tcsge->ImageOffset;
10914 req25->ImageSize = tcsge->ImageSize;
10915
10916 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10917 if (pt->request_size != pt->sgl_offset) {
10918 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10919 "0x%x, should be 0x%x, dataoutsz 0x%x",
10920 pt->request_size, pt->sgl_offset,
10921 pt->dataout_size));
10922 }
10923 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10924 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10925 "0x%x, should be 0x%x", pt->data_size,
10926 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10927 }
10928 }
10929
10930 /*
10931 * Prepare the pt for a SAS2 FW_UPLOAD request.
10932 */
10933 static void
10934 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10935 {
10936 pMpi2FWUploadTCSGE_t tcsge;
10937 pMpi2FWUploadRequest_t req;
10938
10939 /*
10940 * If SAS3, call separate function.
10941 */
10942 if (mpt->m_MPI25) {
10943 mpi_pre_fw_25_upload(mpt, pt);
10944 return;
10945 }
10946
10947 /*
10948 * User requests should come in with the Transaction
10949 * context element where the SGL will go. Putting the
10950 * SGL after that seems to work, but don't really know
10951 * why. Other drivers tend to create an extra SGL and
10952 * refer to the TCE through that.
10953 */
10954 req = (pMpi2FWUploadRequest_t)pt->request;
10955 tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10956 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10957 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10958 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10959 }
10960
10961 pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10962 sizeof (*tcsge);
10963 if (pt->request_size != pt->sgl_offset) {
10964 NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10965 "0x%x, should be 0x%x, dataoutsz 0x%x",
10966 pt->request_size, pt->sgl_offset,
10967 pt->dataout_size));
10968 }
10969 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10970 NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10971 "0x%x, should be 0x%x", pt->data_size,
10972 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10973 }
10974 }
10975
10976 /*
 * Prepare the pt for a SAS3 FW_UPLOAD request.
10978 */
10979 static void
10980 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10981 {
10982 pMpi2FWUploadTCSGE_t tcsge;
10983 pMpi2FWUploadRequest_t req2;
10984 pMpi25FWUploadRequest_t req25;
10985
10986 /*
10987 * User requests should come in with the Transaction
10988 * context element where the SGL will go. The new firmware
10989 * Doesn't use TCE and has space in the main request for
10990 * this information. So move to the right place.
10991 */
10992 req2 = (pMpi2FWUploadRequest_t)pt->request;
10993 req25 = (pMpi25FWUploadRequest_t)pt->request;
10994 tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10995 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10996 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10997 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10998 }
10999 req25->ImageOffset = tcsge->ImageOffset;
11000 req25->ImageSize = tcsge->ImageSize;
11001
11002 pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
11003 if (pt->request_size != pt->sgl_offset) {
11004 NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
11005 "0x%x, should be 0x%x, dataoutsz 0x%x",
11006 pt->request_size, pt->sgl_offset,
11007 pt->dataout_size));
11008 }
11009 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
11010 NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
11011 "0x%x, should be 0x%x", pt->data_size,
11012 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
11013 }
11014 }
11015
11016 /*
11017 * Prepare the pt for an IOC_FACTS request.
11018 */
11019 static void
11020 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
11021 {
11022 #ifndef __lock_lint
11023 _NOTE(ARGUNUSED(mpt))
11024 #endif
11025 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST)) {
11026 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
11027 "0x%x, should be 0x%x, dataoutsz 0x%x",
11028 pt->request_size,
11029 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
11030 pt->dataout_size));
11031 }
11032 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY)) {
11033 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
11034 "0x%x, should be 0x%x", pt->data_size,
11035 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
11036 }
11037 pt->sgl_offset = (uint16_t)pt->request_size;
11038 }
11039
11040 /*
11041 * Prepare the pt for a PORT_FACTS request.
11042 */
11043 static void
11044 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
11045 {
11046 #ifndef __lock_lint
11047 _NOTE(ARGUNUSED(mpt))
11048 #endif
11049 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST)) {
11050 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
11051 "0x%x, should be 0x%x, dataoutsz 0x%x",
11052 pt->request_size,
11053 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
11054 pt->dataout_size));
11055 }
11056 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY)) {
11057 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
11058 "0x%x, should be 0x%x", pt->data_size,
11059 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
11060 }
11061 pt->sgl_offset = (uint16_t)pt->request_size;
11062 }
11063
11064 /*
11065 * Prepare pt for a SATA_PASSTHROUGH request.
11066 */
11067 static void
11068 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
11069 {
11070 #ifndef __lock_lint
11071 _NOTE(ARGUNUSED(mpt))
11072 #endif
11073 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
11074 if (pt->request_size != pt->sgl_offset) {
11075 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
11076 "0x%x, should be 0x%x, dataoutsz 0x%x",
11077 pt->request_size, pt->sgl_offset,
11078 pt->dataout_size));
11079 }
11080 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY)) {
11081 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
11082 "0x%x, should be 0x%x", pt->data_size,
11083 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
11084 }
11085 }
11086
11087 static void
11088 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
11089 {
11090 #ifndef __lock_lint
11091 _NOTE(ARGUNUSED(mpt))
11092 #endif
11093 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
11094 if (pt->request_size != pt->sgl_offset) {
11095 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
11096 "0x%x, should be 0x%x, dataoutsz 0x%x",
11097 pt->request_size, pt->sgl_offset,
11098 pt->dataout_size));
11099 }
11100 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY)) {
11101 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
11102 "0x%x, should be 0x%x", pt->data_size,
11103 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
11104 }
11105 }
11106
11107 /*
11108 * Prepare pt for a CONFIG request.
11109 */
11110 static void
11111 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
11112 {
11113 #ifndef __lock_lint
11114 _NOTE(ARGUNUSED(mpt))
11115 #endif
11116 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
11117 if (pt->request_size != pt->sgl_offset) {
11118 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
11119 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11120 pt->sgl_offset, pt->dataout_size));
11121 }
11122 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY)) {
11123 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11124 "should be 0x%x", pt->data_size,
11125 (int)sizeof (MPI2_CONFIG_REPLY)));
11126 }
11127 pt->simple = 1;
11128 }
11129
11130 /*
11131 * Prepare pt for a SCSI_IO_REQ request.
11132 */
11133 static void
11134 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
11135 {
11136 #ifndef __lock_lint
11137 _NOTE(ARGUNUSED(mpt))
11138 #endif
11139 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
11140 if (pt->request_size != pt->sgl_offset) {
11141 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
11142 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11143 pt->sgl_offset,
11144 pt->dataout_size));
11145 }
11146 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY)) {
11147 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11148 "should be 0x%x", pt->data_size,
11149 (int)sizeof (MPI2_SCSI_IO_REPLY)));
11150 }
11151 }
11152
11153 /*
11154 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
11155 */
static void
mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(mpt))
#endif
	/*
	 * SAS IO Unit Control requests carry no SGL, so place the
	 * (empty) SGL immediately after the request itself.
	 */
	pt->sgl_offset = (uint16_t)pt->request_size;
}
11164
11165 /*
11166 * A set of functions to prepare an mptsas_cmd for the various
11167 * supported requests.
11168 */
11169 static struct mptsas_func {
11170 U8 Function;
11335 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
11336
11337 pt.request = (uint8_t *)request_msg;
11338 pt.direction = direction;
11339 pt.simple = 0;
11340 pt.request_size = request_size;
11341 pt.data_size = data_size;
11342 pt.dataout_size = dataout_size;
11343 pt.data_cookie = data_dma_state.cookie;
11344 pt.dataout_cookie = dataout_dma_state.cookie;
11345 mptsas_prep_sgl_offset(mpt, &pt);
11346
11347 /*
11348 * Form a blank cmd/pkt to store the acknowledgement message
11349 */
11350 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
11351 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
11352 pkt->pkt_ha_private = (opaque_t)&pt;
11353 pkt->pkt_flags = FLAG_HEAD;
11354 pkt->pkt_time = timeout;
11355 pkt->pkt_start = gethrtime();
11356 cmd->cmd_pkt = pkt;
11357 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
11358
11359 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
11360 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
11361 uint8_t com, cdb_group_id;
11362 boolean_t ret;
11363
11364 pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
11365 com = pkt->pkt_cdbp[0];
11366 cdb_group_id = CDB_GROUPID(com);
11367 switch (cdb_group_id) {
11368 case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
11369 case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
11370 case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
11371 case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
11372 case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
11373 default:
11374 NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
11375 "CDBGROUP 0x%x requested!", cdb_group_id));
12788 driverdata.RegData);
12789 break;
12790
12791 default:
12792 status = EINVAL;
12793 break;
12794 }
12795 } else {
12796 status = EFAULT;
12797 }
12798
12799 mutex_exit(&mpt->m_mutex);
12800 return (status);
12801 }
12802
12803 static int
12804 led_control(mptsas_t *mpt, intptr_t data, int mode)
12805 {
12806 int ret = 0;
12807 mptsas_led_control_t lc;
12808 mptsas_enclosure_t *mep;
12809 uint16_t slotidx;
12810
12811 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12812 return (EFAULT);
12813 }
12814
12815 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12816 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12817 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12818 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12819 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12820 lc.LedStatus != 1)) {
12821 return (EINVAL);
12822 }
12823
12824 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12825 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12826 return (EACCES);
12827
12828 /* Locate the required enclosure */
12829 mutex_enter(&mpt->m_mutex);
12830 mep = mptsas_enc_lookup(mpt, lc.Enclosure);
12831 if (mep == NULL) {
12832 mutex_exit(&mpt->m_mutex);
12833 return (ENOENT);
12834 }
12835
12836 if (lc.Slot < mep->me_fslot) {
12837 mutex_exit(&mpt->m_mutex);
12838 return (ENOENT);
12839 }
12840
12841 /*
12842 * Slots on the enclosure are maintained in array where me_fslot is
12843 * entry zero. We normalize the requested slot.
12844 */
12845 slotidx = lc.Slot - mep->me_fslot;
12846 if (slotidx >= mep->me_nslots) {
12847 mutex_exit(&mpt->m_mutex);
12848 return (ENOENT);
12849 }
12850
12851 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12852 /* Update our internal LED state. */
12853 mep->me_slotleds[slotidx] &= ~(1 << (lc.Led - 1));
12854 mep->me_slotleds[slotidx] |= lc.LedStatus << (lc.Led - 1);
12855
12856 /* Flush it to the controller. */
12857 ret = mptsas_flush_led_status(mpt, mep, slotidx);
12858 mutex_exit(&mpt->m_mutex);
12859 return (ret);
12860 }
12861
12862 /* Return our internal LED state. */
12863 lc.LedStatus = (mep->me_slotleds[slotidx] >> (lc.Led - 1)) & 1;
12864 mutex_exit(&mpt->m_mutex);
12865
12866 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12867 return (EFAULT);
12868 }
12869
12870 return (0);
12871 }
12872
12873 static int
12874 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
12875 {
12876 uint16_t i = 0;
12877 uint16_t count = 0;
12878 int ret = 0;
12879 mptsas_target_t *ptgt;
12880 mptsas_disk_info_t *di;
12881 STRUCT_DECL(mptsas_get_disk_info, gdi);
12882
12883 if ((mode & FREAD) == 0)
12985 if (mpt == NULL) {
12986 /*
12987 * Called from iport node, get the states
12988 */
12989 iport_flag = 1;
12990 dip = mptsas_get_dip_from_dev(dev, &phymask);
12991 if (dip == NULL) {
12992 return (ENXIO);
12993 }
12994 mpt = DIP2MPT(dip);
12995 }
12996 /* Make sure power level is D0 before accessing registers */
12997 mutex_enter(&mpt->m_mutex);
12998 if (mpt->m_options & MPTSAS_OPT_PM) {
12999 (void) pm_busy_component(mpt->m_dip, 0);
13000 if (mpt->m_power_level != PM_LEVEL_D0) {
13001 mutex_exit(&mpt->m_mutex);
13002 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
13003 DDI_SUCCESS) {
13004 mptsas_log(mpt, CE_WARN,
13005 "raise power request failed");
13006 (void) pm_idle_component(mpt->m_dip, 0);
13007 return (ENXIO);
13008 }
13009 } else {
13010 mutex_exit(&mpt->m_mutex);
13011 }
13012 } else {
13013 mutex_exit(&mpt->m_mutex);
13014 }
13015
13016 if (iport_flag) {
13017 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
13018 if (status != 0) {
13019 goto out;
13020 }
13021 /*
13022 * The following code control the OK2RM LED, it doesn't affect
13023 * the ioctl return status.
13024 */
13025 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
13026 (cmd == DEVCTL_DEVICE_OFFLINE)) {
13027 if (ndi_dc_allochdl((void *)data, &dcp) !=
13028 NDI_SUCCESS) {
13029 goto out;
13030 }
13031 addr = ndi_dc_getaddr(dcp);
13032 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
13033 if (ptgt == NULL) {
13034 NDBG14(("mptsas_ioctl led control: tgt %s not "
13035 "found", addr));
13036 ndi_dc_freehdl(dcp);
13037 goto out;
13038 }
13039 ndi_dc_freehdl(dcp);
13040 }
13041 goto out;
13042 }
13043 switch (cmd) {
13044 case MPTIOCTL_GET_DISK_INFO:
13045 status = get_disk_info(mpt, data, mode);
13046 break;
13047 case MPTIOCTL_LED_CONTROL:
13048 status = led_control(mpt, data, mode);
13049 break;
13050 case MPTIOCTL_UPDATE_FLASH:
13051 if (ddi_copyin((void *)data, &flashdata,
13052 sizeof (struct mptsas_update_flash), mode)) {
13053 status = EFAULT;
13054 break;
13055 }
13056
13057 mutex_enter(&mpt->m_mutex);
13058 if (mptsas_update_flash(mpt,
13205 }
13206
13207 int
13208 mptsas_restart_ioc(mptsas_t *mpt)
13209 {
13210 int rval = DDI_SUCCESS;
13211 mptsas_target_t *ptgt = NULL;
13212
13213 ASSERT(mutex_owned(&mpt->m_mutex));
13214
13215 /*
13216 * Set a flag telling I/O path that we're processing a reset. This is
13217 * needed because after the reset is complete, the hash table still
13218 * needs to be rebuilt. If I/Os are started before the hash table is
13219 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
13220 * so that they can be retried.
13221 */
13222 mpt->m_in_reset = TRUE;
13223
13224 /*
13225 * Set all throttles to HOLD
13226 */
13227 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13228 ptgt = refhash_next(mpt->m_targets, ptgt)) {
13229 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
13230 }
13231
13232 /*
13233 * Disable interrupts
13234 */
13235 MPTSAS_DISABLE_INTR(mpt);
13236
13237 /*
13238 * Abort all commands: outstanding commands, commands in waitq and
13239 * tx_waitq.
13240 */
13241 mptsas_flush_hba(mpt);
13242
13243 /*
13244 * Reinitialize the chip.
13313 }
13314 /*
13315 * Setup configuration space
13316 */
13317 if (mptsas_config_space_init(mpt) == FALSE) {
13318 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
13319 "failed!");
13320 goto fail;
13321 }
13322
13323 /*
13324 * IOC facts can change after a diag reset so all buffers that are
13325 * based on these numbers must be de-allocated and re-allocated. Get
13326 * new IOC facts each time chip is initialized.
13327 */
13328 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
13329 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
13330 goto fail;
13331 }
13332
13333 if (first_time) {
13334 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
13335 goto fail;
13336 }
13337 /*
13338 * Allocate request message frames, reply free queue, reply
13339 * descriptor post queue, and reply message frames using
13340 * latest IOC facts.
13341 */
13342 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
13343 mptsas_log(mpt, CE_WARN,
13344 "mptsas_alloc_request_frames failed");
13345 goto fail;
13346 }
13347 if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
13348 mptsas_log(mpt, CE_WARN,
13349 "mptsas_alloc_sense_bufs failed");
13350 goto fail;
13351 }
13352 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
13353 mptsas_log(mpt, CE_WARN,
13354 "mptsas_alloc_free_queue failed!");
13355 goto fail;
13356 }
13357 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
13358 mptsas_log(mpt, CE_WARN,
13359 "mptsas_alloc_post_queue failed!");
13360 goto fail;
13361 }
13362 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
13363 mptsas_log(mpt, CE_WARN,
13364 "mptsas_alloc_reply_frames failed!");
13365 goto fail;
13366 }
13367 }
13368 mur:
13369 /*
13370 * Re-Initialize ioc to operational state
13371 */
13372 if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
13373 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
13374 goto fail;
13375 }
13376
13377 mptsas_alloc_reply_args(mpt);
13378
13379 /*
13380 * Initialize reply post index. Reply free index is initialized after
13381 * the next loop.
13382 */
13383 mpt->m_post_index = 0;
13384
13385 /*
13386 * Initialize the Reply Free Queue with the physical addresses of our
13387 * reply frames.
13491 */
13492 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13493 & PCI_STAT_CAP) {
13494 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13495 PCI_CONF_CAP_PTR), 4);
13496 } else {
13497 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13498 }
13499
13500 /*
13501 * Walk capabilities if supported.
13502 */
13503 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13504
13505 /*
13506 * Check that we haven't exceeded the maximum number of
13507 * capabilities and that the pointer is in a valid range.
13508 */
13509 if (++cap_count > 48) {
13510 mptsas_log(mpt, CE_WARN,
13511 "too many device capabilities");
13512 break;
13513 }
13514 if (caps_ptr < 64) {
13515 mptsas_log(mpt, CE_WARN,
13516 "capabilities pointer 0x%x out of range",
13517 caps_ptr);
13518 break;
13519 }
13520
13521 /*
13522 * Get next capability and check that it is valid.
13523 * For now, we only support power management.
13524 */
13525 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13526 switch (cap) {
13527 case PCI_CAP_ID_PM:
13528 mptsas_log(mpt, CE_NOTE,
13529 "power management supported");
13530 mpt->m_options |= MPTSAS_OPT_PM;
13531
13532 /* Save PMCSR offset */
13533 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13534 break;
13535 /*
13536 * The following capabilities are valid. Any others
13537 * will cause a message to be logged.
13538 */
13539 case PCI_CAP_ID_VPD:
13540 case PCI_CAP_ID_MSI:
13541 case PCI_CAP_ID_PCIX:
13542 case PCI_CAP_ID_PCI_E:
13543 case PCI_CAP_ID_MSI_X:
13544 break;
13545 default:
13546 mptsas_log(mpt, CE_NOTE,
13547 "unrecognized capability 0x%x", cap);
13548 break;
13549 }
13550
13551 /*
13552 * Get next capabilities pointer and clear bits 0,1.
13553 */
13554 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13555 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13556 }
13557 return (TRUE);
13558 }
13559
13560 static int
13561 mptsas_init_pm(mptsas_t *mpt)
13562 {
13563 char pmc_name[16];
13564 char *pmc[] = {
13565 NULL,
13566 "0=Off (PCI D3 State)",
13567 "3=On (PCI D0 State)",
13568 NULL
13569 };
13570 uint16_t pmcsr_stat;
13571
13572 if (mptsas_get_pci_cap(mpt) == FALSE) {
13573 return (DDI_FAILURE);
13574 }
13575 /*
13576 * If PCI's capability does not support PM, then don't need
13577 * to registe the pm-components
13578 */
13579 if (!(mpt->m_options & MPTSAS_OPT_PM))
13580 return (DDI_SUCCESS);
13581 /*
13582 * If power management is supported by this chip, create
13583 * pm-components property for the power management framework
13584 */
13585 (void) sprintf(pmc_name, "NAME=mpt_sas%d", mpt->m_instance);
13586 pmc[0] = pmc_name;
13587 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13588 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13589 mpt->m_options &= ~MPTSAS_OPT_PM;
13590 mptsas_log(mpt, CE_WARN,
13591 "pm-component property creation failed");
13592 return (DDI_FAILURE);
13593 }
13594
13595 /*
13596 * Power on device.
13597 */
13598 (void) pm_busy_component(mpt->m_dip, 0);
13599 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13600 mpt->m_pmcsr_offset);
13601 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13602 mptsas_log(mpt, CE_WARN, "power up the device");
13603 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13604 PCI_PMCSR_D0);
13605 }
13606 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13607 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13608 return (DDI_FAILURE);
13609 }
13610 mpt->m_power_level = PM_LEVEL_D0;
13611 /*
13612 * Set pm idle delay.
13613 */
13614 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13615 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13616
13617 return (DDI_SUCCESS);
13618 }
13619
13620 static int
13621 mptsas_register_intrs(mptsas_t *mpt)
13622 {
13623 dev_info_t *dip;
13624 int intr_types;
13625
13626 dip = mpt->m_dip;
13627
13628 /* Get supported interrupt types */
13629 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13630 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13631 "failed");
13632 return (FALSE);
13633 }
13634
13635 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13636
13637 /*
13638 * Try MSI, but fall back to FIXED
13639 */
13640 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13641 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13642 NDBG0(("Using MSI interrupt type"));
13643 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13644 return (TRUE);
13645 }
13646 }
13647 if (intr_types & DDI_INTR_TYPE_FIXED) {
13648 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13649 NDBG0(("Using FIXED interrupt type"));
13650 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13651 return (TRUE);
13665 }
13666
13667 /*
13668 * mptsas_add_intrs:
13669 *
13670 * Register FIXED or MSI interrupts.
13671 */
13672 static int
13673 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13674 {
13675 dev_info_t *dip = mpt->m_dip;
13676 int avail, actual, count = 0;
13677 int i, flag, ret;
13678
13679 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13680
13681 /* Get number of interrupts */
13682 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13683 if ((ret != DDI_SUCCESS) || (count <= 0)) {
13684 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13685 "ret %d count %d", ret, count);
13686
13687 return (DDI_FAILURE);
13688 }
13689
13690 /* Get number of available interrupts */
13691 ret = ddi_intr_get_navail(dip, intr_type, &avail);
13692 if ((ret != DDI_SUCCESS) || (avail == 0)) {
13693 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13694 "ret %d avail %d", ret, avail);
13695
13696 return (DDI_FAILURE);
13697 }
13698
13699 if (avail < count) {
13700 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
13701 "navail() returned %d", count, avail);
13702 }
13703
13704 /* Mpt only have one interrupt routine */
13705 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13706 count = 1;
13707 }
13708
13709 /* Allocate an array of interrupt handles */
13710 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13711 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13712
13713 flag = DDI_INTR_ALLOC_NORMAL;
13714
13715 /* call ddi_intr_alloc() */
13716 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13717 count, &actual, flag);
13718
13719 if ((ret != DDI_SUCCESS) || (actual == 0)) {
13720 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d",
13721 ret);
13722 kmem_free(mpt->m_htable, mpt->m_intr_size);
13723 return (DDI_FAILURE);
13724 }
13725
13726 /* use interrupt count returned or abort? */
13727 if (actual < count) {
13728 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d",
13729 count, actual);
13730 }
13731
13732 mpt->m_intr_cnt = actual;
13733
13734 /*
13735 * Get priority for first msi, assume remaining are all the same
13736 */
13737 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13738 &mpt->m_intr_pri)) != DDI_SUCCESS) {
13739 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d", ret);
13740
13741 /* Free already allocated intr */
13742 for (i = 0; i < actual; i++) {
13743 (void) ddi_intr_free(mpt->m_htable[i]);
13744 }
13745
13746 kmem_free(mpt->m_htable, mpt->m_intr_size);
13747 return (DDI_FAILURE);
13748 }
13749
13750 /* Test for high level mutex */
13751 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13752 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13753 "Hi level interrupt not supported");
13754
13755 /* Free already allocated intr */
13756 for (i = 0; i < actual; i++) {
13757 (void) ddi_intr_free(mpt->m_htable[i]);
13758 }
13759
13760 kmem_free(mpt->m_htable, mpt->m_intr_size);
13761 return (DDI_FAILURE);
13762 }
13763
13764 /* Call ddi_intr_add_handler() */
13765 for (i = 0; i < actual; i++) {
13766 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13767 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13768 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13769 "failed %d", ret);
13770
13771 /* Free already allocated intr */
13772 for (i = 0; i < actual; i++) {
13773 (void) ddi_intr_free(mpt->m_htable[i]);
13774 }
13775
13776 kmem_free(mpt->m_htable, mpt->m_intr_size);
13777 return (DDI_FAILURE);
13778 }
13779 }
13780
13781 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13782 != DDI_SUCCESS) {
13783 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d", ret);
13784
13785 /* Free already allocated intr */
13786 for (i = 0; i < actual; i++) {
13787 (void) ddi_intr_free(mpt->m_htable[i]);
13788 }
13789
13790 kmem_free(mpt->m_htable, mpt->m_intr_size);
13791 return (DDI_FAILURE);
13792 }
13793
13794 /*
13795 * Enable interrupts
13796 */
13797 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13798 /* Call ddi_intr_block_enable() for MSI interrupts */
13799 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13800 } else {
13801 /* Call ddi_intr_enable for MSI or FIXED interrupts */
13802 for (i = 0; i < mpt->m_intr_cnt; i++) {
13803 (void) ddi_intr_enable(mpt->m_htable[i]);
14064 return (DEV_INFO_SUCCESS);
14065 }
14066
14067 uint64_t
14068 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
14069 {
14070 uint64_t sata_guid = 0, *pwwn = NULL;
14071 int target = ptgt->m_devhdl;
14072 uchar_t *inq83 = NULL;
14073 int inq83_len = 0xFF;
14074 uchar_t *dblk = NULL;
14075 int inq83_retry = 3;
14076 int rval = DDI_FAILURE;
14077
14078 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
14079
14080 inq83_retry:
14081 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
14082 inq83_len, NULL, 1);
14083 if (rval != DDI_SUCCESS) {
14084 mptsas_log(mpt, CE_WARN, "mptsas request inquiry page "
14085 "0x83 for target:%x, lun:%x failed!", target, lun);
14086 sata_guid = -1;
14087 goto out;
14088 }
14089 /* According to SAT2, the first descriptor is logic unit name */
14090 dblk = &inq83[4];
14091 if ((dblk[1] & 0x30) != 0) {
14092 mptsas_log(mpt, CE_WARN, "Descriptor is not lun associated.");
14093 goto out;
14094 }
14095 pwwn = (uint64_t *)(void *)(&dblk[4]);
14096 if ((dblk[4] & 0xf0) == 0x50) {
14097 sata_guid = BE_64(*pwwn);
14098 goto out;
14099 } else if (dblk[4] == 'A') {
14100 NDBG20(("SATA drive has no NAA format GUID."));
14101 goto out;
14102 } else {
14103 /* The data is not ready, wait and retry */
14104 inq83_retry--;
14105 if (inq83_retry <= 0) {
14106 goto out;
14107 }
14108 NDBG20(("The GUID is not ready, retry..."));
14109 delay(1 * drv_usectohz(1000000));
14110 goto inq83_retry;
14111 }
14112 out:
14180 bcopy((caddr_t)mpt->m_tran,
14181 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
14182 tgt_private = kmem_alloc(
14183 sizeof (mptsas_tgt_private_t), KM_SLEEP);
14184 if (tgt_private == NULL) {
14185 goto out;
14186 }
14187 tgt_private->t_lun = ap->a_lun;
14188 tgt_private->t_private = ptgt;
14189 tran_clone->tran_tgt_private = tgt_private;
14190 ap->a_hba_tran = tran_clone;
14191
14192 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
14193 data_bp, cdblen, sizeof (struct scsi_arq_status),
14194 0, PKT_CONSISTENT, NULL, NULL);
14195 if (pktp == NULL) {
14196 goto out;
14197 }
14198 bcopy(cdb, pktp->pkt_cdbp, cdblen);
14199 pktp->pkt_flags = FLAG_NOPARITY;
14200 pktp->pkt_time = mptsas_scsi_pkt_time;
14201 if (scsi_poll(pktp) < 0) {
14202 goto out;
14203 }
14204 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
14205 goto out;
14206 }
14207 if (resid != NULL) {
14208 *resid = pktp->pkt_resid;
14209 }
14210
14211 ret = DDI_SUCCESS;
14212 out:
14213 if (pktp) {
14214 scsi_destroy_pkt(pktp);
14215 }
14216 if (tran_clone) {
14217 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
14218 }
14219 if (tgt_private) {
14220 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
14391 (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
14392 (strncmp((char *)arg, "disk", 4) == 0)) {
14393 bconfig = FALSE;
14394 ndi_hold_devi(*childp);
14395 }
14396 } else {
14397 ret = NDI_FAILURE;
14398 break;
14399 }
14400
14401 /*
14402 * DDI group instructed us to use this flag.
14403 */
14404 mflags |= NDI_MDI_FALLBACK;
14405 break;
14406 case BUS_CONFIG_DRIVER:
14407 case BUS_CONFIG_ALL:
14408 mptsas_config_all(pdip);
14409 ret = NDI_SUCCESS;
14410 break;
14411 default:
14412 ret = NDI_FAILURE;
14413 break;
14414 }
14415
14416 if ((ret == NDI_SUCCESS) && bconfig) {
14417 ret = ndi_busop_bus_config(pdip, mflags, op,
14418 (devnm == NULL) ? arg : devnm, childp, 0);
14419 }
14420
14421 ndi_devi_exit(pdip, circ1);
14422 ndi_devi_exit(scsi_vhci_dip, circ);
14423 if (devnm != NULL)
14424 kmem_free(devnm, SCSI_MAXNAMELEN);
14425 return (ret);
14426 }
14427
14428 static int
14429 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
14430 mptsas_target_t *ptgt)
14431 {
14432 int rval = DDI_FAILURE;
14433 struct scsi_inquiry *sd_inq = NULL;
14441 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14442 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
14443 } else {
14444 rval = DDI_FAILURE;
14445 }
14446
14447 kmem_free(sd_inq, SUN_INQSIZE);
14448 return (rval);
14449 }
14450
14451 static int
14452 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
14453 dev_info_t **lundip)
14454 {
14455 int rval;
14456 mptsas_t *mpt = DIP2MPT(pdip);
14457 int phymask;
14458 mptsas_target_t *ptgt = NULL;
14459
14460 /*
14461 * The phymask exists if the port is active, otherwise
14462 * nothing to do.
14463 */
14464 if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
14465 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
14466 return (DDI_FAILURE);
14467
14468 /*
14469 * Get the physical port associated to the iport
14470 */
14471 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14472 "phymask", 0);
14473
14474 ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
14475 if (ptgt == NULL) {
14476 /*
14477 * didn't match any device by searching
14478 */
14479 return (DDI_FAILURE);
14480 }
14481 /*
14482 * If the LUN already exists and the status is online,
14483 * we just return the pointer to dev_info_t directly.
14484 * For the mdi_pathinfo node, we'll handle it in
14485 * mptsas_create_virt_lun()
14486 * TODO should be also in mptsas_handle_dr
14487 */
14488
14489 *lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
14490 if (*lundip != NULL) {
14491 /*
14492 * TODO Another senario is, we hotplug the same disk
14493 * on the same slot, the devhdl changed, is this
14494 * possible?
14495 * tgt_private->t_private != ptgt
14496 */
14497 if (sasaddr != ptgt->m_addr.mta_wwn) {
14498 /*
14499 * The device has changed although the devhdl is the
14500 * same (Enclosure mapping mode, change drive on the
14501 * same slot)
14502 */
14503 return (DDI_FAILURE);
14504 }
14505 return (DDI_SUCCESS);
14506 }
14507
14508 /*
14509 * If this is a RAID, configure the volumes
14510 */
14511 if (mpt->m_num_raid_configs > 0) {
14512 /*
14513 * Configure IR volume
14514 */
14515 rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
14516 return (rval);
14517 }
14518 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14519
14520 return (rval);
14521 }
14522
14523 static int
14524 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
14525 dev_info_t **lundip)
14526 {
14527 int rval;
14528 mptsas_t *mpt = DIP2MPT(pdip);
14529 mptsas_phymask_t phymask;
14530 mptsas_target_t *ptgt = NULL;
14531
14532 /*
14533 * The phymask exists if the port is active, otherwise
14534 * nothing to do.
14535 */
14536 if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
14537 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
14538 return (DDI_FAILURE);
14539 /*
14540 * Get the physical port associated to the iport
14541 */
14542 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14543 "phymask", 0);
14544
14545 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
14546 if (ptgt == NULL) {
14547 /*
14548 * didn't match any device by searching
14549 */
14550 return (DDI_FAILURE);
14551 }
14552
14553 /*
14554 * If the LUN already exists and the status is online,
14555 * we just return the pointer to dev_info_t directly.
14556 * For the mdi_pathinfo node, we'll handle it in
14557 * mptsas_create_virt_lun().
14558 */
14559
14699
14700 if (ret != DDI_SUCCESS)
14701 return (ret);
14702 buffer = (char *)repluns_bp->b_un.b_addr;
14703 /*
14704 * find out the number of luns returned by the SCSI ReportLun call
14705 * and allocate buffer space
14706 */
14707 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14708 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
14709 if (saved_repluns == NULL) {
14710 scsi_free_consistent_buf(repluns_bp);
14711 return (DDI_FAILURE);
14712 }
14713 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
14714 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
14715 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
14716 continue;
14717 }
14718 saved_repluns[lun_cnt] = lun_num;
14719 if ((cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num)) !=
14720 NULL) {
14721 ret = DDI_SUCCESS;
14722 } else {
14723 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
14724 ptgt);
14725 }
14726 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
14727 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
14728 MPTSAS_DEV_GONE);
14729 }
14730 }
14731 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
14732 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
14733 scsi_free_consistent_buf(repluns_bp);
14734 return (DDI_SUCCESS);
14735 }
14736
14737 static int
14738 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14739 {
14740 int rval = DDI_FAILURE;
14741 struct scsi_inquiry *sd_inq = NULL;
14742 mptsas_t *mpt = DIP2MPT(pdip);
14743 mptsas_target_t *ptgt = NULL;
14744
14745 mutex_enter(&mpt->m_mutex);
14836
14837 if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
14838 DDI_SUCCESS) {
14839 continue;
14840 }
14841
14842 if (wwid == sas_wwn) {
14843 for (i = 0; i < lun_cnt; i++) {
14844 if (repluns[i] == lun) {
14845 find = 1;
14846 break;
14847 }
14848 }
14849 } else {
14850 continue;
14851 }
14852 if (find == 0) {
14853 /*
14854 * The lun has not been there already
14855 */
14856 (void) mptsas_offline_lun(savechild, NULL);
14857 }
14858 }
14859
14860 pip = mdi_get_next_client_path(pdip, NULL);
14861 while (pip) {
14862 find = 0;
14863 savepip = pip;
14864 addr = MDI_PI(pip)->pi_addr;
14865
14866 pip = mdi_get_next_client_path(pdip, pip);
14867
14868 if (addr == NULL) {
14869 continue;
14870 }
14871
14872 if (mptsas_parse_address(addr, &sas_wwn, &phy,
14873 &lun) != DDI_SUCCESS) {
14874 continue;
14875 }
14876
14877 if (sas_wwn == wwid) {
14878 for (i = 0; i < lun_cnt; i++) {
14879 if (repluns[i] == lun) {
14880 find = 1;
14881 break;
14882 }
14883 }
14884 } else {
14885 continue;
14886 }
14887
14888 if (find == 0) {
14889 /*
14890 * The lun has not been there already
14891 */
14892 (void) mptsas_offline_lun(NULL, savepip);
14893 }
14894 }
14895 }
14896
14897 /*
14898 * If this enclosure doesn't exist in the enclosure list, add it. If it does,
14899 * update it.
14900 */
14901 static void
14902 mptsas_enclosure_update(mptsas_t *mpt, mptsas_enclosure_t *mep)
14903 {
14904 mptsas_enclosure_t *m;
14905
14906 ASSERT(MUTEX_HELD(&mpt->m_mutex));
14907 m = mptsas_enc_lookup(mpt, mep->me_enchdl);
14908 if (m != NULL) {
14909 uint8_t *ledp;
14910 m->me_flags = mep->me_flags;
14911
14912
14913 /*
14914 * If the number of slots and the first slot entry in the
14915 * enclosure has not changed, then we don't need to do anything
14916 * here. Otherwise, we need to allocate a new array for the LED
14917 * status of the slot.
14918 */
14919 if (m->me_fslot == mep->me_fslot &&
14920 m->me_nslots == mep->me_nslots)
14921 return;
14922
14923 /*
14924 * If the number of slots or the first slot has changed, it's
14925 * not clear that we're really in a place that we can continue
14926 * to honor the existing flags.
14927 */
14928 if (mep->me_nslots > 0) {
14929 ledp = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
14930 KM_SLEEP);
14931 } else {
14932 ledp = NULL;
14933 }
14934
14935 if (m->me_slotleds != NULL) {
14936 kmem_free(m->me_slotleds, sizeof (uint8_t) *
14937 m->me_nslots);
14938 }
14939 m->me_slotleds = ledp;
14940 m->me_fslot = mep->me_fslot;
14941 m->me_nslots = mep->me_nslots;
14942 return;
14943 }
14944
14945 m = kmem_zalloc(sizeof (*m), KM_SLEEP);
14946 m->me_enchdl = mep->me_enchdl;
14947 m->me_flags = mep->me_flags;
14948 m->me_nslots = mep->me_nslots;
14949 m->me_fslot = mep->me_fslot;
14950 if (m->me_nslots > 0) {
14951 m->me_slotleds = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
14952 KM_SLEEP);
14953 /*
14954 * It may make sense to optionally flush all of the slots and/or
14955 * read the slot status flag here to synchronize between
14956 * ourselves and the card. So far, that hasn't been needed
14957 * annecdotally when enumerating something new. If we do, we
14958 * should kick that off in a taskq potentially.
14959 */
14960 }
14961 list_insert_tail(&mpt->m_enclosures, m);
14962 }
14963
14964 static void
14965 mptsas_update_hashtab(struct mptsas *mpt)
14966 {
14967 uint32_t page_address;
14968 int rval = 0;
14969 uint16_t dev_handle;
14970 mptsas_target_t *ptgt = NULL;
14971 mptsas_smp_t smp_node;
14972
14973 /*
14974 * Get latest RAID info.
14975 */
14976 (void) mptsas_get_raid_info(mpt);
14977
14978 dev_handle = mpt->m_smp_devhdl;
14979 while (mpt->m_done_traverse_smp == 0) {
14980 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
15071 sp->m_deviceinfo = 0;
15072 }
15073 mpt->m_done_traverse_dev = 0;
15074 mpt->m_done_traverse_smp = 0;
15075 mpt->m_done_traverse_enc = 0;
15076 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
15077 mptsas_update_hashtab(mpt);
15078 }
15079
15080 static void
15081 mptsas_config_all(dev_info_t *pdip)
15082 {
15083 dev_info_t *smpdip = NULL;
15084 mptsas_t *mpt = DIP2MPT(pdip);
15085 int phymask = 0;
15086 mptsas_phymask_t phy_mask;
15087 mptsas_target_t *ptgt = NULL;
15088 mptsas_smp_t *psmp;
15089
15090 /*
15091 * The phymask exists if the port is active, otherwise
15092 * nothing to do.
15093 */
15094 if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
15095 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
15096 return;
15097
15098 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15099 "phymask", 0);
15100
15101 /*
15102 * If this is a RAID, enumerate the volumes
15103 */
15104 if (mpt->m_num_raid_configs > 0) {
15105 mptsas_config_all_viport(pdip);
15106 return;
15107 }
15108
15109 mutex_enter(&mpt->m_mutex);
15110
15111 if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp ||
15112 !mpt->m_done_traverse_enc) {
15113 mptsas_update_hashtab(mpt);
15114 }
15115
15116 for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
15117 psmp = refhash_next(mpt->m_smp_targets, psmp)) {
15118 phy_mask = psmp->m_addr.mta_phymask;
15119 if (phy_mask == phymask) {
15120 smpdip = NULL;
15121 mutex_exit(&mpt->m_mutex);
15122 (void) mptsas_online_smp(pdip, psmp, &smpdip);
15123 mutex_enter(&mpt->m_mutex);
15124 }
15171
15172 child = ddi_get_child(pdip);
15173 while (child) {
15174 addr = ddi_get_name_addr(child);
15175 prechild = child;
15176 child = ddi_get_next_sibling(child);
15177
15178 if (addr == NULL) {
15179 continue;
15180 }
15181 if ((cp = strchr(addr, ',')) == NULL) {
15182 continue;
15183 }
15184
15185 s = (uintptr_t)cp - (uintptr_t)addr;
15186
15187 if (strncmp(addr, name, s) != 0) {
15188 continue;
15189 }
15190
15191 tmp_rval = mptsas_offline_lun(prechild, NULL);
15192 if (tmp_rval != DDI_SUCCESS) {
15193 rval = DDI_FAILURE;
15194 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15195 prechild, MPTSAS_DEV_GONE) !=
15196 DDI_PROP_SUCCESS) {
15197 mptsas_log(mpt, CE_WARN, "mptsas driver "
15198 "unable to create property for "
15199 "SAS %s (MPTSAS_DEV_GONE)", addr);
15200 }
15201 }
15202 }
15203
15204 pip = mdi_get_next_client_path(pdip, NULL);
15205 while (pip) {
15206 addr = MDI_PI(pip)->pi_addr;
15207 savepip = pip;
15208 pip = mdi_get_next_client_path(pdip, pip);
15209 if (addr == NULL) {
15210 continue;
15211 }
15212
15213 if ((cp = strchr(addr, ',')) == NULL) {
15214 continue;
15215 }
15216
15217 s = (uintptr_t)cp - (uintptr_t)addr;
15218
15219 if (strncmp(addr, name, s) != 0) {
15220 continue;
15221 }
15222
15223 (void) mptsas_offline_lun(NULL, savepip);
15224 /*
15225 * driver will not invoke mdi_pi_free, so path will not
15226 * be freed forever, return DDI_FAILURE.
15227 */
15228 rval = DDI_FAILURE;
15229 }
15230 return (rval);
15231 }
15232
15233 static int
15234 mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip)
15235 {
15236 int rval = DDI_FAILURE;
15237
15238 if (rpip != NULL) {
15239 if (MDI_PI_IS_OFFLINE(rpip)) {
15240 rval = DDI_SUCCESS;
15241 } else {
15242 rval = mdi_pi_offline(rpip, 0);
15243 }
15244 } else if (rdip != NULL) {
15245 rval = ndi_devi_offline(rdip,
15246 NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE | NDI_DEVI_GONE);
15247 }
15248
15249 return (rval);
15250 }
15251
15252 static dev_info_t *
15253 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
15254 {
15255 dev_info_t *child = NULL;
15256 char *smp_wwn = NULL;
15257
15258 child = ddi_get_child(parent);
15259 while (child) {
15260 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
15261 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
15262 != DDI_SUCCESS) {
15263 child = ddi_get_next_sibling(child);
15264 continue;
15265 }
15266
15267 if (strcmp(smp_wwn, str_wwn) == 0) {
15268 ddi_prop_free(smp_wwn);
15269 break;
15270 }
15271 child = ddi_get_next_sibling(child);
15272 ddi_prop_free(smp_wwn);
15273 }
15274 return (child);
15275 }
15276
15277 static int
15278 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node)
15279 {
15280 int rval = DDI_FAILURE;
15281 char wwn_str[MPTSAS_WWN_STRLEN];
15282 dev_info_t *cdip;
15283
15284 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15285
15286 cdip = mptsas_find_smp_child(pdip, wwn_str);
15287 if (cdip == NULL)
15288 return (DDI_SUCCESS);
15289
15290 rval = ndi_devi_offline(cdip, NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
15291
15292 return (rval);
15293 }
15294
15295 static dev_info_t *
15296 mptsas_find_child(dev_info_t *pdip, char *name)
15297 {
15298 dev_info_t *child = NULL;
15299 char *rname = NULL;
15300 int rval = DDI_FAILURE;
15301
15302 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15303
15304 child = ddi_get_child(pdip);
15305 while (child) {
15306 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
15307 if (rval != DDI_SUCCESS) {
15308 child = ddi_get_next_sibling(child);
15309 bzero(rname, SCSI_MAXNAMELEN);
15310 continue;
15311 }
15406 */
15407 if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
15408 sd_inq->inq_dtype == DTYPE_OPTICAL ||
15409 sd_inq->inq_dtype == DTYPE_ESI))
15410 goto create_lun;
15411
15412 /*
15413 * The LCA returns good SCSI status, but corrupt page 83 data the first
15414 * time it is queried. The solution is to keep trying to request page83
15415 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
15416 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
15417 * give up to get VPD page at this stage and fail the enumeration.
15418 */
15419
15420 inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);
15421
15422 for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
15423 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
15424 inq83_len1, &inq83_len, 1);
15425 if (rval != 0) {
15426 mptsas_log(mpt, CE_WARN, "mptsas request inquiry page "
15427 "0x83 for target:%x, lun:%x failed!", target, lun);
15428 if (mptsas_physical_bind_failed_page_83 != B_FALSE)
15429 goto create_lun;
15430 goto out;
15431 }
15432 /*
15433 * create DEVID from inquiry data
15434 */
15435 if ((rval = ddi_devid_scsi_encode(
15436 DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
15437 sizeof (struct scsi_inquiry), NULL, 0, inq83,
15438 (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
15439 /*
15440 * extract GUID from DEVID
15441 */
15442 guid = ddi_devid_to_guid(devid);
15443
15444 /*
15445 * Do not enable MPXIO if the strlen(guid) is greater
15446 * than MPTSAS_MAX_GUID_LEN, this constrain would be
15447 * handled by framework later.
15448 */
15449 if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
15450 ddi_devid_free_guid(guid);
15451 guid = NULL;
15452 if (mpt->m_mpxio_enable == TRUE) {
15453 mptsas_log(mpt, CE_NOTE, "Target:%x, "
15454 "lun:%x doesn't have a valid GUID, "
15455 "multipathing for this drive is "
15456 "not enabled", target, lun);
15457 }
15458 }
15459
15460 /*
15461 * devid no longer needed
15462 */
15463 ddi_devid_free(devid);
15464 break;
15465 } else if (rval == DDI_NOT_WELL_FORMED) {
15466 /*
15467 * return value of ddi_devid_scsi_encode equal to
15468 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
15469 * to retry inquiry page 0x83 and get GUID.
15470 */
15471 NDBG20(("Not well formed devid, retry..."));
15472 delay(1 * drv_usectohz(1000000));
15473 continue;
15474 } else {
15475 mptsas_log(mpt, CE_WARN, "Encode devid failed for "
15476 "path target:%x, lun:%x", target, lun);
15477 rval = DDI_FAILURE;
15478 goto create_lun;
15479 }
15480 }
15481
15482 if (i == mptsas_inq83_retry_timeout) {
15483 mptsas_log(mpt, CE_WARN, "Repeated page83 requests timeout "
15484 "for path target:%x, lun:%x", target, lun);
15485 }
15486
15487 rval = DDI_FAILURE;
15488
15489 create_lun:
15490 if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
15491 rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
15492 ptgt, lun);
15493 }
15494 if (rval != DDI_SUCCESS) {
15495 rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
15496 ptgt, lun);
15497
15498 }
15499 out:
15500 if (guid != NULL) {
15501 /*
15502 * guid no longer needed
15503 */
15551 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15552 } else {
15553 *pip = mptsas_find_path_phy(pdip, phy);
15554 }
15555
15556 if (*pip != NULL) {
15557 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15558 ASSERT(*lun_dip != NULL);
15559 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15560 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15561 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15562 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15563 /*
15564 * Same path back online again.
15565 */
15566 (void) ddi_prop_free(old_guid);
15567 if ((!MDI_PI_IS_ONLINE(*pip)) &&
15568 (!MDI_PI_IS_STANDBY(*pip)) &&
15569 (ptgt->m_tgt_unconfigured == 0)) {
15570 rval = mdi_pi_online(*pip, 0);
15571 } else {
15572 rval = DDI_SUCCESS;
15573 }
15574 if (rval != DDI_SUCCESS) {
15575 mptsas_log(mpt, CE_WARN, "path:target: "
15576 "%x, lun:%x online failed!", target,
15577 lun);
15578 *pip = NULL;
15579 *lun_dip = NULL;
15580 }
15581 return (rval);
15582 } else {
15583 /*
15584 * The GUID of the LUN has changed which maybe
15585 * because customer mapped another volume to the
15586 * same LUN.
15587 */
15588 mptsas_log(mpt, CE_WARN, "The GUID of the "
15589 "target:%x, lun:%x was changed, maybe "
15590 "because someone mapped another volume "
15591 "to the same LUN", target, lun);
15592 (void) ddi_prop_free(old_guid);
15593 if (!MDI_PI_IS_OFFLINE(*pip)) {
15594 rval = mdi_pi_offline(*pip, 0);
15595 if (rval != MDI_SUCCESS) {
15596 mptsas_log(mpt, CE_WARN, "path:"
15597 "target:%x, lun:%x offline "
15598 "failed!", target, lun);
15599 *pip = NULL;
15600 *lun_dip = NULL;
15601 return (DDI_FAILURE);
15602 }
15603 }
15604 if (mdi_pi_free(*pip,
15605 MDI_CLIENT_FLAGS_NO_EVENT) != MDI_SUCCESS) {
15606 mptsas_log(mpt, CE_WARN, "path:target:"
15607 "%x, lun:%x free failed!", target,
15608 lun);
15609 *pip = NULL;
15610 *lun_dip = NULL;
15611 return (DDI_FAILURE);
15612 }
15613 }
15614 } else {
15615 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15616 "property for path:target:%x, lun:%x", target, lun);
15617 *pip = NULL;
15618 *lun_dip = NULL;
15619 return (DDI_FAILURE);
15620 }
15621 }
15622 scsi_hba_nodename_compatible_get(inq, NULL,
15623 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
15624
15625 /*
15826 mptsas_log(mpt, CE_WARN, "mptsas driver"
15827 "failed to create pm-capable "
15828 "property, target %d", target);
15829 mdi_rtn = MDI_FAILURE;
15830 goto virt_create_done;
15831 }
15832 }
15833 /*
15834 * Create the phy-num property
15835 */
15836 if (mdi_prop_update_int(*pip, "phy-num",
15837 ptgt->m_phynum) != DDI_SUCCESS) {
15838 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15839 "create phy-num property for target %d lun %d",
15840 target, lun);
15841 mdi_rtn = MDI_FAILURE;
15842 goto virt_create_done;
15843 }
15844 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15845 mdi_rtn = mdi_pi_online(*pip, 0);
15846 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15847 mdi_rtn = MDI_FAILURE;
15848 }
15849 virt_create_done:
15850 if (*pip && mdi_rtn != MDI_SUCCESS) {
15851 (void) mdi_pi_free(*pip, MDI_CLIENT_FLAGS_NO_EVENT);
15852 *pip = NULL;
15853 *lun_dip = NULL;
15854 }
15855 }
15856
15857 scsi_hba_nodename_compatible_free(nodename, compatible);
15858 if (lun_addr != NULL) {
15859 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15860 }
15861 if (wwn_str != NULL) {
15862 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15863 }
15864 if (component != NULL) {
15865 kmem_free(component, MAXPATHLEN);
15866 }
15867
15868 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15869 }
15870
15871 static int
16198 if (ndi_prop_update_int(DDI_DEV_T_NONE,
16199 *lun_dip, "phy-num", ptgt->m_phynum) !=
16200 DDI_PROP_SUCCESS) {
16201 mptsas_log(mpt, CE_WARN, "mptsas driver "
16202 "failed to create phy-num property for "
16203 "target %d", target);
16204 ndi_rtn = NDI_FAILURE;
16205 goto phys_create_done;
16206 }
16207 }
16208 phys_create_done:
16209 /*
16210 * If props were setup ok, online the lun
16211 */
16212 if (ndi_rtn == NDI_SUCCESS) {
16213 /*
16214 * Try to online the new node
16215 */
16216 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
16217 }
16218
16219 /*
16220 * If success set rtn flag, else unwire alloc'd lun
16221 */
16222 if (ndi_rtn != NDI_SUCCESS) {
16223 NDBG12(("mptsas driver unable to online "
16224 "target %d lun %d", target, lun));
16225 ndi_prop_remove_all(*lun_dip);
16226 (void) ndi_devi_free(*lun_dip);
16227 *lun_dip = NULL;
16228 }
16229 }
16230
16231 scsi_hba_nodename_compatible_free(nodename, compatible);
16232
16233 if (wwn_str != NULL) {
16234 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
16235 }
16236 if (component != NULL) {
16237 kmem_free(component, MAXPATHLEN);
16249
16250 /* XXX An HBA driver should not be allocating an smp_device. */
16251 bzero(&smp_sd, sizeof (struct smp_device));
16252 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
16253 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
16254
16255 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
16256 return (NDI_FAILURE);
16257 return (NDI_SUCCESS);
16258 }
16259
16260 static int
16261 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
16262 {
16263 mptsas_t *mpt = DIP2MPT(pdip);
16264 mptsas_smp_t *psmp = NULL;
16265 int rval;
16266 int phymask;
16267
16268 /*
16269 * The phymask exists if the port is active, otherwise
16270 * nothing to do.
16271 */
16272 if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
16273 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
16274 return (DDI_FAILURE);
16275
16276 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
16277 "phymask", 0);
16278 /*
16279 * Find the smp node in hash table with specified sas address and
16280 * physical port
16281 */
16282 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
16283 if (psmp == NULL) {
16284 return (DDI_FAILURE);
16285 }
16286
16287 rval = mptsas_online_smp(pdip, psmp, smp_dip);
16288
16289 return (rval);
16290 }
16291
16292 static int
16293 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
16294 dev_info_t **smp_dip)
16295 {
16888 */
16889 if (ret_data != NULL) {
16890 mptsas_smp_target_copy(data, ret_data);
16891 return (ret_data);
16892 }
16893
16894 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16895 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16896 refhash_insert(mpt->m_smp_targets, ret_data);
16897 return (ret_data);
16898 }
16899
16900 /*
16901 * Functions for SGPIO LED support
16902 */
16903 static dev_info_t *
16904 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16905 {
16906 dev_info_t *dip;
16907 int prop;
16908
16909 dip = e_ddi_hold_devi_by_dev(dev, 0);
16910 if (dip == NULL)
16911 return (dip);
16912
16913 /*
16914 * The phymask exists if the port is active, otherwise
16915 * nothing to do.
16916 */
16917 if (ddi_prop_exists(DDI_DEV_T_ANY, dip,
16918 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0) {
16919 ddi_release_devi(dip);
16920 return ((dev_info_t *)NULL);
16921 }
16922
16923 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16924 "phymask", 0);
16925
16926 *phymask = (mptsas_phymask_t)prop;
16927 ddi_release_devi(dip);
16928 return (dip);
16929 }
16930 static mptsas_target_t *
16931 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16932 {
16933 uint8_t phynum;
16934 uint64_t wwn;
16935 int lun;
16936 mptsas_target_t *ptgt = NULL;
16937
16938 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16939 return (NULL);
16940 }
16941 if (addr[0] == 'w') {
16942 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16943 } else {
16944 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16945 }
16946 return (ptgt);
16947 }
16948
16949 static int
16950 mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx)
16951 {
16952 uint32_t slotstatus = 0;
16953
16954 ASSERT3U(idx, <, mep->me_nslots);
16955
16956 /* Build an MPI2 Slot Status based on our view of the world */
16957 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16958 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16959 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16960 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16961 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16962 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16963
16964 /* Write it to the controller */
16965 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16966 slotstatus, idx + mep->me_fslot));
16967 return (mptsas_send_sep(mpt, mep, idx, &slotstatus,
16968 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16969 }
16970
16971 /*
16972 * send sep request, use enclosure/slot addressing
16973 */
16974 static int
16975 mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
16976 uint32_t *status, uint8_t act)
16977 {
16978 Mpi2SepRequest_t req;
16979 Mpi2SepReply_t rep;
16980 int ret;
16981 uint16_t enctype;
16982 uint16_t slot;
16983
16984 ASSERT(mutex_owned(&mpt->m_mutex));
16985
16986 /*
16987 * Look through the enclosures and make sure that this enclosure is
16988 * something that is directly attached device. If we didn't find an
16989 * enclosure for this device, don't send the ioctl.
16990 */
16991 enctype = mep->me_flags & MPI2_SAS_ENCLS0_FLAGS_MNG_MASK;
16992 if (enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES &&
16993 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO &&
16994 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO) {
16995 return (ENOTTY);
16996 }
16997 slot = idx + mep->me_fslot;
16998
16999 bzero(&req, sizeof (req));
17000 bzero(&rep, sizeof (rep));
17001
17002 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
17003 req.Action = act;
17004 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
17005 req.EnclosureHandle = LE_16(mep->me_enchdl);
17006 req.Slot = LE_16(slot);
17007 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
17008 req.SlotStatus = LE_32(*status);
17009 }
17010 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
17011 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
17012 if (ret != 0) {
17013 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
17014 "Processor Request message error %d", ret);
17015 return (ret);
17016 }
17017 /* do passthrough success, check the ioc status */
17018 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
17019 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
17020 "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
17021 LE_32(rep.IOCLogInfo));
17022 switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
17023 case MPI2_IOCSTATUS_INVALID_FUNCTION:
17024 case MPI2_IOCSTATUS_INVALID_VPID:
17025 case MPI2_IOCSTATUS_INVALID_FIELD:
17026 case MPI2_IOCSTATUS_INVALID_STATE:
|