198 static int mrsas_parse_devname(char *, int *, int *);
199 static int mrsas_config_all_devices(struct mrsas_instance *);
200 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
201 uint8_t, dev_info_t **);
202 static int mrsas_name_node(dev_info_t *, char *, int);
203 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
204 static void free_additional_dma_buffer(struct mrsas_instance *);
205 static void io_timeout_checker(void *);
206 static void mrsas_fm_init(struct mrsas_instance *);
207 static void mrsas_fm_fini(struct mrsas_instance *);
208
209 static struct mrsas_function_template mrsas_function_template_ppc = {
210 .read_fw_status_reg = read_fw_status_reg_ppc,
211 .issue_cmd = issue_cmd_ppc,
212 .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
213 .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
214 .enable_intr = enable_intr_ppc,
215 .disable_intr = disable_intr_ppc,
216 .intr_ack = intr_ack_ppc,
217 .init_adapter = mrsas_init_adapter_ppc
219 };
220
221
222 static struct mrsas_function_template mrsas_function_template_fusion = {
223 .read_fw_status_reg = tbolt_read_fw_status_reg,
224 .issue_cmd = tbolt_issue_cmd,
225 .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
226 .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
227 .enable_intr = tbolt_enable_intr,
228 .disable_intr = tbolt_disable_intr,
229 .intr_ack = tbolt_intr_ack,
230 .init_adapter = mrsas_init_adapter_tbolt
231 /* .reset_adapter = mrsas_reset_adapter_tbolt */
232 };
233
234
235 ddi_dma_attr_t mrsas_generic_dma_attr = {
236 DMA_ATTR_V0, /* dma_attr_version */
237 0, /* low DMA address range */
238 0xFFFFFFFFU, /* high DMA address range */
286 DEVO_REV, /* rev, */
287 0, /* refcnt */
288 mrsas_getinfo, /* getinfo */
289 nulldev, /* identify */
290 nulldev, /* probe */
291 mrsas_attach, /* attach */
292 mrsas_detach, /* detach */
293 #ifdef __sparc
294 mrsas_reset, /* reset */
295 #else /* __sparc */
296 nodev,
297 #endif /* __sparc */
298 &mrsas_cb_ops, /* char/block ops */
299 NULL, /* bus ops */
300 NULL, /* power */
301 #ifdef __sparc
302 ddi_quiesce_not_needed
303 #else /* __sparc */
304 mrsas_quiesce /* quiesce */
305 #endif /* __sparc */
307 };
308
309 static struct modldrv modldrv = {
310 &mod_driverops, /* module type - driver */
311 MRSAS_VERSION,
312 &mrsas_ops, /* driver ops */
313 };
314
315 static struct modlinkage modlinkage = {
316 MODREV_1, /* ml_rev - must be MODREV_1 */
317 &modldrv, /* ml_linkage */
318 NULL /* end of driver linkage */
319 };
320
321 static struct ddi_device_acc_attr endian_attr = {
322 DDI_DEVICE_ATTR_V1,
323 DDI_STRUCTURE_LE_ACC,
324 DDI_STRICTORDER_ACC,
325 DDI_DEFAULT_ACC
326 };
327
328 /* Use the LSI Fast Path for the 2208 (tbolt) commands. */
329 unsigned int enable_fp = 1;
330
331
332 /*
333 * ************************************************************************** *
334 * *
335 * common entry points - for loadable kernel modules *
336 * *
337 * ************************************************************************** *
338 */
339
340 /*
341 * _init - initialize a loadable module
342 * @void
343 *
344 * The driver should perform any one-time resource allocation or data
345 * initialization during driver loading in _init(). For example, the driver
346 * should initialize any mutexes global to the driver in this routine.
347 * The driver should not, however, use _init() to allocate or initialize
348 * anything that has to do with a particular instance of the device.
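 *
 * A minimal sketch (illustrative only, not this driver's verbatim code)
 * of the usual illumos SCSI HBA _init() ordering:
 *
 *	ret = ddi_soft_state_init(&mrsas_state,
 *	    sizeof (struct mrsas_instance), 0);
 *	if (ret != 0)
 *		return (ret);
 *	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
 *		ddi_soft_state_fini(&mrsas_state);
 *		return (ret);
 *	}
 *	if ((ret = mod_install(&modlinkage)) != 0) {
 *		scsi_hba_fini(&modlinkage);
 *		ddi_soft_state_fini(&mrsas_state);
 *	}
 *	return (ret);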
496 case DDI_ATTACH:
497 /* allocate the soft state for the instance */
498 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
499 != DDI_SUCCESS) {
500 cmn_err(CE_WARN,
501 "mr_sas%d: Failed to allocate soft state",
502 instance_no);
503 return (DDI_FAILURE);
504 }
505
506 instance = (struct mrsas_instance *)ddi_get_soft_state
507 (mrsas_state, instance_no);
508
509 if (instance == NULL) {
510 cmn_err(CE_WARN,
511 "mr_sas%d: Bad soft state", instance_no);
512 ddi_soft_state_free(mrsas_state, instance_no);
513 return (DDI_FAILURE);
514 }
515
518 instance->unroll.softs = 1;
519
520 /* Setup the PCI configuration space handles */
521 if (pci_config_setup(dip, &instance->pci_handle) !=
522 DDI_SUCCESS) {
523 cmn_err(CE_WARN,
524 "mr_sas%d: pci config setup failed ",
525 instance_no);
526
527 ddi_soft_state_free(mrsas_state, instance_no);
528 return (DDI_FAILURE);
529 }
539
540 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
541 cmn_err(CE_WARN,
542 "mr_sas: failed to get registers.");
543
544 pci_config_teardown(&instance->pci_handle);
545 ddi_soft_state_free(mrsas_state, instance_no);
546 return (DDI_FAILURE);
547 }
548
549 vendor_id = pci_config_get16(instance->pci_handle,
550 PCI_CONF_VENID);
551 device_id = pci_config_get16(instance->pci_handle,
552 PCI_CONF_DEVID);
553
554 subsysvid = pci_config_get16(instance->pci_handle,
555 PCI_CONF_SUBVENID);
556 subsysid = pci_config_get16(instance->pci_handle,
557 PCI_CONF_SUBSYSID);
558
559 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
637 /* Setup register map */
638 if ((ddi_dev_regsize(instance->dip,
639 REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
640 reglength < MINIMUM_MFI_MEM_SZ) {
641 goto fail_attach;
642 }
643 if (reglength > DEFAULT_MFI_MEM_SZ) {
644 reglength = DEFAULT_MFI_MEM_SZ;
645 con_log(CL_DLEVEL1, (CE_NOTE,
646 "mr_sas: register length to map is 0x%lx bytes",
647 reglength));
648 }
649 if (ddi_regs_map_setup(instance->dip,
650 REGISTER_SET_IO_2108, &instance->regmap, 0,
651 reglength, &endian_attr, &instance->regmap_handle)
652 != DDI_SUCCESS) {
653 cmn_err(CE_WARN,
654 "mr_sas: couldn't map control registers");
655 goto fail_attach;
656 }
662
663 instance->unroll.regs = 1;
664
665 /*
666 * Disable Interrupt Now.
667 * Setup Software interrupt
668 */
669 instance->func_ptr->disable_intr(instance);
670
671 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
672 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
673 if (strncmp(data, "no", 3) == 0) {
674 msi_enable = 0;
675 con_log(CL_ANN1, (CE_WARN,
676 "msi_enable = %d disabled", msi_enable));
677 }
678 ddi_prop_free(data);
679 }
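/*
 * Example (assumed usage, not a shipped default): MSI can be disabled
 * from the driver's .conf file (e.g. mr_sas.conf) with a property line
 * such as:
 *
 *	mrsas-enable-msi="no";
 */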
680
681 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
1228 if (instance->unroll.syncCmd == 1) {
1229 if (instance->tbolt) {
1230 if (abort_syncmap_cmd(instance,
1231 instance->map_update_cmd)) {
1232 cmn_err(CE_WARN, "mrsas_detach: "
1233 "failed to abort previous syncmap command");
1234 }
1235
1236 instance->unroll.syncCmd = 0;
1237 con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
1238 }
1239 }
1240
1241 if (instance->unroll.aenPend == 1) {
1242 if (abort_aen_cmd(instance, instance->aen_cmd))
1243 cmn_err(CE_WARN, "mrsas_detach: "
1244 "failed to abort prevous AEN command");
1245
1246 instance->unroll.aenPend = 0;
1247 con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
1248 /* This means the controller is fully initialized and running */
1249 /* Shutdown should be a last command to controller. */
1250 /* shutdown_controller(); */
1251 }
1252
1253
1254 if (instance->unroll.timer == 1) {
1255 if (instance->timeout_id != (timeout_id_t)-1) {
1256 (void) untimeout(instance->timeout_id);
1257 instance->timeout_id = (timeout_id_t)-1;
1258
1259 instance->unroll.timer = 0;
1260 }
1261 }
1262
1263 instance->func_ptr->disable_intr(instance);
1264
1265
1266 if (instance->unroll.mutexs == 1) {
1267 mutex_destroy(&instance->cmd_pool_mtx);
1268 mutex_destroy(&instance->app_cmd_pool_mtx);
1457
1458 struct mrsas_instance *instance;
1459 struct mrsas_ioctl *ioctl;
1460 struct mrsas_aen aen;
1461 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1462
1463 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1464
1465 if (instance == NULL) {
1466 /* invalid minor number */
1467 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1468 return (ENXIO);
1469 }
1470
1471 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1472 KM_SLEEP);
1473 if (ioctl == NULL) {
1474 /* Failed to allocate memory for ioctl */
1475 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: "
1476 "failed to allocate memory for ioctl"));
1477 return (ENOMEM);
1478 }
1479
1480 switch ((uint_t)cmd) {
1481 case MRSAS_IOCTL_FIRMWARE:
1482 if (ddi_copyin((void *)arg, ioctl,
1483 sizeof (struct mrsas_ioctl), mode)) {
1484 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1485 "ERROR IOCTL copyin"));
1486 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1487 return (EFAULT);
1488 }
1489
1490 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1491 rval = handle_drv_ioctl(instance, ioctl, mode);
1492 } else {
1493 rval = handle_mfi_ioctl(instance, ioctl, mode);
1494 }
1495
1496 if (ddi_copyout((void *)ioctl, (void *)arg,
1497 (sizeof (struct mrsas_ioctl) - 1), mode)) {
2531 * *
2532 * ************************************************************************** *
2533 */
2534 /*
2535 * get_mfi_pkt : Get a command from the free pool
2536 * After successful allocation, the caller of this routine
2537 * must clear the frame buffer (memset to zero) before
2538 * using the packet further.
2539 *
2540 * ***** Note *****
2541 * After clearing the frame buffer the context id of the
2542 * frame buffer SHOULD be restored back.
2543 */
2544 static struct mrsas_cmd *
2545 get_mfi_pkt(struct mrsas_instance *instance)
2546 {
2547 mlist_t *head = &instance->cmd_pool_list;
2548 struct mrsas_cmd *cmd = NULL;
2549
2550 mutex_enter(&instance->cmd_pool_mtx);
2552
2553 if (!mlist_empty(head)) {
2554 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2555 mlist_del_init(head->next);
2556 }
2557 if (cmd != NULL) {
2558 cmd->pkt = NULL;
2559 cmd->retry_count_for_ocr = 0;
2560 cmd->drv_pkt_time = 0;
2561
2562 }
2563 mutex_exit(&instance->cmd_pool_mtx);
2564
2565 return (cmd);
2566 }
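/*
 * Usage sketch (illustrative only, not lifted from a specific caller):
 * per the note above, the frame is cleared and its context id restored
 * before the command is built and issued, e.g.:
 *
 *	cmd = get_mfi_pkt(instance);
 *	if (cmd == NULL)
 *		return (DDI_FAILURE);
 *	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
 *	ddi_put32(cmd->frame_dma_obj.acc_handle,
 *	    &cmd->frame->hdr.context, cmd->index);
 *	... build the frame, then issue it ...
 *	return_mfi_pkt(instance, cmd);
 */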
2567
2568 static struct mrsas_cmd *
2569 get_mfi_app_pkt(struct mrsas_instance *instance)
2570 {
2571 mlist_t *head = &instance->app_cmd_pool_list;
2572 struct mrsas_cmd *cmd = NULL;
2573
2574 mutex_enter(&instance->app_cmd_pool_mtx);
2576
2577 if (!mlist_empty(head)) {
2578 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2579 mlist_del_init(head->next);
2580 }
2581 if (cmd != NULL) {
2582 cmd->pkt = NULL;
2583 cmd->retry_count_for_ocr = 0;
2584 cmd->drv_pkt_time = 0;
2585 }
2586
2587 mutex_exit(&instance->app_cmd_pool_mtx);
2588
2589 return (cmd);
2590 }
2591 /*
2592 * return_mfi_pkt : Return a cmd to free command pool
2593 */
2594 static void
2595 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2596 {
2597 mutex_enter(&instance->cmd_pool_mtx);
2599 /* use mlist_add_tail for debug assistance */
2600 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2601
2602 mutex_exit(&instance->cmd_pool_mtx);
2603 }
2604
2605 static void
2606 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2607 {
2608 mutex_enter(&instance->app_cmd_pool_mtx);
2610
2611 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2612
2613 mutex_exit(&instance->app_cmd_pool_mtx);
2614 }
2615 void
2616 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2617 {
2618 struct scsi_pkt *pkt;
2619 struct mrsas_header *hdr;
2620 con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
2621 mutex_enter(&instance->cmd_pend_mtx);
2623 mlist_del_init(&cmd->list);
2624 mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
2625 if (cmd->sync_cmd == MRSAS_TRUE) {
2626 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2627 if (hdr) {
2628 con_log(CL_ANN1, (CE_CONT,
2629 "push_pending_mfi_pkt: "
2630 "cmd %p index %x "
2631 "time %llx",
2632 (void *)cmd, cmd->index,
2633 gethrtime()));
2634 /* Wait for specified interval */
2635 cmd->drv_pkt_time = ddi_get16(
2636 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2637 if (cmd->drv_pkt_time < debug_timeout_g)
2638 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2639 con_log(CL_ANN1, (CE_CONT,
2640 "push_pending_pkt(): "
2641 "Called IO Timeout Value %x\n",
2642 cmd->drv_pkt_time));
2661 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2662 }
2663 }
2664
2665 mutex_exit(&instance->cmd_pend_mtx);
2666
2667 }
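/*
 * Note: drv_pkt_time set above is the per-command watchdog value that
 * io_timeout_checker(), armed above via timeout(9F) at MRSAS_1_SECOND
 * intervals, consults when it scans cmd_pend_list for stalled commands.
 */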
2668
2669 int
2670 mrsas_print_pending_cmds(struct mrsas_instance *instance)
2671 {
2672 mlist_t *head = &instance->cmd_pend_list;
2673 mlist_t *tmp = head;
2674 struct mrsas_cmd *cmd = NULL;
2675 struct mrsas_header *hdr;
2676 unsigned int flag = 1;
2677 struct scsi_pkt *pkt;
2678 int saved_level;
2679 int cmd_count = 0;
2680
2682 saved_level = debug_level_g;
2683 debug_level_g = CL_ANN1;
2684
2685 cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");
2686
2687 while (flag) {
2688 mutex_enter(&instance->cmd_pend_mtx);
2689 tmp = tmp->next;
2690 if (tmp == head) {
2691 mutex_exit(&instance->cmd_pend_mtx);
2692 flag = 0;
2693 con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
2694 " NO MORE CMDS PENDING....\n"));
2695 break;
2696 } else {
2697 cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2698 mutex_exit(&instance->cmd_pend_mtx);
2699 if (cmd) {
2700 if (cmd->sync_cmd == MRSAS_TRUE) {
2701 hdr = (struct mrsas_header *)
3667 cmd = get_mfi_pkt(instance);
3668
3669 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3670 con_log(CL_ANN,
3671 (CE_NOTE, "Error, failed to build INIT command"));
3672
3673 goto fail_undo_alloc_mfi_space;
3674 }
3675
3676 /*
3677 * Disable interrupt before sending init frame ( see linux driver code)
3678 * send INIT MFI frame in polled mode
3679 */
3680 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3681 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3682 goto fail_fw_init;
3683 }
3684
3685 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3686 goto fail_fw_init;
3687 return_mfi_pkt(instance, cmd);
3688
3689 if (ctio_enable &&
3690 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3691 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3692 instance->flag_ieee = 1;
3693 } else {
3694 instance->flag_ieee = 0;
3695 }
3696
3697 instance->unroll.alloc_space_mfi = 1;
3698 instance->unroll.verBuff = 1;
3699
3700 return (DDI_SUCCESS);
3701
3702
3703 fail_fw_init:
3704 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3705
3706 fail_undo_alloc_mfi_space:
3707 return_mfi_pkt(instance, cmd);
4867 con_log(CL_ANN, (CE_WARN,
4868 "Failed ddi_dma_alloc_handle: "
4869 "unknown status %d", i));
4870 break;
4871 }
4872
4873 return (-1);
4874 }
4875
4876 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4877 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4878 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4879 alen < obj->size) {
4880
4881 ddi_dma_free_handle(&obj->dma_handle);
4882
4883 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4884
4885 return (-1);
4886 }
4892
4893 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4894 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4895 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4896
4897 ddi_dma_mem_free(&obj->acc_handle);
4898 ddi_dma_free_handle(&obj->dma_handle);
4899
4900 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4901
4902 return (-1);
4903 }
4912
4913 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4914 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4915 return (-1);
4916 }
4917
4918 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4919 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4920 return (-1);
4921 }
4922
4923 return (cookie_cnt);
4924 }
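/*
 * Usage sketch (illustrative only; the dma_obj_t field names match those
 * used elsewhere in this driver):
 *
 *	obj.size = len;
 *	obj.dma_attr = mrsas_generic_dma_attr;
 *	obj.dma_attr.dma_attr_sgllen = 1;
 *	if (mrsas_alloc_dma_obj(instance, &obj,
 *	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1)
 *		return (DDI_FAILURE);
 *	... use obj.buffer and obj.dma_cookie[0].dmac_address ...
 *	(void) mrsas_free_dma_obj(instance, obj);
 */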
4925
4926 /*
4927 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4928 *
4929 * De-allocate the memory and other resources for a DMA object, which must
4930 * have been allocated by a previous call to mrsas_alloc_dma_obj()
4931 */
4932 /* ARGSUSED */
6830 if (pkt) {
6831 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6832 "ISSUED CMD TO FW : called : cmd:"
6833 ": %p instance : %p pkt : %p pkt_time : %x\n",
6834 gethrtime(), (void *)cmd, (void *)instance,
6835 (void *)pkt, cmd->drv_pkt_time));
6836 if (instance->adapterresetinprogress) {
6837 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6838 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6839 } else {
6840 push_pending_mfi_pkt(instance, cmd);
6841 }
6842
6843 } else {
6844 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6845 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6846 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6847 }
6848
6849 mutex_enter(&instance->reg_write_mtx);
6851 /* Issue the command to the FW */
6852 WR_IB_QPORT((cmd->frame_phys_addr) |
6853 (((cmd->frame_count - 1) << 1) | 1), instance);
6854 mutex_exit(&instance->reg_write_mtx);
6855
6856 }
6857
6858 /*
6859 * issue_cmd_in_sync_mode
6860 */
6861 static int
6862 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6863 struct mrsas_cmd *cmd)
6864 {
6865 int i;
6866 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
6867 struct mrsas_header *hdr = &cmd->frame->hdr;
6868
6869 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6870
6871 if (instance->adapterresetinprogress) {
6872 cmd->drv_pkt_time = ddi_get16(
6873 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6874 if (cmd->drv_pkt_time < debug_timeout_g)
6875 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6876
6877 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6878 "issue and return in reset case\n"));
6879 WR_IB_QPORT((cmd->frame_phys_addr) |
6880 (((cmd->frame_count - 1) << 1) | 1), instance);
6881
6882 return (DDI_SUCCESS);
6883 } else {
6884 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6885 push_pending_mfi_pkt(instance, cmd);
6886 }
6887
6888 cmd->cmd_status = ENODATA;
6889
6890 mutex_enter(&instance->reg_write_mtx);
6892 /* Issue the command to the FW */
6893 WR_IB_QPORT((cmd->frame_phys_addr) |
6894 (((cmd->frame_count - 1) << 1) | 1), instance);
6895 mutex_exit(&instance->reg_write_mtx);
6896
6897 mutex_enter(&instance->int_cmd_mtx);
6898 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6899 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6900 }
6901 mutex_exit(&instance->int_cmd_mtx);
6902
6903 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6904
6905 if (i < (msecs -1)) {
6906 return (DDI_SUCCESS);
6907 } else {
6908 return (DDI_FAILURE);
6909 }
6910 }
6911