2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /* Copyright 2010 QLogic Corporation */
  23 
  24 /*
  25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  26  */
  27 /*
  28  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  29  * Copyright (c) 2016 by Delphix. All rights reserved.
  30  */
  31 
  32 #pragma ident   "Copyright 2010 QLogic Corporation; ql_api.c"
  33 
  34 /*
  35  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
  36  *
  37  * ***********************************************************************
  38  * *                                                                    **
  39  * *                            NOTICE                                  **
  40  * *            COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION              **
  41  * *                    ALL RIGHTS RESERVED                             **
  42  * *                                                                    **
  43  * ***********************************************************************
  44  *
  45  */
  46 
  47 #include <ql_apps.h>
  48 #include <ql_api.h>
  49 #include <ql_debug.h>
  50 #include <ql_init.h>
  51 #include <ql_iocb.h>
  52 #include <ql_ioctl.h>
  53 #include <ql_isr.h>
  54 #include <ql_mbx.h>
  55 #include <ql_nx.h>
  56 #include <ql_xioctl.h>
  57 
  58 /*
  59  * Solaris external defines.
  60  */
  61 extern pri_t minclsyspri;
  62 extern pri_t maxclsyspri;
  63 
  64 /*
  65  * dev_ops functions prototypes
  66  */
  67 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
  68 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
  69 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
  70 static int ql_power(dev_info_t *, int, int);
  71 static int ql_quiesce(dev_info_t *);
  72 
  73 /*
  74  * FCA functions prototypes exported by means of the transport table
  75  */
  76 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
  77     fc_fca_bind_info_t *);
  78 static void ql_unbind_port(opaque_t);
  79 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
  80 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
  81 static int ql_els_send(opaque_t, fc_packet_t *);
  82 static int ql_get_cap(opaque_t, char *, void *);
  83 static int ql_set_cap(opaque_t, char *, void *);
  84 static int ql_getmap(opaque_t, fc_lilpmap_t *);
  85 static int ql_transport(opaque_t, fc_packet_t *);
  86 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
  87 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
  88 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
  89 static int ql_abort(opaque_t, fc_packet_t *, int);
  90 static int ql_reset(opaque_t, uint32_t);
  91 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
  92 static opaque_t ql_get_device(opaque_t, fc_portid_t);
  93 
  94 /*
  95  * FCA Driver Support Function Prototypes.
  96  */
  97 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
  98 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
  99     ql_srb_t *);
 100 static void ql_task_daemon(void *);
 101 static void ql_task_thread(ql_adapter_state_t *);
 102 static void ql_unsol_callback(ql_srb_t *);
 103 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
 104     fc_unsol_buf_t *);
 105 static void ql_timer(void *);
 106 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
 107 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
 108     uint32_t *, uint32_t *);
 109 static void ql_halt(ql_adapter_state_t *, int);
 110 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
 111 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
 112 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
 113 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
 114 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
 115 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
 116 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
 117 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
 118 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
 119 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
 120 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
 121 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
 122 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
 123 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
 124 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
 125 static int ql_login_port(ql_adapter_state_t *, port_id_t);
 126 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
 127 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
 128 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
 129 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
 130 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
 131 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
 132 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
 133 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
 134     ql_srb_t *);
 135 static int ql_kstat_update(kstat_t *, int);
 136 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
 137 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
 138 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
 139 static void ql_rst_aen(ql_adapter_state_t *);
 140 static void ql_restart_queues(ql_adapter_state_t *);
 141 static void ql_abort_queues(ql_adapter_state_t *);
 142 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
 143 static void ql_idle_check(ql_adapter_state_t *);
 144 static int ql_loop_resync(ql_adapter_state_t *);
 145 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 146 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 147 static int ql_save_config_regs(dev_info_t *);
 148 static int ql_restore_config_regs(dev_info_t *);
 149 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
 150 static int ql_handle_rscn_update(ql_adapter_state_t *);
 151 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
 152 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
 153 static int ql_dump_firmware(ql_adapter_state_t *);
 154 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
 155 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
 156 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
 157 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
 158 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
 159 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
 160 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
 161     void *);
 162 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
 163     uint8_t);
 164 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
 165 static int ql_suspend_adapter(ql_adapter_state_t *);
 166 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
 167 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
 168 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
 169 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
 170 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
 171 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
 172 static int ql_setup_interrupts(ql_adapter_state_t *);
 173 static int ql_setup_msi(ql_adapter_state_t *);
 174 static int ql_setup_msix(ql_adapter_state_t *);
 175 static int ql_setup_fixed(ql_adapter_state_t *);
 176 static void ql_release_intr(ql_adapter_state_t *);
 177 static void ql_disable_intr(ql_adapter_state_t *);
 178 static int ql_legacy_intr(ql_adapter_state_t *);
 179 static int ql_init_mutex(ql_adapter_state_t *);
 180 static void ql_destroy_mutex(ql_adapter_state_t *);
 181 static void ql_iidma(ql_adapter_state_t *);
 182 
 183 static int ql_n_port_plogi(ql_adapter_state_t *);
 184 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
 185     els_descriptor_t *);
 186 static void ql_isp_els_request_ctor(els_descriptor_t *,
 187     els_passthru_entry_t *);
 188 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
 189 static int ql_wait_for_td_stop(ql_adapter_state_t *);
 190 static void ql_process_idc_event(ql_adapter_state_t *);
 191 
 192 /*
 193  * Global data
 194  */
 195 static uint8_t  ql_enable_pm = 1;
 196 static int      ql_flash_sbus_fpga = 0;
 197 uint32_t        ql_os_release_level;
 198 uint32_t        ql_disable_aif = 0;
 199 uint32_t        ql_disable_msi = 0;
 200 uint32_t        ql_disable_msix = 0;
 201 uint32_t        ql_enable_ets = 0;
 202 uint16_t        ql_osc_wait_count = 1000;
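/*
 * Note: the tunables above are plain globals, so they can typically be
 * overridden at boot via /etc/system, e.g. "set qlc:ql_disable_msix = 1"
 * (illustrative only; substitute the tunable of interest).
 */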
 203 
 204 /* Timer routine variables. */
 205 static timeout_id_t     ql_timer_timeout_id = NULL;
 206 static clock_t          ql_timer_ticks;
 207 
 208 /* Soft state head pointer. */
 209 void *ql_state = NULL;
 210 
 211 /* Head adapter link. */
 212 ql_head_t ql_hba = {
 213         NULL,
 214         NULL
 215 };
 216 
 217 /* Global hba index */
 218 uint32_t ql_gfru_hba_index = 1;
 219 
 220 /*
 221  * Some IP defines and globals
 222  */
 
 437         0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
 438         0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
 439 
 440         0xff,   /* fpm_diag_config  - n/a */
 441         0xff,   /* pcr - n/a */
 442         0xff,   /* mctr - n/a */
 443         0xff,   /* fb_cmd - n/a */
 444         0x48,   /* hccr */
 445         0x4c,   /* gpiod */
 446         0x50,   /* gpioe */
 447         0xff,   /* host_to_host_sema - n/a */
 448         0x2c,   /* pri_req_in */
 449         0x30,   /* pri_req_out */
 450         0x3c,   /* atio_req_in */
 451         0x40,   /* atio_req_out */
 452         0x54,   /* io_base_addr */
 453         0x380,  /* nx_host_int */
 454         0x504   /* nx_risc_int */
 455 };
 456 
 457 /* mutex for protecting variables shared by all instances of the driver */
 458 kmutex_t ql_global_mutex;
 459 kmutex_t ql_global_hw_mutex;
 460 kmutex_t ql_global_el_mutex;
 461 
 462 /* DMA access attribute structure. */
 463 static ddi_device_acc_attr_t ql_dev_acc_attr = {
 464         DDI_DEVICE_ATTR_V0,
 465         DDI_STRUCTURE_LE_ACC,
 466         DDI_STRICTORDER_ACC
 467 };
 468 
 469 /* I/O DMA attributes structures. */
 470 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
 471         DMA_ATTR_V0,                    /* dma_attr_version */
 472         QL_DMA_LOW_ADDRESS,             /* low DMA address range */
 473         QL_DMA_HIGH_64BIT_ADDRESS,      /* high DMA address range */
 474         QL_DMA_XFER_COUNTER,            /* DMA counter register */
 475         QL_DMA_ADDRESS_ALIGNMENT,       /* DMA address alignment */
 476         QL_DMA_BURSTSIZES,              /* DMA burstsizes */
 477         QL_DMA_MIN_XFER_SIZE,           /* min effective DMA size */
 478         QL_DMA_MAX_XFER_SIZE,           /* max DMA xfer size */
 479         QL_DMA_SEGMENT_BOUNDARY,        /* segment boundary */
 480         QL_DMA_SG_LIST_LENGTH,          /* s/g list length */
 481         QL_DMA_GRANULARITY,             /* granularity of device */
 482         QL_DMA_XFER_FLAGS               /* DMA transfer flags */
 483 };
 484 
 485 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
 486         DMA_ATTR_V0,                    /* dma_attr_version */
 487         QL_DMA_LOW_ADDRESS,             /* low DMA address range */
 488         QL_DMA_HIGH_32BIT_ADDRESS,      /* high DMA address range */
 489         QL_DMA_XFER_COUNTER,            /* DMA counter register */
 490         QL_DMA_ADDRESS_ALIGNMENT,       /* DMA address alignment */
 491         QL_DMA_BURSTSIZES,              /* DMA burstsizes */
 492         QL_DMA_MIN_XFER_SIZE,           /* min effective DMA size */
 493         QL_DMA_MAX_XFER_SIZE,           /* max DMA xfer size */
 494         QL_DMA_SEGMENT_BOUNDARY,        /* segment boundary */
 495         QL_DMA_SG_LIST_LENGTH,          /* s/g list length */
 496         QL_DMA_GRANULARITY,             /* granularity of device */
 497         QL_DMA_XFER_FLAGS               /* DMA transfer flags */
 498 };
 499 
 500 /* Per-class DMA attributes; initialized in _init() from the defaults above. */
 501 static  ddi_dma_attr_t  ql_32fcsm_cmd_dma_attr;
 502 static  ddi_dma_attr_t  ql_64fcsm_cmd_dma_attr;
 503 static  ddi_dma_attr_t  ql_32fcsm_rsp_dma_attr;
 504 static  ddi_dma_attr_t  ql_64fcsm_rsp_dma_attr;
 505 static  ddi_dma_attr_t  ql_32fcip_cmd_dma_attr;
 506 static  ddi_dma_attr_t  ql_64fcip_cmd_dma_attr;
 507 static  ddi_dma_attr_t  ql_32fcip_rsp_dma_attr;
 508 static  ddi_dma_attr_t  ql_64fcip_rsp_dma_attr;
 509 static  ddi_dma_attr_t  ql_32fcp_cmd_dma_attr;
 510 static  ddi_dma_attr_t  ql_64fcp_cmd_dma_attr;
 511 static  ddi_dma_attr_t  ql_32fcp_rsp_dma_attr;
 512 static  ddi_dma_attr_t  ql_64fcp_rsp_dma_attr;
 513 static  ddi_dma_attr_t  ql_32fcp_data_dma_attr;
 514 static  ddi_dma_attr_t  ql_64fcp_data_dma_attr;
 515 
 516 /* Static declarations of cb_ops entry point functions... */
 517 static struct cb_ops ql_cb_ops = {
 518         ql_open,                        /* b/c open */
 519         ql_close,                       /* b/c close */
 520         nodev,                          /* b strategy */
 521         nodev,                          /* b print */
 522         nodev,                          /* b dump */
 523         nodev,                          /* c read */
 524         nodev,                          /* c write */
 525         ql_ioctl,                       /* c ioctl */
 526         nodev,                          /* c devmap */
 527         nodev,                          /* c mmap */
 528         nodev,                          /* c segmap */
 529         nochpoll,                       /* c poll */
 530         nodev,                          /* cb_prop_op */
 531         NULL,                           /* streamtab  */
 532         D_MP | D_NEW | D_HOTPLUG,       /* Driver compatibility flag */
 533         CB_REV,                         /* cb_ops revision */
 534         nodev,                          /* c aread */
 535         nodev                           /* c awrite */
 
 539 static struct dev_ops ql_devops = {
 540         DEVO_REV,                       /* devo_rev */
 541         0,                              /* refcnt */
 542         ql_getinfo,                     /* getinfo */
 543         nulldev,                        /* identify */
 544         nulldev,                        /* probe */
 545         ql_attach,                      /* attach */
 546         ql_detach,                      /* detach */
 547         nodev,                          /* reset */
 548         &ql_cb_ops,                 /* char/block ops */
 549         NULL,                           /* bus operations */
 550         ql_power,                       /* power management */
 551         ql_quiesce                      /* quiesce device */
 552 };
 553 
 554 /* ELS command code to text converter */
 555 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
 556 /* Mailbox command code to text converter */
 557 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
 558 
 559 char qlc_driver_version[] = QL_VERSION;
 560 
 561 /*
 562  * Loadable Driver Interface Structures.
 563  * Declare and initialize the module configuration section...
 564  */
 565 static struct modldrv modldrv = {
 566         &mod_driverops,                             /* type of module: driver */
 567         "SunFC Qlogic FCA v" QL_VERSION,        /* name of module */
 568         &ql_devops                          /* driver dev_ops */
 569 };
 570 
 571 static struct modlinkage modlinkage = {
 572         MODREV_1,
 573         &modldrv,
 574         NULL
 575 };
 576 
 577 /* ************************************************************************ */
 578 /*                              Loadable Module Routines.                   */
 579 /* ************************************************************************ */
 580 
 581 /*
 582  * _init
 583  *      Initializes a loadable module. It is called before any other
 584  *      routine in a loadable module.
 585  *
 586  * Returns:
 587  *      0 = success
 588  *
 589  * Context:
 590  *      Kernel context.
 591  */
 592 int
 593 _init(void)
 594 {
 595         uint16_t        w16;
 596         int             rval = 0;
 597 
 598         /* Get OS major release level. */
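        /* For example, utsname.release "5.11" yields ql_os_release_level 11. */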
 599         for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
 600                 if (utsname.release[w16] == '.') {
 601                         w16++;
 602                         break;
 603                 }
 604         }
 605         if (w16 < sizeof (utsname.release)) {
 606                 (void) ql_bstr_to_dec(&utsname.release[w16],
 607                     &ql_os_release_level, 0);
 608         } else {
 609                 ql_os_release_level = 0;
 610         }
 611         if (ql_os_release_level < 6) {
 612                 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
 613                     QL_NAME, ql_os_release_level);
 614                 rval = EINVAL;
 615         }
 616         if (ql_os_release_level == 6) {
 617                 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
 618                 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
 619         }
 620 
 621         if (rval == 0) {
 622                 rval = ddi_soft_state_init(&ql_state,
 623                     sizeof (ql_adapter_state_t), 0);
 624         }
 625         if (rval == 0) {
 626                 /* allow the FC Transport to tweak the dev_ops */
 627                 fc_fca_init(&ql_devops);
 628 
 629                 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
 630                 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
 631                 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
 632                 rval = mod_install(&modlinkage);
 633                 if (rval != 0) {
 634                         mutex_destroy(&ql_global_hw_mutex);
 635                         mutex_destroy(&ql_global_mutex);
 636                         mutex_destroy(&ql_global_el_mutex);
 637                         ddi_soft_state_fini(&ql_state);
 638                 } else {
 639                         /*EMPTY*/
 640                         ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
 641                         ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
 642                         ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
 643                         ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
 644                         ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
 645                         ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
 646                         ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
 647                         ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
 648                         ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
 649                         ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
 650                         ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
 651                         ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
 652                         ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
 653                         ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
 654                         ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
 655                             ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
 656                             QL_FCSM_CMD_SGLLEN;
 657                         ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
 658                             ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
 659                             QL_FCSM_RSP_SGLLEN;
 660                         ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
 661                             ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
 662                             QL_FCIP_CMD_SGLLEN;
 663                         ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
 664                             ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
 665                             QL_FCIP_RSP_SGLLEN;
 666                         ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
 667                             ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
 668                             QL_FCP_CMD_SGLLEN;
 669                         ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
 670                             ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
 671                             QL_FCP_RSP_SGLLEN;
 672                 }
 673         }
 674 
 675         if (rval != 0) {
 676                 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
 677                     QL_NAME);
 678         }
 679 
 680         return (rval);
 681 }
 682 
 683 /*
 684  * _fini
 685  *      Prepares a module for unloading. It is called when the system
 686  *      wants to unload a module. If the module determines that it can
 687  *      be unloaded, then _fini() returns the value returned by
 688  *      mod_remove(). Upon successful return from _fini() no other
 689  *      routine in the module will be called before _init() is called.
 690  *
 691  * Returns:
 692  *      0 = success
 693  *
 694  * Context:
 695  *      Kernel context.
 696  */
 697 int
 698 _fini(void)
 699 {
 700         int     rval;
 701 
 702         rval = mod_remove(&modlinkage);
 703         if (rval == 0) {
 704                 mutex_destroy(&ql_global_hw_mutex);
 705                 mutex_destroy(&ql_global_mutex);
 706                 mutex_destroy(&ql_global_el_mutex);
 707                 ddi_soft_state_fini(&ql_state);
 708         }
 709 
 710         return (rval);
 711 }
 712 
 713 /*
 714  * _info
 715  *      Returns information about loadable module.
 716  *
 717  * Input:
 718  *      modinfo = pointer to module information structure.
 719  *
 720  * Returns:
 721  *      Value returned by mod_info().
 722  *
 723  * Context:
 724  *      Kernel context.
 725  */
 726 int
 
 746  *      arg = command specific argument.
 747  *      resultp = pointer to where request information is stored.
 748  *
 749  * Returns:
 750  *      DDI_SUCCESS or DDI_FAILURE.
 751  *
 752  * Context:
 753  *      Kernel context.
 754  */
 755 /* ARGSUSED */
 756 static int
 757 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
 758 {
 759         ql_adapter_state_t      *ha;
 760         int                     minor;
 761         int                     rval = DDI_FAILURE;
 762 
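        /* The minor number is the instance number, which indexes the soft state array. */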
 763         minor = (int)(getminor((dev_t)arg));
 764         ha = ddi_get_soft_state(ql_state, minor);
 765         if (ha == NULL) {
 766                 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
 767                     getminor((dev_t)arg));
 768                 *resultp = NULL;
 769                 return (rval);
 770         }
 771 
 772         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
 773 
 774         switch (cmd) {
 775         case DDI_INFO_DEVT2DEVINFO:
 776                 *resultp = ha->dip;
 777                 rval = DDI_SUCCESS;
 778                 break;
 779         case DDI_INFO_DEVT2INSTANCE:
 780                 *resultp = (void *)(uintptr_t)(ha->instance);
 781                 rval = DDI_SUCCESS;
 782                 break;
 783         default:
 784                 EL(ha, "failed, unsupported cmd=%d\n", cmd);
 785                 rval = DDI_FAILURE;
 786                 break;
 787         }
 788 
 789         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
 790 
 791         return (rval);
 792 }
 793 
 794 /*
 795  * ql_attach
 796  *      Configure and attach an instance of the driver
 797  *      for a port.
 798  *
 799  * Input:
 800  *      dip = pointer to device information structure.
 801  *      cmd = attach type.
 802  *
 803  * Returns:
 804  *      DDI_SUCCESS or DDI_FAILURE.
 805  *
 806  * Context:
 807  *      Kernel context.
 808  */
 809 static int
 810 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 811 {
 812         off_t                   regsize;
 813         uint32_t                size;
 814         int                     rval, *ptr;
 815         int                     instance;
 816         uint_t                  progress = 0;
 817         char                    *buf;
 818         ushort_t                caps_ptr, cap;
 819         fc_fca_tran_t           *tran;
 820         ql_adapter_state_t      *ha = NULL;
 821 
 822         static char *pmcomps[] = {
 823                 NULL,
 824                 PM_LEVEL_D3_STR,                /* Device OFF */
 825                 PM_LEVEL_D0_STR,                /* Device ON */
 826         };
 827 
 828         QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
 829             ddi_get_instance(dip), cmd);
 830 
 831         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
 832 
 833         switch (cmd) {
 834         case DDI_ATTACH:
 835                 /* first get the instance */
 836                 instance = ddi_get_instance(dip);
 837 
 838                 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
 839                     QL_NAME, instance, QL_VERSION);
 840 
 841                 /* Correct OS version? */
 842                 if (ql_os_release_level != 11) {
 843                         cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
 844                             "11", QL_NAME, instance);
 845                         goto attach_failed;
 846                 }
 847 
 848                 /* Hardware is installed in a DMA-capable slot? */
 849                 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
 850                         cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
 851                             instance);
 852                         goto attach_failed;
 853                 }
 854 
 855                 /* No support for high-level interrupts */
 856                 if (ddi_intr_hilevel(dip, 0) != 0) {
 857                         cmn_err(CE_WARN, "%s(%d): High level interrupt"
 858                             " not supported", QL_NAME, instance);
 859                         goto attach_failed;
 860                 }
 861 
 862                 /* Allocate our per-device-instance structure */
 863                 if (ddi_soft_state_zalloc(ql_state,
 864                     instance) != DDI_SUCCESS) {
 865                         cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
 866                             QL_NAME, instance);
 867                         goto attach_failed;
 868                 }
 869                 progress |= QL_SOFT_STATE_ALLOCED;
 870 
 871                 ha = ddi_get_soft_state(ql_state, instance);
 872                 if (ha == NULL) {
 873                         cmn_err(CE_WARN, "%s(%d): can't get soft state",
 874                             QL_NAME, instance);
 875                         goto attach_failed;
 876                 }
 877                 ha->dip = dip;
 878                 ha->instance = instance;
 879                 ha->hba.base_address = ha;
 880                 ha->pha = ha;
 881 
 882                 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
 883                         cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
 884                             QL_NAME, instance);
 885                         goto attach_failed;
 886                 }
 887 
 888                 /* Get extended logging and dump flags. */
 889                 ql_common_properties(ha);
 890 
 891                 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
 892                     "sbus") == 0) {
 893                         EL(ha, "%s SBUS card detected", QL_NAME);
 894                         ha->cfg_flags |= CFG_SBUS_CARD;
 895                 }
 896 
 897                 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
 898                     DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
 899 
 900                 ha->outstanding_cmds = kmem_zalloc(
 901                     sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
 902                     KM_SLEEP);
 903 
 904                 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
 905                     QL_UB_LIMIT, KM_SLEEP);
 906 
 907                 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
 908                     KM_SLEEP);
 909 
 910                 (void) ddi_pathname(dip, buf);
 911                 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
 912                 if (ha->devpath == NULL) {
 913                         EL(ha, "devpath mem alloc failed\n");
 914                 } else {
 915                         (void) strcpy(ha->devpath, buf);
 916                         EL(ha, "devpath is: %s\n", ha->devpath);
 917                 }
 918 
 919                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
 920                         /*
 921                          * For cards where PCI is mapped to sbus e.g. Ivory.
 922                          *
 923                          * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
 924                          *      : 0x100 - 0x3FF PCI IO space for 2200
 925                          * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
 926                          *      : 0x100 - 0x3FF PCI IO Space for fpga
 927                          */
 928                         if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
 929                             0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
 930                             DDI_SUCCESS) {
 931                                 cmn_err(CE_WARN, "%s(%d): Unable to map device"
 
 951                          */
 952                         if (ddi_regs_map_setup(dip, 0,
 953                             (caddr_t *)&ha->sbus_config_base, 0, 0x100,
 954                             &ql_dev_acc_attr, &ha->sbus_config_handle) !=
 955                             DDI_SUCCESS) {
 956                                 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
 957                                     "config registers", QL_NAME, instance);
 958                                 goto attach_failed;
 959                         }
 960                         progress |= QL_CONFIG_SPACE_SETUP;
 961                 } else {
 962                         /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
 963                         rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
 964                             DDI_PROP_DONTPASS, "reg", &ptr, &size);
 965                         if (rval != DDI_PROP_SUCCESS) {
 966                                 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
 967                                     "address registers", QL_NAME, instance);
 968                                 goto attach_failed;
 969                         } else {
 970                                 ha->pci_bus_addr = ptr[0];
 971                                 ha->function_number = (uint8_t)
 972                                     (ha->pci_bus_addr >> 8 & 7);
 973                                 ddi_prop_free(ptr);
 974                         }
 975 
 976                         /*
 977                          * We should map config space before adding interrupts
 978                          * so that the chip type (2200 or 2300) can be
 979                          * determined before the interrupt routine gets a
 980                          * chance to execute.
 981                          */
 982                         if (pci_config_setup(ha->dip, &ha->pci_handle) !=
 983                             DDI_SUCCESS) {
 984                                 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
 985                                     "config space", QL_NAME, instance);
 986                                 goto attach_failed;
 987                         }
 988                         progress |= QL_CONFIG_SPACE_SETUP;
 989 
 990                         /*
 991                          * Setup the ISP2200 registers address mapping to be
 992                          * accessed by this particular driver.
 993                          * 0x0   Configuration Space
 994                          * 0x1   I/O Space
 995                          * 0x2   32-bit Memory Space address
 996                          * 0x3   64-bit Memory Space address
 997                          */
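                        /*
                         * A BAR with bit 0 set decodes I/O space; if BAR0 is an
                         * I/O BAR the memory BAR is reg set 2, otherwise reg set 1.
                         */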
 998                         size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
 999                             2 : 1;
1000                         if (ddi_dev_regsize(dip, size, &regsize) !=
1001                             DDI_SUCCESS ||
1002                             ddi_regs_map_setup(dip, size, &ha->iobase,
1003                             0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1004                             DDI_SUCCESS) {
1005                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1006                                     "failed", QL_NAME, instance);
1007                                 goto attach_failed;
1008                         }
1009                         progress |= QL_REGS_MAPPED;
1010 
1011                         /*
1012                          * We need I/O space mappings for 23xx HBAs for
1013                          * loading flash (FCode). The chip has a bug due to
1014                          * which loading flash fails through mem space
1015                          * mappings in PCI-X mode.
1016                          */
1017                         if (size == 1) {
1018                                 ha->iomap_iobase = ha->iobase;
1019                                 ha->iomap_dev_handle = ha->dev_handle;
1020                         } else {
1021                                 if (ddi_dev_regsize(dip, 1, &regsize) !=
1022                                     DDI_SUCCESS ||
1023                                     ddi_regs_map_setup(dip, 1,
1024                                     &ha->iomap_iobase, 0, regsize,
1025                                     &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1026                                     DDI_SUCCESS) {
1027                                         cmn_err(CE_WARN, "%s(%d): regs_map_"
1028                                             "setup(I/O) failed", QL_NAME,
1029                                             instance);
1030                                         goto attach_failed;
1031                                 }
1032                                 progress |= QL_IOMAP_IOBASE_MAPPED;
1033                         }
1034                 }
1035 
1036                 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1037                     PCI_CONF_SUBSYSID);
1038                 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1039                     PCI_CONF_SUBVENID);
1040                 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1041                     PCI_CONF_VENID);
1042                 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1043                     PCI_CONF_DEVID);
1044                 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1045                     PCI_CONF_REVID);
1046 
1047                 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1048                     "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1049                     ha->subven_id, ha->subsys_id);
1050 
1051                 switch (ha->device_id) {
1052                 case 0x2300:
1053                 case 0x2312:
1054                 case 0x2322:
1055                 case 0x6312:
1056                 case 0x6322:
1057                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1058                                 ha->flags |= FUNCTION_1;
1059                         }
1060                         if ((ha->device_id == 0x6322) ||
1061                             (ha->device_id == 0x2322)) {
1062                                 ha->cfg_flags |= CFG_CTRL_6322;
1063                                 ha->fw_class = 0x6322;
1064                                 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1065                         } else {
1066                                 ha->cfg_flags |= CFG_CTRL_2300;
1067                                 ha->fw_class = 0x2300;
1068                                 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1069                         }
1070                         ha->reg_off = &reg_off_2300;
1071                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1072                                 goto attach_failed;
1073                         }
1074                         ha->fcp_cmd = ql_command_iocb;
1075                         ha->ip_cmd = ql_ip_iocb;
1076                         ha->ms_cmd = ql_ms_iocb;
1077                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1078                                 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1079                                 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1080                         } else {
1081                                 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1082                                 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1083                         }
1084                         break;
1085 
1086                 case 0x2200:
1087                         ha->cfg_flags |= CFG_CTRL_2200;
1088                         ha->reg_off = &reg_off_2200;
1089                         ha->fw_class = 0x2200;
1090                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1091                                 goto attach_failed;
1092                         }
1093                         ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1094                         ha->fcp_cmd = ql_command_iocb;
1095                         ha->ip_cmd = ql_ip_iocb;
1096                         ha->ms_cmd = ql_ms_iocb;
1097                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1098                                 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1099                                 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1100                         } else {
1101                                 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1102                                 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1103                         }
1104                         break;
1105 
1106                 case 0x2422:
1107                 case 0x2432:
1108                 case 0x5422:
1109                 case 0x5432:
1110                 case 0x8432:
1111                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1112                                 ha->flags |= FUNCTION_1;
1113                         }
1114                         ha->cfg_flags |= CFG_CTRL_2422;
1115                         if (ha->device_id == 0x8432) {
1116                                 ha->cfg_flags |= CFG_CTRL_MENLO;
1117                         } else {
1118                                 ha->flags |= VP_ENABLED;
1119                         }
1120 
1121                         ha->reg_off = &reg_off_2400_2500;
1122                         ha->fw_class = 0x2400;
1123                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1124                                 goto attach_failed;
1125                         }
1126                         ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1127                         ha->fcp_cmd = ql_command_24xx_iocb;
1128                         ha->ip_cmd = ql_ip_24xx_iocb;
1129                         ha->ms_cmd = ql_ms_24xx_iocb;
1130                         ha->els_cmd = ql_els_24xx_iocb;
1131                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1132                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1133                         break;
1134 
1135                 case 0x2522:
1136                 case 0x2532:
1137                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1138                                 ha->flags |= FUNCTION_1;
1139                         }
1140                         ha->cfg_flags |= CFG_CTRL_25XX;
1141                         ha->flags |= VP_ENABLED;
1142                         ha->fw_class = 0x2500;
1143                         ha->reg_off = &reg_off_2400_2500;
1144                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1145                                 goto attach_failed;
1146                         }
1147                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1148                         ha->fcp_cmd = ql_command_24xx_iocb;
1149                         ha->ip_cmd = ql_ip_24xx_iocb;
1150                         ha->ms_cmd = ql_ms_24xx_iocb;
1151                         ha->els_cmd = ql_els_24xx_iocb;
1152                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1153                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1154                         break;
1155 
1156                 case 0x8001:
1157                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1158                                 ha->flags |= FUNCTION_1;
1159                         }
1160                         ha->cfg_flags |= CFG_CTRL_81XX;
1161                         ha->flags |= VP_ENABLED;
1162                         ha->fw_class = 0x8100;
1163                         ha->reg_off = &reg_off_2400_2500;
1164                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1165                                 goto attach_failed;
1166                         }
1167                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1168                         ha->fcp_cmd = ql_command_24xx_iocb;
1169                         ha->ip_cmd = ql_ip_24xx_iocb;
1170                         ha->ms_cmd = ql_ms_24xx_iocb;
1171                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1172                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1173                         break;
1174 
1175                 case 0x8021:
1176                         if (ha->function_number & BIT_0) {
1177                                 ha->flags |= FUNCTION_1;
1178                         }
1179                         ha->cfg_flags |= CFG_CTRL_8021;
1180                         ha->reg_off = &reg_off_8021;
1181                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1182                         ha->fcp_cmd = ql_command_24xx_iocb;
1183                         ha->ms_cmd = ql_ms_24xx_iocb;
1184                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1185                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1186 
1187                         ha->nx_pcibase = ha->iobase;
1188                         ha->iobase += 0xBC000 + (ha->function_number << 11);
1189                         ha->iomap_iobase += 0xBC000 +
1190                             (ha->function_number << 11);
1191 
1192                         /* map doorbell */
1193                         if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1194                             ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1195                             0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1196                             DDI_SUCCESS) {
1197                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1198                                     "(doorbell) failed", QL_NAME, instance);
1199                                 goto attach_failed;
1200                         }
1201                         progress |= QL_DB_IOBASE_MAPPED;
1202 
1203                         ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1204                             (ha->function_number << 12));
1205                         ha->db_read = ha->nx_pcibase + (512 * 1024) +
1206                             (ha->function_number * 8);
1207 
1208                         ql_8021_update_crb_int_ptr(ha);
1209                         ql_8021_set_drv_active(ha);
1210                         break;
1211 
1212                 default:
1213                         cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1214                             QL_NAME, instance, ha->device_id);
1215                         goto attach_failed;
1216                 }
1217 
1218                 /* Setup hba buffer. */
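                /*
                 * ISP24xx and later (CFG_CTRL_24258081) have no IP receive
                 * buffer ring, so only request and response ring space is
                 * needed; older chips also get the rcvbuf ring.
                 */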
1219 
1220                 size = CFG_IST(ha, CFG_CTRL_24258081) ?
1221                     (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1222                     (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1223                     RCVBUF_QUEUE_SIZE);
1224 
1225                 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1226                     QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1227                         cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1228                             "alloc failed", QL_NAME, instance);
1229                         goto attach_failed;
1230                 }
1231                 progress |= QL_HBA_BUFFER_SETUP;
1232 
1233                 /* Setup buffer pointers. */
1234                 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1235                     REQUEST_Q_BUFFER_OFFSET;
1236                 ha->request_ring_bp = (struct cmd_entry *)
1237                     ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1238 
1239                 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1240                     RESPONSE_Q_BUFFER_OFFSET;
1241                 ha->response_ring_bp = (struct sts_entry *)
1242                     ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1243 
1244                 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1245                     RCVBUF_Q_BUFFER_OFFSET;
1246                 ha->rcvbuf_ring_bp = (struct rcvbuf *)
1247                     ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1248 
1249                 /* Allocate resource for QLogic IOCTL */
1250                 (void) ql_alloc_xioctl_resource(ha);
1251 
1252                 /* Setup interrupts */
1253                 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1254                         cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1255                             "rval=%xh", QL_NAME, instance, rval);
1256                         goto attach_failed;
1257                 }
1258 
1259                 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1260 
1261                 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1262                         cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1263                             QL_NAME, instance);
1264                         goto attach_failed;
1265                 }
1266 
1267                 /*
1268                  * Allocate an N Port information structure
1269                  * for use when in P2P topology.
1270                  */
1271                 ha->n_port = (ql_n_port_info_t *)
1272                     kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1273                 if (ha->n_port == NULL) {
1274                         cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1275                             QL_NAME, instance);
1276                         goto attach_failed;
1277                 }
1278 
1279                 progress |= QL_N_PORT_INFO_CREATED;
1280 
1281                 /*
1282                  * Determine support for Power Management
1283                  */
1284                 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1285 
1286                 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
 
1337                                     PM_LEVEL_D0) != DDI_SUCCESS) {
1338                                         cmn_err(CE_WARN, "%s(%d): failed to"
1339                                             " raise power or initialize"
1340                                             " adapter", QL_NAME, instance);
1341                                 }
1342                         }
1343                 } else {
1344                         /* Initialize adapter. */
1345                         ha->power_level = PM_LEVEL_D0;
1346                         if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1347                                 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1348                                     " adapter", QL_NAME, instance);
1349                         }
1350                 }
1351 
1352                 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1353                     ha->fw_subminor_version == 0) {
1354                         cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1355                             QL_NAME, ha->instance);
1356                 } else {
1357                         int     rval;
1358                         char    ver_fmt[256];
1359 
1360                         rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1361                             "Firmware version %d.%d.%d", ha->fw_major_version,
1362                             ha->fw_minor_version, ha->fw_subminor_version);
1363 
1364                         if (CFG_IST(ha, CFG_CTRL_81XX)) {
1365                                 rval += (int)snprintf(ver_fmt + rval,
1366                                     (size_t)(sizeof (ver_fmt) - rval),
1367                                     ", MPI fw version %d.%d.%d",
1368                                     ha->mpi_fw_major_version,
1369                                     ha->mpi_fw_minor_version,
1370                                     ha->mpi_fw_subminor_version);
1371 
1372                                 if (ha->subsys_id == 0x17B ||
1373                                     ha->subsys_id == 0x17D) {
1374                                         (void) snprintf(ver_fmt + rval,
1375                                             (size_t)(sizeof (ver_fmt) - rval),
1376                                             ", PHY fw version %d.%d.%d",
1377                                             ha->phy_fw_major_version,
1378                                             ha->phy_fw_minor_version,
1379                                             ha->phy_fw_subminor_version);
1380                                 }
1381                         }
1382                         cmn_err(CE_NOTE, "!%s(%d): %s",
1383                             QL_NAME, ha->instance, ver_fmt);
1384                 }
1385 
1386                 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1387                     "controller", KSTAT_TYPE_RAW,
1388                     (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1389                 if (ha->k_stats == NULL) {
1390                         cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1391                             QL_NAME, instance);
1392                         goto attach_failed;
1393                 }
1394                 progress |= QL_KSTAT_CREATED;
 
1405                     instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1406                         cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1407                             QL_NAME, instance);
1408                         goto attach_failed;
1409                 }
1410                 progress |= QL_MINOR_NODE_CREATED;
1411 
1412                 /* Allocate a transport structure for this instance */
1413                 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1414                 if (tran == NULL) {
1415                         cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1416                             QL_NAME, instance);
1417                         goto attach_failed;
1418                 }
1419 
1420                 progress |= QL_FCA_TRAN_ALLOCED;
1421 
1422                 /* fill in the structure */
1423                 tran->fca_numports = 1;
1424                 tran->fca_version = FCTL_FCA_MODREV_5;
1425                 if (CFG_IST(ha, CFG_CTRL_2422)) {
1426                         tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1427                 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1428                         tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1429                 }
1430                 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1431                     tran->fca_perm_pwwn.raw_wwn, 8);
1432 
1433                 EL(ha, "FCA version %d\n", tran->fca_version);
1434 
1435                 /* Specify the amount of space needed in each packet */
1436                 tran->fca_pkt_size = sizeof (ql_srb_t);
1437 
1438                 /* command limits are usually dictated by hardware */
1439                 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1440 
1441                 /* DMA attributes are static, set elsewhere. */
1442                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1443                         tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1444                         tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1445                         tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1446                         tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1447                         tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1448                         tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1449                         tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1450                         tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1451                 } else {
1452                         tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1453                         tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1454                         tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1455                         tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1456                         tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1457                         tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1458                         tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1459                         tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1460                 }
1461 
1462                 tran->fca_acc_attr = &ql_dev_acc_attr;
1463                 tran->fca_iblock = &(ha->iblock_cookie);
1464 
1465                 /* the remaining values are simply function vectors */
1466                 tran->fca_bind_port = ql_bind_port;
1467                 tran->fca_unbind_port = ql_unbind_port;
1468                 tran->fca_init_pkt = ql_init_pkt;
1469                 tran->fca_un_init_pkt = ql_un_init_pkt;
1470                 tran->fca_els_send = ql_els_send;
1471                 tran->fca_get_cap = ql_get_cap;
1472                 tran->fca_set_cap = ql_set_cap;
1473                 tran->fca_getmap = ql_getmap;
1474                 tran->fca_transport = ql_transport;
1475                 tran->fca_ub_alloc = ql_ub_alloc;
1476                 tran->fca_ub_free = ql_ub_free;
1477                 tran->fca_ub_release = ql_ub_release;
1478                 tran->fca_abort = ql_abort;
1479                 tran->fca_reset = ql_reset;
1480                 tran->fca_port_manage = ql_port_manage;
1481                 tran->fca_get_device = ql_get_device;
1482 
1483                 /* give it to the FC transport */
1484                 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1485                         cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1486                             instance);
1487                         goto attach_failed;
1488                 }
1489                 progress |= QL_FCA_ATTACH_DONE;
1490 
1491                 /* Stash the structure so it can be freed at detach */
1492                 ha->tran = tran;
1493 
1494                 /* Acquire global state lock. */
1495                 GLOBAL_STATE_LOCK();
1496 
1497                 /* Add adapter structure to linked list. */
1498                 ql_add_link_b(&ql_hba, &ha->hba);
1499 
1500                 /* Start one second driver timer. */
1501                 if (ql_timer_timeout_id == NULL) {
1502                         ql_timer_ticks = drv_usectohz(1000000);
1503                         ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1504                             ql_timer_ticks);
1505                 }
1506 
1507                 /* Release global state lock. */
1508                 GLOBAL_STATE_UNLOCK();
1509 
1510                 /* Determine and populate HBA fru info */
1511                 ql_setup_fruinfo(ha);
1512 
1513                 /* Setup task_daemon thread. */
1514                 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1515                     0, &p0, TS_RUN, minclsyspri);
1516 
1517                 progress |= QL_TASK_DAEMON_STARTED;
1518 
1519                 ddi_report_dev(dip);
1520 
1521                 /* Disable link reset in panic path */
1522                 ha->lip_on_panic = 1;
1523 
1524                 rval = DDI_SUCCESS;
1525                 break;
1526 
1527 attach_failed:
1528                 if (progress & QL_FCA_ATTACH_DONE) {
1529                         (void) fc_fca_detach(dip);
1530                         progress &= ~QL_FCA_ATTACH_DONE;
1531                 }
1532 
1533                 if (progress & QL_FCA_TRAN_ALLOCED) {
1534                         kmem_free(tran, sizeof (fc_fca_tran_t));
1535                         progress &= ~QL_FCA_TRAN_ALLOCED;
1536                 }
1537 
1538                 if (progress & QL_MINOR_NODE_CREATED) {
1539                         ddi_remove_minor_node(dip, "devctl");
1540                         progress &= ~QL_MINOR_NODE_CREATED;
1541                 }
1542 
1543                 if (progress & QL_KSTAT_CREATED) {
1544                         kstat_delete(ha->k_stats);
1545                         progress &= ~QL_KSTAT_CREATED;
1546                 }
1547 
1548                 if (progress & QL_N_PORT_INFO_CREATED) {
1549                         kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1550                         progress &= ~QL_N_PORT_INFO_CREATED;
1551                 }
1552 
1553                 if (progress & QL_TASK_DAEMON_STARTED) {
1554                         TASK_DAEMON_LOCK(ha);
1555 
1556                         ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1557 
1558                         cv_signal(&ha->cv_task_daemon);
1559 
1560                         /* Release task daemon lock. */
1561                         TASK_DAEMON_UNLOCK(ha);
1562 
1563                         /* Wait for task daemon to stop running. */
1564                         while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1565                                 ql_delay(ha, 10000);
1566                         }
1567                         progress &= ~QL_TASK_DAEMON_STARTED;
1568                 }
1569 
1570                 if (progress & QL_DB_IOBASE_MAPPED) {
1571                         ql_8021_clr_drv_active(ha);
1572                         ddi_regs_map_free(&ha->db_dev_handle);
1573                         progress &= ~QL_DB_IOBASE_MAPPED;
1574                 }
1575                 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1576                         ddi_regs_map_free(&ha->iomap_dev_handle);
1577                         progress &= ~QL_IOMAP_IOBASE_MAPPED;
1578                 }
1579 
1580                 if (progress & QL_CONFIG_SPACE_SETUP) {
1581                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1582                                 ddi_regs_map_free(&ha->sbus_config_handle);
1583                         } else {
1584                                 pci_config_teardown(&ha->pci_handle);
1585                         }
1586                         progress &= ~QL_CONFIG_SPACE_SETUP;
1587                 }
1588 
1589                 if (progress & QL_INTR_ADDED) {
1590                         ql_disable_intr(ha);
1591                         ql_release_intr(ha);
1592                         progress &= ~QL_INTR_ADDED;
1593                 }
1594 
1595                 if (progress & QL_MUTEX_CV_INITED) {
1596                         ql_destroy_mutex(ha);
1597                         progress &= ~QL_MUTEX_CV_INITED;
1598                 }
1599 
1600                 if (progress & QL_HBA_BUFFER_SETUP) {
1601                         ql_free_phys(ha, &ha->hba_buf);
1602                         progress &= ~QL_HBA_BUFFER_SETUP;
1603                 }
1604 
1605                 if (progress & QL_REGS_MAPPED) {
1606                         ddi_regs_map_free(&ha->dev_handle);
1607                         if (ha->sbus_fpga_iobase != NULL) {
1608                                 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1609                         }
1610                         progress &= ~QL_REGS_MAPPED;
1611                 }
1612 
1613                 if (progress & QL_SOFT_STATE_ALLOCED) {
1614 
1615                         ql_fcache_rel(ha->fcache);
1616 
1617                         kmem_free(ha->adapter_stats,
1618                             sizeof (*ha->adapter_stats));
1619 
1620                         kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1621                             QL_UB_LIMIT);
1622 
1623                         kmem_free(ha->outstanding_cmds,
1624                             sizeof (*ha->outstanding_cmds) *
1625                             MAX_OUTSTANDING_COMMANDS);
1626 
1627                         if (ha->devpath != NULL) {
1628                                 kmem_free(ha->devpath,
1629                                     strlen(ha->devpath) + 1);
1630                         }
1631 
1632                         kmem_free(ha->dev, sizeof (*ha->dev) *
1633                             DEVICE_HEAD_LIST_SIZE);
1634 
1635                         if (ha->xioctl != NULL) {
1636                                 ql_free_xioctl_resource(ha);
1637                         }
1638 
1639                         if (ha->fw_module != NULL) {
1640                                 (void) ddi_modclose(ha->fw_module);
1641                         }
1642                         (void) ql_el_trace_desc_dtor(ha);
1643                         (void) ql_nvram_cache_desc_dtor(ha);
1644 
1645                         ddi_soft_state_free(ql_state, instance);
1646                         progress &= ~QL_SOFT_STATE_ALLOCED;
1647                 }
1648 
1649                 ddi_prop_remove_all(dip);
1650                 rval = DDI_FAILURE;
1651                 break;
1652 
1653         case DDI_RESUME:
1654                 rval = DDI_FAILURE;
1655 
1656                 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1657                 if (ha == NULL) {
1658                         cmn_err(CE_WARN, "%s(%d): can't get soft state",
1659                             QL_NAME, instance);
1660                         break;
1661                 }
1662 
1663                 ha->power_level = PM_LEVEL_D3;
 
1672                         }
1673                 }
1674 
1675                 /*
1676                  * There is a bug in DR that prevents the PM framework
1677                  * from calling ql_power.
1678                  */
1679                 if (ha->power_level == PM_LEVEL_D3) {
1680                         ha->power_level = PM_LEVEL_D0;
1681 
1682                         if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1683                                 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1684                                     " adapter", QL_NAME, instance);
1685                         }
1686 
1687                         /* Wake up task_daemon. */
1688                         ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1689                             0);
1690                 }
1691 
1692                 /* Acquire global state lock. */
1693                 GLOBAL_STATE_LOCK();
1694 
1695                 /* Restart driver timer. */
1696                 if (ql_timer_timeout_id == NULL) {
1697                         ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1698                             ql_timer_ticks);
1699                 }
1700 
1701                 /* Release global state lock. */
1702                 GLOBAL_STATE_UNLOCK();
1703 
1704                 /* Wake up command start routine. */
1705                 ADAPTER_STATE_LOCK(ha);
1706                 ha->flags &= ~ADAPTER_SUSPENDED;
1707                 ADAPTER_STATE_UNLOCK(ha);
1708 
1709                 /*
1710                  * The transport doesn't perform FC discovery in polled
1711                  * mode, so we need the daemon thread's services
1712                  * right here.
1713                  */
1714                 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1715 
1716                 rval = DDI_SUCCESS;
1717 
1718                 /* Restart IP if it was running. */
1719                 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1720                         (void) ql_initialize_ip(ha);
1721                         ql_isp_rcvbuf(ha);
1722                 }
1723                 break;
1724 
1725         default:
1726                 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1727                     " %x", QL_NAME, ddi_get_instance(dip), cmd);
1728                 rval = DDI_FAILURE;
1729                 break;
1730         }
1731 
1732         kmem_free(buf, MAXPATHLEN);
1733 
1734         if (rval != DDI_SUCCESS) {
1735                 /*EMPTY*/
1736                 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1737                     ddi_get_instance(dip), rval);
1738         } else {
1739                 /*EMPTY*/
1740                 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1741         }
1742 
1743         return (rval);
1744 }
1745 
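     /*
      * Illustrative sketch (not part of the driver): the attach path above
      * arms a single, shared one-second timer with timeout(9F) when the
      * first adapter is added, and detach disarms it with untimeout(9F)
      * when the last adapter goes away.  Assuming a hypothetical callback
      * my_tick() that re-arms itself, the general pattern is:
      *
      *	static timeout_id_t my_tid;
      *
      *	static void
      *	my_tick(void *arg)
      *	{
      *		my_tid = timeout(my_tick, arg, drv_usectohz(1000000));
      *	}
      *
      *	arm:	my_tid = timeout(my_tick, NULL, drv_usectohz(1000000));
      *	disarm:	(void) untimeout(my_tid);
      */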
1746 /*
1747  * ql_detach
1748  *      Used to remove all the state associated with a given
1749  *      instance of a device node prior to the removal of that
1750  *      instance from the system.
1751  *
1752  * Input:
1753  *      dip = pointer to device information structure.
1754  *      cmd = type of detach.
1755  *
1756  * Returns:
1757  *      DDI_SUCCESS or DDI_FAILURE.
1758  *
1759  * Context:
1760  *      Kernel context.
1761  */
1762 static int
1763 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1764 {
1765         ql_adapter_state_t      *ha, *vha;
1766         ql_tgt_t                *tq;
1767         int                     delay_cnt;
1768         uint16_t                index;
1769         ql_link_t               *link;
1770         char                    *buf;
1771         timeout_id_t            timer_id = NULL;
1772         int                     suspend, rval = DDI_SUCCESS;
1773 
1774         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1775         if (ha == NULL) {
1776                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1777                     ddi_get_instance(dip));
1778                 return (DDI_FAILURE);
1779         }
1780 
1781         QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1782 
1783         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1784 
1785         switch (cmd) {
1786         case DDI_DETACH:
1787                 ADAPTER_STATE_LOCK(ha);
1788                 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1789                 ADAPTER_STATE_UNLOCK(ha);
1790 
1791                 TASK_DAEMON_LOCK(ha);
1792 
1793                 if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1794                         ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1795                         cv_signal(&ha->cv_task_daemon);
1796 
1797                         TASK_DAEMON_UNLOCK(ha);
1798 
1799                         (void) ql_wait_for_td_stop(ha);
1800 
1801                         TASK_DAEMON_LOCK(ha);
1802                         if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1803                                 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1804                                 EL(ha, "failed, could not stop task daemon\n");
1805                         }
1806                 }
1807                 TASK_DAEMON_UNLOCK(ha);
1808 
1809                 GLOBAL_STATE_LOCK();
1810 
1811                 /* Disable driver timer if last adapter. */
1812                 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1813                     ql_hba.last == &ha->hba) {
1814                         timer_id = ql_timer_timeout_id;
1815                         ql_timer_timeout_id = NULL;
1816                 }
1817                 ql_remove_link(&ql_hba, &ha->hba);
1818 
1819                 GLOBAL_STATE_UNLOCK();
1820 
1821                 if (timer_id) {
1822                         (void) untimeout(timer_id);
1823                 }
1824 
1825                 if (ha->pm_capable) {
1826                         if (pm_lower_power(dip, QL_POWER_COMPONENT,
1827                             PM_LEVEL_D3) != DDI_SUCCESS) {
1828                                 cmn_err(CE_WARN, "%s(%d): failed to lower the"
1829                                     " power", QL_NAME, ha->instance);
1830                         }
1831                 }
1832 
1833                 /*
1834                  * If pm_lower_power shut down the adapter, there
1835                  * isn't much else to do.
1836                  */
1837                 if (ha->power_level != PM_LEVEL_D3) {
1838                         ql_halt(ha, PM_LEVEL_D3);
1839                 }
1840 
1841                 /* Remove virtual ports. */
1842                 while ((vha = ha->vp_next) != NULL) {
1843                         ql_vport_destroy(vha);
1844                 }
 
1892                         ha->risc_code_size = 0;
1893                 }
1894 
1895                 if (ha->fw_module != NULL) {
1896                         (void) ddi_modclose(ha->fw_module);
1897                         ha->fw_module = NULL;
1898                 }
1899 
1900                 /* Free resources. */
1901                 ddi_prop_remove_all(dip);
1902                 (void) fc_fca_detach(dip);
1903                 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1904                 ddi_remove_minor_node(dip, "devctl");
1905                 if (ha->k_stats != NULL) {
1906                         kstat_delete(ha->k_stats);
1907                 }
1908 
1909                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1910                         ddi_regs_map_free(&ha->sbus_config_handle);
1911                 } else {
1912                         if (CFG_IST(ha, CFG_CTRL_8021)) {
1913                                 ql_8021_clr_drv_active(ha);
1914                                 ddi_regs_map_free(&ha->db_dev_handle);
1915                         }
1916                         if (ha->iomap_dev_handle != ha->dev_handle) {
1917                                 ddi_regs_map_free(&ha->iomap_dev_handle);
1918                         }
1919                         pci_config_teardown(&ha->pci_handle);
1920                 }
1921 
1922                 ql_disable_intr(ha);
1923                 ql_release_intr(ha);
1924 
1925                 ql_free_xioctl_resource(ha);
1926 
1927                 ql_destroy_mutex(ha);
1928 
1929                 ql_free_phys(ha, &ha->hba_buf);
1930                 ql_free_phys(ha, &ha->fwexttracebuf);
1931                 ql_free_phys(ha, &ha->fwfcetracebuf);
1932 
1933                 ddi_regs_map_free(&ha->dev_handle);
1934                 if (ha->sbus_fpga_iobase != NULL) {
1935                         ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1936                 }
1937 
1938                 ql_fcache_rel(ha->fcache);
1939                 if (ha->vcache != NULL) {
1940                         kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1941                 }
1942 
1943                 if (ha->pi_attrs != NULL) {
1944                         kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1945                 }
1946 
1947                 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1948 
1949                 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1950 
1951                 kmem_free(ha->outstanding_cmds,
1952                     sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1953 
1954                 if (ha->n_port != NULL) {
1955                         kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1956                 }
1957 
1958                 if (ha->devpath != NULL) {
1959                         kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1960                 }
1961 
1962                 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1963 
1964                 EL(ha, "detached\n");
1965 
1966                 ddi_soft_state_free(ql_state, (int)ha->instance);
1967 
1968                 break;
1969 
1970         case DDI_SUSPEND:
1971                 ADAPTER_STATE_LOCK(ha);
1972 
1973                 delay_cnt = 0;
1974                 ha->flags |= ADAPTER_SUSPENDED;
1975                 while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1976                         ADAPTER_STATE_UNLOCK(ha);
1977                         delay(drv_usectohz(1000000));
1978                         ADAPTER_STATE_LOCK(ha);
1979                 }
1980                 if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1981                         ha->flags &= ~ADAPTER_SUSPENDED;
1982                         ADAPTER_STATE_UNLOCK(ha);
1983                         rval = DDI_FAILURE;
1984                         cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1985                             " busy %xh flags %xh", QL_NAME, ha->instance,
1986                             ha->busy, ha->flags);
1987                         break;
1988                 }
1989 
1990                 ADAPTER_STATE_UNLOCK(ha);
1991 
1992                 if (ha->flags & IP_INITIALIZED) {
1993                         (void) ql_shutdown_ip(ha);
1994                 }
1995 
1996                 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1997                         ADAPTER_STATE_LOCK(ha);
1998                         ha->flags &= ~ADAPTER_SUSPENDED;
1999                         ADAPTER_STATE_UNLOCK(ha);
2000                         cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2001                             QL_NAME, ha->instance, suspend);
2002 
2003                         /* Restart IP if it was running. */
2004                         if (ha->flags & IP_ENABLED &&
2005                             !(ha->flags & IP_INITIALIZED)) {
2006                                 (void) ql_initialize_ip(ha);
2007                                 ql_isp_rcvbuf(ha);
2008                         }
2009                         rval = DDI_FAILURE;
2010                         break;
2011                 }
2012 
2013                 /* Acquire global state lock. */
2014                 GLOBAL_STATE_LOCK();
2015 
2016                 /* Disable driver timer if last adapter. */
2017                 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2018                     ql_hba.last == &ha->hba) {
2019                         timer_id = ql_timer_timeout_id;
2020                         ql_timer_timeout_id = NULL;
2021                 }
2022                 GLOBAL_STATE_UNLOCK();
2023 
2024                 if (timer_id) {
2025                         (void) untimeout(timer_id);
2026                 }
2027 
2028                 EL(ha, "suspended\n");
2029 
2030                 break;
2031 
2032         default:
2033                 rval = DDI_FAILURE;
2034                 break;
2035         }
2036 
2037         kmem_free(buf, MAXPATHLEN);
2038 
2039         if (rval != DDI_SUCCESS) {
2040                 if (ha != NULL) {
2041                         EL(ha, "failed, rval = %xh\n", rval);
2042                 } else {
2043                         /*EMPTY*/
2044                         QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2045                             ddi_get_instance(dip), rval);
2046                 }
2047         } else {
2048                 /*EMPTY*/
2049                 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2050         }
2051 
2052         return (rval);
2053 }
2054 
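     /*
      * Illustrative sketch (not part of the driver): DDI_DETACH above asks
      * the PM framework to drop the device to D3 via pm_lower_power(9F),
      * and the framework in turn calls the power(9E) entry point,
      * ql_power() below.  Code that needs the device fully powered before
      * touching hardware would use the matching pm_raise_power(9F):
      *
      *	if (pm_raise_power(dip, QL_POWER_COMPONENT, PM_LEVEL_D0) !=
      *	    DDI_SUCCESS) {
      *		cmn_err(CE_WARN, "failed to raise power");
      *	}
      */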
2055 
2056 /*
2057  * ql_power
2058  *      Power a device attached to the system.
2059  *
2060  * Input:
2061  *      dip = pointer to device information structure.
2062  *      component = device.
2063  *      level = power level.
2064  *
2065  * Returns:
2066  *      DDI_SUCCESS or DDI_FAILURE.
2067  *
2068  * Context:
2069  *      Kernel context.
2070  */
2071 /* ARGSUSED */
2072 static int
2073 ql_power(dev_info_t *dip, int component, int level)
2074 {
2075         int                     rval = DDI_FAILURE;
2076         off_t                   csr;
2077         uint8_t                 saved_pm_val;
2078         ql_adapter_state_t      *ha;
2079         char                    *buf;
2080         char                    *path;
2081 
2082         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2083         if (ha == NULL || ha->pm_capable == 0) {
2084                 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2085                     ddi_get_instance(dip));
2086                 return (rval);
2087         }
2088 
2089         QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2090 
2091         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2092         path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2093 
2094         if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2095             level != PM_LEVEL_D3)) {
2096                 EL(ha, "invalid, component=%xh or level=%xh\n",
2097                     component, level);
                     kmem_free(buf, MAXPATHLEN);
                     kmem_free(path, MAXPATHLEN);
2098                 return (rval);
2099         }
2100 
2101         GLOBAL_HW_LOCK();
2102         csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2103         GLOBAL_HW_UNLOCK();
2104 
2105         (void) snprintf(buf, MAXPATHLEN,
2106             "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2107             ddi_pathname(dip, path));
2108 
2109         switch (level) {
2110         case PM_LEVEL_D0:       /* power up to D0 state - fully on */
2111 
2112                 QL_PM_LOCK(ha);
2113                 if (ha->power_level == PM_LEVEL_D0) {
2114                         QL_PM_UNLOCK(ha);
2115                         rval = DDI_SUCCESS;
2116                         break;
2117                 }
2118 
2119                 /*
2120                  * Enable interrupts now
2121                  */
2122                 saved_pm_val = ha->power_level;
2123                 ha->power_level = PM_LEVEL_D0;
2124                 QL_PM_UNLOCK(ha);
2125 
 
2155                 /* Wake up task_daemon. */
2156                 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2157                     TASK_DAEMON_SLEEPING_FLG, 0);
2158 
2159                 /* Restart IP if it was running. */
2160                 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2161                         (void) ql_initialize_ip(ha);
2162                         ql_isp_rcvbuf(ha);
2163                 }
2164 
2165                 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2166                     ha->instance, QL_NAME);
2167 
2168                 rval = DDI_SUCCESS;
2169                 break;
2170 
2171         case PM_LEVEL_D3:       /* power down to D3 state - off */
2172 
2173                 QL_PM_LOCK(ha);
2174 
2175                 if (ha->busy || ((ha->task_daemon_flags &
2176                     TASK_DAEMON_SLEEPING_FLG) == 0)) {
2177                         QL_PM_UNLOCK(ha);
2178                         break;
2179                 }
2180 
2181                 if (ha->power_level == PM_LEVEL_D3) {
2182                         rval = DDI_SUCCESS;
2183                         QL_PM_UNLOCK(ha);
2184                         break;
2185                 }
2186                 QL_PM_UNLOCK(ha);
2187 
2188                 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2189                         cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2190                             " config regs", QL_NAME, ha->instance, buf);
2191                         break;
2192                 }
2193                 ha->config_saved = 1;
2194 
2195                 /*
 
2212                 ha->power_level = PM_LEVEL_D3;
2213                 QL_PM_UNLOCK(ha);
2214 
2215                 /*
2216                  * Wait for ISR to complete.
2217                  */
2218                 INTR_LOCK(ha);
2219                 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2220                 INTR_UNLOCK(ha);
2221 
2222                 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2223                     ha->instance, QL_NAME);
2224 
2225                 rval = DDI_SUCCESS;
2226                 break;
2227         }
2228 
2229         kmem_free(buf, MAXPATHLEN);
2230         kmem_free(path, MAXPATHLEN);
2231 
2232         QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2233 
2234         return (rval);
2235 }
2236 
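     /*
      * Illustrative sketch (not part of the driver): ql_power() above
      * assumes the power-management capability is the first entry on the
      * PCI capability list and that the PMCSR register is PCI_PMCSR bytes
      * into it.  A more general (hypothetical) lookup walks the list with
      * pci_config_get8(9F), using the standard layout in which each entry
      * holds its capability ID at offset 0 and the next pointer at offset
      * 1, with ID 0x01 identifying power management:
      *
      *	uint8_t cap = pci_config_get8(handle, PCI_CONF_CAP_PTR);
      *
      *	while (cap != 0 && cap != 0xff &&
      *	    pci_config_get8(handle, cap) != 0x01) {
      *		cap = pci_config_get8(handle, cap + 1);
      *	}
      *
      * where 'handle' is the config-space access handle obtained from
      * pci_config_setup(9F).
      */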
2237 /*
2238  * ql_quiesce
2239  *      quiesce a device attached to the system.
2240  *
2241  * Input:
2242  *      dip = pointer to device information structure.
2243  *
2244  * Returns:
2245  *      DDI_SUCCESS
2246  *
2247  * Context:
2248  *      Kernel context.
2249  */
2250 static int
2251 ql_quiesce(dev_info_t *dip)
2252 {
2253         ql_adapter_state_t      *ha;
2254         uint32_t                timer;
2255         uint32_t                stat;
2256 
2257         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2258         if (ha == NULL) {
2259                 /* Oh well.... */
2260                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2261                     ddi_get_instance(dip));
2262                 return (DDI_SUCCESS);
2263         }
2264 
2265         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2266 
2267         if (CFG_IST(ha, CFG_CTRL_8021)) {
2268                 (void) ql_stop_firmware(ha);
2269         } else if (CFG_IST(ha, CFG_CTRL_242581)) {
2270                 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2271                 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2272                 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
2273                 for (timer = 0; timer < 30000; timer++) {
2274                         stat = RD32_IO_REG(ha, risc2host);
2275                         if (stat & BIT_15) {
2276                                 if ((stat & 0xff) < 0x12) {
2277                                         WRT32_IO_REG(ha, hccr,
2278                                             HC24_CLR_RISC_INT);
2279                                         break;
2280                                 }
2281                                 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2282                         }
2283                         drv_usecwait(100);
2284                 }
2285                 /* Reset the chip. */
2286                 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2287                     MWB_4096_BYTES);
2288                 drv_usecwait(100);
2289 
2290         } else {
2291                 /* Disable ISP interrupts. */
2292                 WRT16_IO_REG(ha, ictrl, 0);
2293                 /* Select RISC module registers. */
2294                 WRT16_IO_REG(ha, ctrl_status, 0);
2295                 /* Reset ISP semaphore. */
2296                 WRT16_IO_REG(ha, semaphore, 0);
2297                 /* Reset RISC module. */
2298                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2299                 /* Release RISC module. */
2300                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2301         }
2302 
2303         ql_disable_intr(ha);
2304 
2305         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2306 
2307         return (DDI_SUCCESS);
2308 }
2309 
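     /*
      * Illustrative sketch (not part of the driver): quiesce(9E) is hooked
      * up through the last member (devo_quiesce) of the driver's dev_ops so
      * the fast-reboot and panic paths can silence the hardware without the
      * normal detach machinery.  With a hypothetical my_cb_ops, the
      * registration looks roughly like:
      *
      *	static struct dev_ops my_dev_ops = {
      *		DEVO_REV, 0, ql_getinfo, nulldev, nulldev, ql_attach,
      *		ql_detach, nodev, &my_cb_ops, NULL, ql_power, ql_quiesce
      *	};
      */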
2310 /* ************************************************************************ */
2311 /*              Fibre Channel Adapter (FCA) Transport Functions.            */
2312 /* ************************************************************************ */
2313 
2314 /*
2315  * ql_bind_port
2316  *      Handle port binding. The FC Transport attempts to bind an FCA port
2317  *      when it is ready to start transactions on the port. The FC Transport
2318  *      will call the fca_bind_port() function specified in the fca_transport
2319  *      structure it receives. The FCA must fill in the port_info structure
2320  *      passed in the call and also stash the information for future calls.
2321  *
2322  * Input:
2323  *      dip = pointer to FCA information structure.
2324  *      port_info = pointer to port information structure.
2325  *      bind_info = pointer to bind information structure.
2326  *
2327  * Returns:
2328  *      NULL = failure, otherwise an opaque handle for the bound port.
2329  *
2330  * Context:
2331  *      Kernel context.
2332  */
2333 static opaque_t
2334 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2335     fc_fca_bind_info_t *bind_info)
2336 {
2337         ql_adapter_state_t      *ha, *vha;
2338         opaque_t                fca_handle = NULL;
2339         port_id_t               d_id;
2340         int                     port_npiv = bind_info->port_npiv;
2341         uchar_t                 *port_nwwn = bind_info->port_nwwn.raw_wwn;
2342         uchar_t                 *port_pwwn = bind_info->port_pwwn.raw_wwn;
2343 
2344         /* get state info based on the dip */
2345         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2346         if (ha == NULL) {
2347                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2348                     ddi_get_instance(dip));
2349                 return (NULL);
2350         }
2351         QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2352 
2353         /* Verify port number is supported. */
2354         if (port_npiv != 0) {
2355                 if (!(ha->flags & VP_ENABLED)) {
2356                         QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2357                             ha->instance);
2358                         port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2359                         return (NULL);
2360                 }
2361                 if (!(ha->flags & POINT_TO_POINT)) {
2362                         QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2363                             ha->instance);
2364                         port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2365                         return (NULL);
2366                 }
2367                 if (!(ha->flags & FDISC_ENABLED)) {
2368                         QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2369                             "FDISC\n", ha->instance);
2370                         port_info->pi_error = FC_NPIV_FDISC_FAILED;
2371                         return (NULL);
2372                 }
2373                 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2374                     MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2375                         QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2376                             "FC_OUTOFBOUNDS\n", ha->instance,
                                bind_info->port_num);
2377                         port_info->pi_error = FC_OUTOFBOUNDS;
2378                         return (NULL);
2379                 }
2380         } else if (bind_info->port_num != 0) {
2381                 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2382                     "supported\n", ha->instance, bind_info->port_num);
2383                 port_info->pi_error = FC_OUTOFBOUNDS;
2384                 return (NULL);
2385         }
2386 
2387         /* Locate port context. */
2388         for (vha = ha; vha != NULL; vha = vha->vp_next) {
2389                 if (vha->vp_index == bind_info->port_num) {
2390                         break;
2391                 }
2392         }
2393 
2394         /* If virtual port does not exist. */
2395         if (vha == NULL) {
2396                 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2397         }
2398 
2399         /* make sure this port isn't already bound */
2400         if (vha->flags & FCA_BOUND) {
2401                 port_info->pi_error = FC_ALREADY;
2402         } else {
2403                 if (vha->vp_index != 0) {
2404                         bcopy(port_nwwn,
2405                             vha->loginparams.node_ww_name.raw_wwn, 8);
2406                         bcopy(port_pwwn,
2407                             vha->loginparams.nport_ww_name.raw_wwn, 8);
2408                 }
2409                 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2410                         if (ql_vport_enable(vha) != QL_SUCCESS) {
2411                                 QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2412                                     "virtual port=%d\n", ha->instance,
2413                                     vha->vp_index);
2414                                 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2415                                 return (NULL);
2416                         }
2417                         cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2418                             "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2419                             "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2420                             QL_NAME, ha->instance, vha->vp_index,
2421                             port_pwwn[0], port_pwwn[1], port_pwwn[2],
2422                             port_pwwn[3], port_pwwn[4], port_pwwn[5],
2423                             port_pwwn[6], port_pwwn[7],
2424                             port_nwwn[0], port_nwwn[1], port_nwwn[2],
2425                             port_nwwn[3], port_nwwn[4], port_nwwn[5],
2426                             port_nwwn[6], port_nwwn[7]);
2427                 }
2428 
2429                 /* stash the bind_info supplied by the FC Transport */
2430                 vha->bind_info.port_handle = bind_info->port_handle;
2431                 vha->bind_info.port_statec_cb =
2432                     bind_info->port_statec_cb;
2433                 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2434 
2435                 /* Set port's source ID. */
2436                 port_info->pi_s_id.port_id = vha->d_id.b24;
2437 
2438                 /* copy out the default login parameters */
2439                 bcopy((void *)&vha->loginparams,
2440                     (void *)&port_info->pi_login_params,
2441                     sizeof (la_els_logi_t));
2442 
2443                 /* Set port's hard address if enabled. */
2444                 port_info->pi_hard_addr.hard_addr = 0;
2445                 if (bind_info->port_num == 0) {
2446                         d_id.b24 = ha->d_id.b24;
2447                         if (CFG_IST(ha, CFG_CTRL_24258081)) {
2448                                 if (ha->init_ctrl_blk.cb24.
2449                                     firmware_options_1[0] & BIT_0) {
2450                                         d_id.b.al_pa = ql_index_to_alpa[ha->
2451                                             init_ctrl_blk.cb24.
2452                                             hard_address[0]];
2453                                         port_info->pi_hard_addr.hard_addr =
2454                                             d_id.b24;
2455                                 }
2456                         } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2457                             BIT_0) {
2458                                 d_id.b.al_pa = ql_index_to_alpa[ha->
2459                                     init_ctrl_blk.cb.hard_address[0]];
2460                                 port_info->pi_hard_addr.hard_addr = d_id.b24;
2461                         }
2462 
2463                         /* Set the node id data */
2464                         if (ql_get_rnid_params(ha,
2465                             sizeof (port_info->pi_rnid_params.params),
2466                             (caddr_t)&port_info->pi_rnid_params.params) ==
2467                             QL_SUCCESS) {
 
2479                                     sizeof (fca_port_attrs_t));
2480                         }
2481                 } else {
2482                         port_info->pi_rnid_params.status = FC_FAILURE;
2483                         if (ha->pi_attrs != NULL) {
2484                                 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2485                                     sizeof (fca_port_attrs_t));
2486                         }
2487                 }
2488 
2489                 /* Generate handle for this FCA. */
2490                 fca_handle = (opaque_t)vha;
2491 
2492                 ADAPTER_STATE_LOCK(ha);
2493                 vha->flags |= FCA_BOUND;
2494                 ADAPTER_STATE_UNLOCK(ha);
2495                 /* Set port's current state. */
2496                 port_info->pi_port_state = vha->state;
2497         }
2498 
2499         QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2500             "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2501             port_info->pi_port_state, port_info->pi_s_id.port_id);
2502 
2503         return (fca_handle);
2504 }
2505 
2506 /*
2507  * ql_unbind_port
2508  *      To unbind a Fibre Channel Adapter from an FC Port driver.
2509  *
2510  * Input:
2511  *      fca_handle = handle setup by ql_bind_port().
2512  *
2513  * Context:
2514  *      Kernel context.
2515  */
2516 static void
2517 ql_unbind_port(opaque_t fca_handle)
2518 {
2519         ql_adapter_state_t      *ha;
2520         ql_tgt_t                *tq;
2521         uint32_t                flgs;
2522 
2523         ha = ql_fca_handle_to_state(fca_handle);
2524         if (ha == NULL) {
2525                 /*EMPTY*/
2526                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2527                     (void *)fca_handle);
2528         } else {
2529                 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2530                     ha->vp_index);
2531 
2532                 if (!(ha->flags & FCA_BOUND)) {
2533                         /*EMPTY*/
2534                         QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2535                             ha->instance, ha->vp_index);
2536                 } else {
2537                         if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2538                                 if ((tq = ql_loop_id_to_queue(ha,
2539                                     FL_PORT_24XX_HDL)) != NULL) {
2540                                         (void) ql_logout_fabric_port(ha, tq);
2541                                 }
2542                                 (void) ql_vport_control(ha, (uint8_t)
2543                                     (CFG_IST(ha, CFG_CTRL_2425) ?
2544                                     VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2545                                 flgs = FCA_BOUND | VP_ENABLED;
2546                         } else {
2547                                 flgs = FCA_BOUND;
2548                         }
2549                         ADAPTER_STATE_LOCK(ha);
2550                         ha->flags &= ~flgs;
2551                         ADAPTER_STATE_UNLOCK(ha);
2552                 }
2553 
2554                 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2555                     ha->vp_index);
2556         }
2557 }
2558 
2559 /*
2560  * ql_init_pkt
2561  *      Initialize FCA portion of packet.
2562  *
2563  * Input:
2564  *      fca_handle = handle setup by ql_bind_port().
2565  *      pkt = pointer to fc_packet.
2566  *
2567  * Returns:
2568  *      FC_SUCCESS - the packet has successfully been initialized.
2569  *      FC_UNBOUND - the fca_handle specified is not bound.
2570  *      FC_NOMEM - the FCA failed initialization due to an allocation error.
2571  *      FC_FAILURE - the FCA failed initialization for undisclosed reasons
2572  *
2573  * Context:
2574  *      Kernel context.
2575  */
2576 /* ARGSUSED */
2577 static int
2578 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2579 {
2580         ql_adapter_state_t      *ha;
2581         ql_srb_t                *sp;
2582         int                     rval = FC_SUCCESS;
2583 
2584         ha = ql_fca_handle_to_state(fca_handle);
2585         if (ha == NULL) {
2586                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2587                     (void *)fca_handle);
2588                 return (FC_UNBOUND);
2589         }
2590         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2591 
2592         sp = (ql_srb_t *)pkt->pkt_fca_private;
2593         sp->flags = 0;
2594 
2595         /* init cmd links */
2596         sp->cmd.base_address = sp;
2597         sp->cmd.prev = NULL;
2598         sp->cmd.next = NULL;
2599         sp->cmd.head = NULL;
2600 
2601         /* init watchdog links */
2602         sp->wdg.base_address = sp;
2603         sp->wdg.prev = NULL;
2604         sp->wdg.next = NULL;
2605         sp->wdg.head = NULL;
2606         sp->pkt = pkt;
2607         sp->ha = ha;
2608         sp->magic_number = QL_FCA_BRAND;
2609         sp->sg_dma.dma_handle = NULL;
2610 #ifndef __sparc
2611         if (CFG_IST(ha, CFG_CTRL_8021)) {
2612                 /* Setup DMA for scatter gather list. */
2613                 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2614                 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2615                 sp->sg_dma.cookie_count = 1;
2616                 sp->sg_dma.alignment = 64;
2617                 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2618                         rval = FC_NOMEM;
2619                 }
2620         }
2621 #endif  /* __sparc */
2622 
2623         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2624 
2625         return (rval);
2626 }
2627 
2628 /*
2629  * ql_un_init_pkt
2630  *      Release all local resources bound to packet.
2631  *
2632  * Input:
2633  *      fca_handle = handle setup by ql_bind_port().
2634  *      pkt = pointer to fc_packet.
2635  *
2636  * Returns:
2637  *      FC_SUCCESS - the packet has successfully been invalidated.
2638  *      FC_UNBOUND - the fca_handle specified is not bound.
2639  *      FC_BADPACKET - the packet has not been initialized or has
2640  *                      already been freed by this FCA.
2641  *
2642  * Context:
2643  *      Kernel context.
2644  */
2645 static int
2646 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2647 {
2648         ql_adapter_state_t *ha;
2649         int rval;
2650         ql_srb_t *sp;
2651 
2652         ha = ql_fca_handle_to_state(fca_handle);
2653         if (ha == NULL) {
2654                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2655                     (void *)fca_handle);
2656                 return (FC_UNBOUND);
2657         }
2658         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2659 
2660         sp = (ql_srb_t *)pkt->pkt_fca_private;
2661 
2662         if (sp->magic_number != QL_FCA_BRAND) {
2663                 EL(ha, "failed, FC_BADPACKET\n");
2664                 rval = FC_BADPACKET;
2665         } else {
2666                 sp->magic_number = NULL;
2667                 ql_free_phys(ha, &sp->sg_dma);
2668                 rval = FC_SUCCESS;
2669         }
2670 
2671         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2672 
2673         return (rval);
2674 }
2675 
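     /*
      * Illustrative sketch (not part of the driver): fca_pkt_size was set
      * to sizeof (ql_srb_t) during attach, so every fc_packet_t handed to
      * this FCA carries that much FCA-private storage.  ql_init_pkt()
      * simply overlays the SRB on it and brands it, and ql_un_init_pkt()
      * clears the brand again so a stale packet can be detected:
      *
      *	ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
      *
      *	sp->magic_number = QL_FCA_BRAND;
      */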
2676 /*
2677  * ql_els_send
2678  *      Issue an extended link service request.
2679  *
2680  * Input:
2681  *      fca_handle = handle setup by ql_bind_port().
2682  *      pkt = pointer to fc_packet.
2683  *
2684  * Returns:
2685  *      FC_SUCCESS - the command was successful.
2686  *      FC_ELS_FREJECT - the command was rejected by a Fabric.
2687  *      FC_ELS_PREJECT - the command was rejected by an N-port.
2688  *      FC_TRANSPORT_ERROR - a transport error occurred.
2689  *      FC_UNBOUND - the fca_handle specified is not bound.
2690  *      FC_ELS_BAD - the FCA can not issue the requested ELS.
2691  *
2692  * Context:
2693  *      Kernel context.
2694  */
2695 static int
2696 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2697 {
2698         ql_adapter_state_t      *ha;
2699         int                     rval;
2700         clock_t                 timer = drv_usectohz(30000000);
2701         ls_code_t               els;
2702         la_els_rjt_t            rjt;
2703         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
2704 
2705         /* Verify proper command. */
2706         ha = ql_cmd_setup(fca_handle, pkt, &rval);
2707         if (ha == NULL) {
2708                 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2709                     rval, fca_handle);
2710                 return (FC_INVALID_REQUEST);
2711         }
2712         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2713 
2714         /* Wait for suspension to end. */
2715         TASK_DAEMON_LOCK(ha);
2716         while (ha->task_daemon_flags & QL_SUSPENDED) {
2717                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2718 
2719                 /* 30 seconds from now */
2720                 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
2721                     &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
2722                         /*
2723                          * The timeout time 'timer' was
2724                          * reached without the condition
2725                          * being signaled.
2726                          */
2727                         pkt->pkt_state = FC_PKT_TRAN_BSY;
2728                         pkt->pkt_reason = FC_REASON_XCHG_BSY;
2729 
2730                         /* Release task daemon lock. */
2731                         TASK_DAEMON_UNLOCK(ha);
2732 
2733                         EL(ha, "QL_SUSPENDED failed=%xh\n",
2734                             QL_FUNCTION_TIMEOUT);
2735                         return (FC_TRAN_BUSY);
2736                 }
 
2746                 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2747         }
2748 
2749         pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2750         pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2751         pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
2752             R_CTL_SOLICITED_CONTROL;
2753         pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
2754             F_CTL_END_SEQ;
2755 
2756         sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
2757             SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
2758             SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
2759 
2760         sp->flags |= SRB_ELS_PKT;
2761 
2762         /* map the type of ELS to a function */
2763         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
2764             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
2765 
2766 #if 0
2767         QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
2768         QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2769             sizeof (fc_frame_hdr_t) / 4);
2770         QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2771         QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
2772 #endif
2773 
2774         sp->iocb = ha->els_cmd;
2775         sp->req_cnt = 1;
2776 
2777         switch (els.ls_code) {
2778         case LA_ELS_RJT:
2779         case LA_ELS_ACC:
2780                 EL(ha, "LA_ELS_RJT\n");
2781                 pkt->pkt_state = FC_PKT_SUCCESS;
2782                 rval = FC_SUCCESS;
2783                 break;
2784         case LA_ELS_PLOGI:
2785         case LA_ELS_PDISC:
2786                 rval = ql_els_plogi(ha, pkt);
2787                 break;
2788         case LA_ELS_FLOGI:
2789         case LA_ELS_FDISC:
2790                 rval = ql_els_flogi(ha, pkt);
2791                 break;
2792         case LA_ELS_LOGO:
2793                 rval = ql_els_logo(ha, pkt);
2794                 break;
2795         case LA_ELS_PRLI:
2796                 rval = ql_els_prli(ha, pkt);
2797                 break;
2798         case LA_ELS_PRLO:
2799                 rval = ql_els_prlo(ha, pkt);
2800                 break;
 
2828         case LA_ELS_RNID:
2829                 rval = ql_els_rnid(ha, pkt);
2830                 break;
2831         default:
2832                 EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
2833                     els.ls_code);
2834                 /* Build RJT. */
2835                 bzero(&rjt, sizeof (rjt));
2836                 rjt.ls_code.ls_code = LA_ELS_RJT;
2837                 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
2838 
2839                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
2840                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
2841 
2842                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
2843                 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2844                 rval = FC_SUCCESS;
2845                 break;
2846         }
2847 
2848 #if 0
2849         QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
2850         QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
2851             sizeof (fc_frame_hdr_t) / 4);
2852 #endif
2853         /*
2854          * Return success if the srb was consumed by an iocb. The packet
2855          * completion callback will be invoked by the response handler.
2856          */
2857         if (rval == QL_CONSUMED) {
2858                 rval = FC_SUCCESS;
2859         } else if (rval == FC_SUCCESS &&
2860             !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
2861                 /* Do command callback only if no error */
2862                 ql_awaken_task_daemon(ha, sp, 0, 0);
2863         }
2864 
2865         if (rval != FC_SUCCESS) {
2866                 EL(ha, "failed, rval = %xh\n", rval);
2867         } else {
2868                 /*EMPTY*/
2869                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2870         }
2871         return (rval);
2872 }
2873 
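     /*
      * Illustrative sketch (not part of the driver): ql_els_send() above and
      * ql_getmap() below both park the caller while the adapter is
      * suspended by waiting on cv_dr_suspended with a 30 second relative
      * timeout; cv_reltimedwait(9F) returns -1 when the timeout expires
      * without the condition being signaled.  With a hypothetical
      * cv/mutex/flag, the idiom is:
      *
      *	clock_t timer = drv_usectohz(30000000);
      *
      *	mutex_enter(&my_mutex);
      *	while (my_suspended) {
      *		if (cv_reltimedwait(&my_cv, &my_mutex, timer,
      *		    TR_CLOCK_TICK) == -1) {
      *			break;
      *		}
      *	}
      *	mutex_exit(&my_mutex);
      */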
2874 /*
2875  * ql_get_cap
2876  *      Export FCA hardware and software capabilities.
2877  *
2878  * Input:
2879  *      fca_handle = handle setup by ql_bind_port().
2880  *      cap = pointer to the capabilities string.
2881  *      ptr = buffer pointer for return capability.
2882  *
2883  * Returns:
2884  *      FC_CAP_ERROR - no such capability
2885  *      FC_CAP_FOUND - the capability was returned and cannot be set
2886  *      FC_CAP_SETTABLE - the capability was returned and can be set
2887  *      FC_UNBOUND - the fca_handle specified is not bound.
2888  *
2889  * Context:
2890  *      Kernel context.
2891  */
2892 static int
2893 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2894 {
2895         ql_adapter_state_t      *ha;
2896         int                     rval;
2897         uint32_t                *rptr = (uint32_t *)ptr;
2898 
2899         ha = ql_fca_handle_to_state(fca_handle);
2900         if (ha == NULL) {
2901                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2902                     (void *)fca_handle);
2903                 return (FC_UNBOUND);
2904         }
2905         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2906 
2907         if (strcmp(cap, FC_NODE_WWN) == 0) {
2908                 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2909                     ptr, 8);
2910                 rval = FC_CAP_FOUND;
2911         } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2912                 bcopy((void *)&ha->loginparams, ptr,
2913                     sizeof (la_els_logi_t));
2914                 rval = FC_CAP_FOUND;
2915         } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2916                 *rptr = (uint32_t)QL_UB_LIMIT;
2917                 rval = FC_CAP_FOUND;
2918         } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2919 
2920                 dev_info_t      *psydip = NULL;
2921 #ifdef __sparc
2922                 /*
2923                  * Disable streaming for certain 2 chip adapters
2924                  * below Psycho to handle Psycho byte hole issue.
2925                  */
2926                 if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2927                     (!CFG_IST(ha, CFG_SBUS_CARD))) {
2928                         for (psydip = ddi_get_parent(ha->dip); psydip;
2929                             psydip = ddi_get_parent(psydip)) {
2930                                 if (strcmp(ddi_driver_name(psydip),
2931                                     "pcipsy") == 0) {
2932                                         break;
2933                                 }
2934                         }
2935                 }
2936 #endif  /* __sparc */
2937 
2938                 if (psydip) {
2939                         *rptr = (uint32_t)FC_NO_STREAMING;
2940                         EL(ha, "No Streaming\n");
2941                 } else {
2942                         *rptr = (uint32_t)FC_ALLOW_STREAMING;
2943                         EL(ha, "Allow Streaming\n");
2944                 }
2945                 rval = FC_CAP_FOUND;
2946         } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2947                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2948                         *rptr = (uint32_t)CHAR_TO_SHORT(
2949                             ha->init_ctrl_blk.cb24.max_frame_length[0],
2950                             ha->init_ctrl_blk.cb24.max_frame_length[1]);
2951                 } else {
2952                         *rptr = (uint32_t)CHAR_TO_SHORT(
2953                             ha->init_ctrl_blk.cb.max_frame_length[0],
2954                             ha->init_ctrl_blk.cb.max_frame_length[1]);
2955                 }
2956                 rval = FC_CAP_FOUND;
2957         } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2958                 *rptr = FC_RESET_RETURN_ALL;
2959                 rval = FC_CAP_FOUND;
2960         } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2961                 *rptr = FC_NO_DVMA_SPACE;
2962                 rval = FC_CAP_FOUND;
2963         } else {
2964                 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2965                 rval = FC_CAP_ERROR;
2966         }
2967 
2968         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2969 
2970         return (rval);
2971 }
2972 
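     /*
      * Illustrative sketch (not part of the driver): the FC transport
      * reaches this routine through the fca_get_cap vector installed in the
      * fc_fca_tran_t during attach.  Assuming a bound handle 'fca_handle',
      * a query through that vector for the supported payload size would
      * look like:
      *
      *	uint32_t max_payload;
      *
      *	if (tran->fca_get_cap(fca_handle, FC_CAP_PAYLOAD_SIZE,
      *	    &max_payload) == FC_CAP_FOUND) {
      *		cmn_err(CE_CONT, "max payload %u\n", max_payload);
      *	}
      */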
2973 /*
2974  * ql_set_cap
2975  *      Allow the FC Transport to set FCA capabilities if possible.
2976  *
2977  * Input:
2978  *      fca_handle = handle setup by ql_bind_port().
2979  *      cap = pointer to the capabilities string.
2980  *      ptr = buffer pointer for capability.
2981  *
2982  * Returns:
2983  *      FC_CAP_ERROR - no such capability
2984  *      FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2985  *      FC_CAP_SETTABLE - the capability was successfully set.
2986  *      FC_UNBOUND - the fca_handle specified is not bound.
2987  *
2988  * Context:
2989  *      Kernel context.
2990  */
2991 /* ARGSUSED */
2992 static int
2993 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2994 {
2995         ql_adapter_state_t      *ha;
2996         int                     rval;
2997 
2998         ha = ql_fca_handle_to_state(fca_handle);
2999         if (ha == NULL) {
3000                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3001                     (void *)fca_handle);
3002                 return (FC_UNBOUND);
3003         }
3004         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3005 
3006         if (strcmp(cap, FC_NODE_WWN) == 0) {
3007                 rval = FC_CAP_FOUND;
3008         } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3009                 rval = FC_CAP_FOUND;
3010         } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3011                 rval = FC_CAP_FOUND;
3012         } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3013                 rval = FC_CAP_FOUND;
3014         } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3015                 rval = FC_CAP_FOUND;
3016         } else {
3017                 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3018                 rval = FC_CAP_ERROR;
3019         }
3020 
3021         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3022 
3023         return (rval);
3024 }
3025 
3026 /*
3027  * ql_getmap
3028  *      Request the Arbitrated Loop (AL-PA) map.
3029  *
3030  * Input:
3031  *      fca_handle = handle setup by ql_bind_port().
3032  *      mapbuf = buffer pointer for map.
3033  *
3034  * Returns:
3035  *      FC_OLDPORT - the specified port is not operating in loop mode.
3036  *      FC_OFFLINE - the specified port is not online.
3037  *      FC_NOMAP - there is no loop map available for this port.
3038  *      FC_UNBOUND - the fca_handle specified is not bound.
3039  *      FC_SUCCESS - a valid map has been placed in mapbuf.
3040  *
3041  * Context:
3042  *      Kernel context.
3043  */
3044 static int
3045 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
3046 {
3047         ql_adapter_state_t      *ha;
3048         clock_t                 timer = drv_usectohz(30000000);
3049         int                     rval = FC_SUCCESS;
3050 
3051         ha = ql_fca_handle_to_state(fca_handle);
3052         if (ha == NULL) {
3053                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3054                     (void *)fca_handle);
3055                 return (FC_UNBOUND);
3056         }
3057         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3058 
3059         mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
3060         mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
3061 
3062         /* Wait for suspension to end. */
3063         TASK_DAEMON_LOCK(ha);
3064         while (ha->task_daemon_flags & QL_SUSPENDED) {
3065                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3066 
3067                 /* 30 seconds from now */
3068                 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3069                     &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3070                         /*
3071                          * The timeout time 'timer' was
3072                          * reached without the condition
3073                          * being signaled.
3074                          */
3075 
3076                         /* Release task daemon lock. */
3077                         TASK_DAEMON_UNLOCK(ha);
3078 
3079                         EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
3080                         return (FC_TRAN_BUSY);
3081                 }
3082         }
3083         /* Release task daemon lock. */
3084         TASK_DAEMON_UNLOCK(ha);
3085 
3086         if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
3087             (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
3088                 /*
3089                  * Since transport drivers consider this an offline
3090                  * condition, let's wait a few seconds for any loop
3091                  * transitions before we reset the chip and restart
3092                  * all over again.
3093                  */
3094                 ql_delay(ha, 2000000);
3095                 EL(ha, "failed, FC_NOMAP\n");
3096                 rval = FC_NOMAP;
3097         } else {
3098                 /*EMPTY*/
3099                 QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
3100                     "data %xh %xh %xh %xh\n", ha->instance,
3101                     mapbuf->lilp_myalpa, mapbuf->lilp_length,
3102                     mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
3103                     mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
3104         }
3105 
3106         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3107 #if 0
3108         QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
3109 #endif
3110         return (rval);
3111 }
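
/*
 * Illustrative sketch (not part of the original driver): retrieving the
 * loop position map with ql_getmap().  The fca_handle variable is an
 * assumption for the example.
 *
 *        fc_lilpmap_t    map;
 *
 *        if (ql_getmap(fca_handle, &map) != FC_SUCCESS) {
 *                return;
 *        }
 *
 * On FC_SUCCESS, map.lilp_myalpa holds this port's AL_PA and the first
 * map.lilp_length entries of map.lilp_alpalist[] describe the loop.
 */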
3112 
3113 /*
3114  * ql_transport
3115  *      Issue an I/O request. Handles all regular requests.
3116  *
3117  * Input:
3118  *      fca_handle = handle setup by ql_bind_port().
3119  *      pkt = pointer to fc_packet.
3120  *
3121  * Returns:
3122  *      FC_SUCCESS - the packet was accepted for transport.
3123  *      FC_TRANSPORT_ERROR - a transport error occurred.
3124  *      FC_BADPACKET - the packet to be transported had not been
3125  *                      initialized by this FCA.
3126  *      FC_UNBOUND - the fca_handle specified is not bound.
3127  *
3128  * Context:
3129  *      Kernel context.
3130  */
3131 static int
3132 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
3133 {
3134         ql_adapter_state_t      *ha;
3135         int                     rval = FC_TRANSPORT_ERROR;
3136         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
3137 
3138         /* Verify proper command. */
3139         ha = ql_cmd_setup(fca_handle, pkt, &rval);
3140         if (ha == NULL) {
3141                 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3142                     rval, fca_handle);
3143                 return (rval);
3144         }
3145         QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
3146 #if 0
3147         QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
3148             sizeof (fc_frame_hdr_t) / 4);
3149         QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
3150         QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
3151 #endif
3152 
3153         /* Reset SRB flags. */
3154         sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
3155             SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
3156             SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
3157             SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
3158             SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
3159             SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
3160             SRB_MS_PKT | SRB_ELS_PKT);
3161 
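        /*
         * Pre-fill the response frame header from the command header so
         * that a default status response is in place for the request.
         */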
3162         pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3163         pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
3164         pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3165         pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
3166         pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
3167 
3168         switch (pkt->pkt_cmd_fhdr.r_ctl) {
3169         case R_CTL_COMMAND:
3170                 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
3171                         sp->flags |= SRB_FCP_CMD_PKT;
3172                         rval = ql_fcp_scsi_cmd(ha, pkt, sp);
3173                 }
3174                 break;
3175 
3176         default:
3177                 /* Setup response header and buffer. */
3178                 if (pkt->pkt_rsplen) {
3179                         bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3180                 }
3181 
3182                 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3183                 case R_CTL_UNSOL_DATA:
3184                         if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3185                                 sp->flags |= SRB_IP_PKT;
3186                                 rval = ql_fcp_ip_cmd(ha, pkt, sp);
3187                         }
3188                         break;
3189 
3190                 case R_CTL_UNSOL_CONTROL:
3191                         if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3192                                 sp->flags |= SRB_GENERIC_SERVICES_PKT;
3193                                 rval = ql_fc_services(ha, pkt);
3194                         }
3195                         break;
3196 
3197                 case R_CTL_SOLICITED_DATA:
3198                 case R_CTL_STATUS:
3199                 default:
3200                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
3201                         pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3202                         rval = FC_TRANSPORT_ERROR;
3203                         EL(ha, "unknown, r_ctl=%xh\n",
3204                             pkt->pkt_cmd_fhdr.r_ctl);
3205                         break;
3206                 }
3207         }
3208 
3209         if (rval != FC_SUCCESS) {
3210                 EL(ha, "failed, rval = %xh\n", rval);
3211         } else {
3212                 /*EMPTY*/
3213                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3214         }
3215 
3216         return (rval);
3217 }
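
/*
 * Illustrative sketch (not part of the original driver): the command
 * frame header fields that route a packet through ql_transport().  The
 * fca_handle and pkt variables are assumptions for the example; the
 * packet would already have been set up with ql_init_pkt().
 *
 *        pkt->pkt_cmd_fhdr.r_ctl = R_CTL_COMMAND;
 *        pkt->pkt_cmd_fhdr.type = FC_TYPE_SCSI_FCP;
 *        rval = ql_transport(fca_handle, pkt);
 *
 * R_CTL_COMMAND with FC_TYPE_SCSI_FCP is issued as an FCP SCSI command,
 * R_CTL_UNSOL_DATA with FC_TYPE_IS8802_SNAP as an IP frame, and
 * R_CTL_UNSOL_CONTROL with FC_TYPE_FC_SERVICES as a generic services
 * request; anything else is rejected with FC_PKT_LOCAL_RJT.
 */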
3218 
3219 /*
3220  * ql_ub_alloc
3221  *      Allocate buffers for unsolicited exchanges.
3222  *
3223  * Input:
3224  *      fca_handle = handle setup by ql_bind_port().
3225  *      tokens = token array for each buffer.
3226  *      size = size of each buffer.
3227  *      count = pointer to number of buffers.
3228  *      type = the FC-4 type the buffers are reserved for.
3229  *              1 = Extended Link Services, 5 = LLC/SNAP
3230  *
3231  * Returns:
3232  *      FC_FAILURE - buffers could not be allocated.
3233  *      FC_TOOMANY - the FCA could not allocate the requested
 
3238  * Context:
3239  *      Kernel context.
3240  */
3241 static int
3242 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3243     uint32_t *count, uint32_t type)
3244 {
3245         ql_adapter_state_t      *ha;
3246         caddr_t                 bufp = NULL;
3247         fc_unsol_buf_t          *ubp;
3248         ql_srb_t                *sp;
3249         uint32_t                index;
3250         uint32_t                cnt;
3251         uint32_t                ub_array_index = 0;
3252         int                     rval = FC_SUCCESS;
3253         int                     ub_updated = FALSE;
3254 
3255         /* Check handle. */
3256         ha = ql_fca_handle_to_state(fca_handle);
3257         if (ha == NULL) {
3258                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3259                     (void *)fca_handle);
3260                 return (FC_UNBOUND);
3261         }
3262         QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
3263             ha->instance, ha->vp_index, *count);
3264 
3265         QL_PM_LOCK(ha);
3266         if (ha->power_level != PM_LEVEL_D0) {
3267                 QL_PM_UNLOCK(ha);
3268                 QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
3269                     ha->vp_index);
3270                 return (FC_FAILURE);
3271         }
3272         QL_PM_UNLOCK(ha);
3273 
3274         /* Acquire adapter state lock. */
3275         ADAPTER_STATE_LOCK(ha);
3276 
3277         /* Check the count. */
3278         if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3279                 *count = 0;
3280                 EL(ha, "failed, FC_TOOMANY\n");
3281                 rval = FC_TOOMANY;
3282         }
3283 
3284         /*
3285          * reset ub_array_index
3286          */
3287         ub_array_index = 0;
3288 
3289         /*
3290          * Now proceed to allocate any buffers required
3291          */
3292         for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3293                 /* Allocate all memory needed. */
3294                 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3295                     KM_SLEEP);
3296                 if (ubp == NULL) {
 
3372                         ubp->ub_buffer = bufp;
3373                         ubp->ub_bufsize = size;
3374                         ubp->ub_port_handle = fca_handle;
3375                         ubp->ub_token = ub_array_index;
3376 
3377                         /* Save the token. */
3378                         tokens[index] = ub_array_index;
3379 
3380                         /* Setup FCA private information. */
3381                         sp->ub_type = type;
3382                         sp->handle = ub_array_index;
3383                         sp->flags |= SRB_UB_IN_FCA;
3384 
3385                         ha->ub_array[ub_array_index] = ubp;
3386                         ha->ub_allocated++;
3387                         ub_updated = TRUE;
3388                         QL_UB_UNLOCK(ha);
3389                 }
3390         }
3391 
3392         /* Release adapter state lock. */
3393         ADAPTER_STATE_UNLOCK(ha);
3394 
3395         /* IP buffer. */
3396         if (ub_updated) {
3397                 if ((type == FC_TYPE_IS8802_SNAP) &&
3398                     (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {
3399 
3400                         ADAPTER_STATE_LOCK(ha);
3401                         ha->flags |= IP_ENABLED;
3402                         ADAPTER_STATE_UNLOCK(ha);
3403 
3404                         if (!(ha->flags & IP_INITIALIZED)) {
3405                                 if (CFG_IST(ha, CFG_CTRL_2422)) {
3406                                         ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3407                                             LSB(ql_ip_mtu);
3408                                         ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3409                                             MSB(ql_ip_mtu);
3410                                         ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3411                                             LSB(size);
3412                                         ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3413                                             MSB(size);
3414 
3415                                         cnt = CHAR_TO_SHORT(
3416                                             ha->ip_init_ctrl_blk.cb24.cc[0],
3417                                             ha->ip_init_ctrl_blk.cb24.cc[1]);
3418 
3419                                         if (cnt < *count) {
3420                                                 ha->ip_init_ctrl_blk.cb24.cc[0]
3421                                                     = LSB(*count);
3422                                                 ha->ip_init_ctrl_blk.cb24.cc[1]
3423                                                     = MSB(*count);
3424                                         }
3425                                 } else {
 
3437                                             ha->ip_init_ctrl_blk.cb.cc[1]);
3438 
3439                                         if (cnt < *count) {
3440                                                 ha->ip_init_ctrl_blk.cb.cc[0] =
3441                                                     LSB(*count);
3442                                                 ha->ip_init_ctrl_blk.cb.cc[1] =
3443                                                     MSB(*count);
3444                                         }
3445                                 }
3446 
3447                                 (void) ql_initialize_ip(ha);
3448                         }
3449                         ql_isp_rcvbuf(ha);
3450                 }
3451         }
3452 
3453         if (rval != FC_SUCCESS) {
3454                 EL(ha, "failed=%xh\n", rval);
3455         } else {
3456                 /*EMPTY*/
3457                 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3458                     ha->vp_index);
3459         }
3460         return (rval);
3461 }
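
/*
 * Illustrative sketch (not part of the original driver): reserving
 * unsolicited buffers with ql_ub_alloc().  The fca_handle variable and
 * the size/count values are assumptions for the example.
 *
 *        uint64_t        tokens[8];
 *        uint32_t        cnt = 8;
 *
 *        rval = ql_ub_alloc(fca_handle, tokens, 4096, &cnt,
 *            FC_TYPE_IS8802_SNAP);
 *
 * On FC_SUCCESS, tokens[] identifies the reserved buffers for later
 * ql_ub_release() and ql_ub_free() calls; reserving FC_TYPE_IS8802_SNAP
 * buffers also enables the IP receive path as shown above.
 */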
3462 
3463 /*
3464  * ql_ub_free
3465  *      Free unsolicited buffers.
3466  *
3467  * Input:
3468  *      fca_handle = handle setup by ql_bind_port().
3469  *      count = number of buffers.
3470  *      tokens = token array for each buffer.
3471  *
3472  * Returns:
3473  *      FC_SUCCESS - the requested buffers have been freed.
3474  *      FC_UNBOUND - the fca_handle specified is not bound.
3475  *      FC_UB_BADTOKEN - an invalid token was encountered.
3476  *                       No buffers have been released.
3477  *
3478  * Context:
3479  *      Kernel context.
3480  */
3481 static int
3482 ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3483 {
3484         ql_adapter_state_t      *ha;
3485         ql_srb_t                *sp;
3486         uint32_t                index;
3487         uint64_t                ub_array_index;
3488         int                     rval = FC_SUCCESS;
3489 
3490         /* Check handle. */
3491         ha = ql_fca_handle_to_state(fca_handle);
3492         if (ha == NULL) {
3493                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3494                     (void *)fca_handle);
3495                 return (FC_UNBOUND);
3496         }
3497         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3498 
3499         /* Acquire adapter state lock. */
3500         ADAPTER_STATE_LOCK(ha);
3501 
3502         /* Check all returned tokens. */
3503         for (index = 0; index < count; index++) {
3504                 fc_unsol_buf_t  *ubp;
3505 
3506                 /* Check the token range. */
3507                 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3508                         EL(ha, "failed, FC_UB_BADTOKEN\n");
3509                         rval = FC_UB_BADTOKEN;
3510                         break;
3511                 }
3512 
3513                 /* Check the unsolicited buffer array. */
3514                 QL_UB_LOCK(ha);
3515                 ubp = ha->ub_array[ub_array_index];
3516 
3517                 if (ubp == NULL) {
3518                         EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3519                         rval = FC_UB_BADTOKEN;
3520                         QL_UB_UNLOCK(ha);
3521                         break;
3522                 }
3523 
3524                 /* Check the state of the unsolicited buffer. */
3525                 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3526                 sp->flags |= SRB_UB_FREE_REQUESTED;
3527 
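                /*
                 * If the buffer is not back in the FCA (it is still with
                 * the ULP or in use for a callback), drop the locks and
                 * wait for it to be returned before tearing it down.
                 */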
3528                 while (!(sp->flags & SRB_UB_IN_FCA) ||
3529                     (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
3530                         QL_UB_UNLOCK(ha);
3531                         ADAPTER_STATE_UNLOCK(ha);
3532                         delay(drv_usectohz(100000));
3533                         ADAPTER_STATE_LOCK(ha);
3534                         QL_UB_LOCK(ha);
3535                 }
3536                 ha->ub_array[ub_array_index] = NULL;
3537                 QL_UB_UNLOCK(ha);
3538                 ql_free_unsolicited_buffer(ha, ubp);
3539         }
3540 
3541         if (rval == FC_SUCCESS) {
3542                 /*
3543                  * Signal any pending hardware reset when there are
3544                  * no more unsolicited buffers in use.
3545                  */
3546                 if (ha->ub_allocated == 0) {
3547                         cv_broadcast(&ha->pha->cv_ub);
3548                 }
3549         }
3550 
3551         /* Release adapter state lock. */
3552         ADAPTER_STATE_UNLOCK(ha);
3553 
3554         if (rval != FC_SUCCESS) {
3555                 EL(ha, "failed=%xh\n", rval);
3556         } else {
3557                 /*EMPTY*/
3558                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3559         }
3560         return (rval);
3561 }
3562 
3563 /*
3564  * ql_ub_release
3565  *      Release unsolicited buffers from FC Transport
3566  *      to FCA for future use.
3567  *
3568  * Input:
3569  *      fca_handle = handle setup by ql_bind_port().
3570  *      count = number of buffers.
3571  *      tokens = token array for each buffer.
3572  *
3573  * Returns:
3574  *      FC_SUCCESS - the requested buffers have been released.
3575  *      FC_UNBOUND - the fca_handle specified is not bound.
3576  *      FC_UB_BADTOKEN - an invalid token was encountered.
3577  *              No buffers have been released.
3578  *
3579  * Context:
3580  *      Kernel context.
3581  */
3582 static int
3583 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3584 {
3585         ql_adapter_state_t      *ha;
3586         ql_srb_t                *sp;
3587         uint32_t                index;
3588         uint64_t                ub_array_index;
3589         int                     rval = FC_SUCCESS;
3590         int                     ub_ip_updated = FALSE;
3591 
3592         /* Check handle. */
3593         ha = ql_fca_handle_to_state(fca_handle);
3594         if (ha == NULL) {
3595                 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3596                     (void *)fca_handle);
3597                 return (FC_UNBOUND);
3598         }
3599         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3600 
3601         /* Acquire adapter state lock. */
3602         ADAPTER_STATE_LOCK(ha);
3603         QL_UB_LOCK(ha);
3604 
3605         /* Check all returned tokens. */
3606         for (index = 0; index < count; index++) {
3607                 /* Check the token range. */
3608                 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3609                         EL(ha, "failed, FC_UB_BADTOKEN\n");
3610                         rval = FC_UB_BADTOKEN;
3611                         break;
3612                 }
3613 
3614                 /* Check the unsolicited buffer array. */
3615                 if (ha->ub_array[ub_array_index] == NULL) {
3616                         EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3617                         rval = FC_UB_BADTOKEN;
3618                         break;
3619                 }
3620 
3621                 /* Check the state of the unsolicited buffer. */
3622                 sp = ha->ub_array[ub_array_index]->ub_fca_private;
 
3632                 /* Check all returned tokens. */
3633                 for (index = 0; index < count; index++) {
3634                         fc_unsol_buf_t  *ubp;
3635 
3636                         ub_array_index = tokens[index];
3637                         ubp = ha->ub_array[ub_array_index];
3638                         sp = ubp->ub_fca_private;
3639 
3640                         ubp->ub_resp_flags = 0;
3641                         sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3642                         sp->flags |= SRB_UB_IN_FCA;
3643 
3644                         /* IP buffer. */
3645                         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3646                                 ub_ip_updated = TRUE;
3647                         }
3648                 }
3649         }
3650 
3651         QL_UB_UNLOCK(ha);
3652         /* Release adapter state lock. */
3653         ADAPTER_STATE_UNLOCK(ha);
3654 
3655         /*
3656          * XXX: We should call ql_isp_rcvbuf() to return a
3657          * buffer to the ISP only if the number of buffers falls
3658          * below the low water mark.
3659          */
3660         if (ub_ip_updated) {
3661                 ql_isp_rcvbuf(ha);
3662         }
3663 
3664         if (rval != FC_SUCCESS) {
3665                 EL(ha, "failed, rval = %xh\n", rval);
3666         } else {
3667                 /*EMPTY*/
3668                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3669         }
3670         return (rval);
3671 }
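
/*
 * Illustrative note (not part of the original driver) on the unsolicited
 * buffer life cycle: a buffer reserved by ql_ub_alloc() is handed to the
 * ULP when an unsolicited frame arrives, returned to the FCA pool with
 * ql_ub_release(), and destroyed with ql_ub_free().  The variables below
 * carry over from the ql_ub_alloc() sketch and are assumptions.
 *
 *        (void) ql_ub_release(fca_handle, cnt, tokens);
 *        (void) ql_ub_free(fca_handle, cnt, tokens);
 */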
3672 
3673 /*
3674  * ql_abort
3675  *      Abort a packet.
3676  *
3677  * Input:
3678  *      fca_handle = handle setup by ql_bind_port().
3679  *      pkt = pointer to fc_packet.
3680  *      flags = KM_SLEEP flag.
3681  *
3682  * Returns:
3683  *      FC_SUCCESS - the packet has been successfully aborted.
3684  *      FC_ABORTED - the packet has been successfully aborted.
3685  *      FC_ABORTING - the packet is being aborted.
3686  *      FC_ABORT_FAILED - the packet could not be aborted.
3687  *      FC_TRANSPORT_ERROR - a transport error occurred while attempting
3688  *              to abort the packet.
3689  *      FC_BADEXCHANGE - no packet found.
3690  *      FC_UNBOUND - the fca_handle specified is not bound.
3691  *
3692  * Context:
3693  *      Kernel context.
3694  */
3695 static int
3696 ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
3697 {
3698         port_id_t               d_id;
3699         ql_link_t               *link;
3700         ql_adapter_state_t      *ha, *pha;
3701         ql_srb_t                *sp;
3702         ql_tgt_t                *tq;
3703         ql_lun_t                *lq;
3704         int                     rval = FC_ABORTED;
3705 
3706         ha = ql_fca_handle_to_state(fca_handle);
3707         if (ha == NULL) {
3708                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3709                     (void *)fca_handle);
3710                 return (FC_UNBOUND);
3711         }
3712 
3713         pha = ha->pha;
3714 
3715         QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3716 
3717         /* Get target queue pointer. */
3718         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
3719         tq = ql_d_id_to_queue(ha, d_id);
3720 
3721         if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
3722                 if (tq == NULL) {
3723                         EL(ha, "failed, FC_TRANSPORT_ERROR\n");
3724                         rval = FC_TRANSPORT_ERROR;
3725                 } else {
3726                         EL(ha, "failed, FC_OFFLINE\n");
3727                         rval = FC_OFFLINE;
3728                 }
3729                 return (rval);
3730         }
3731 
3732         sp = (ql_srb_t *)pkt->pkt_fca_private;
3733         lq = sp->lun_queue;
3734 
3735         /* Set poll flag if sleep wanted. */
3736         if (flags == KM_SLEEP) {
3737                 sp->flags |= SRB_POLL;
3738         }
3739 
3740         /* Acquire target queue lock. */
3741         DEVICE_QUEUE_LOCK(tq);
3742         REQUEST_RING_LOCK(ha);
3743 
3744         /* If command not already started. */
3745         if (!(sp->flags & SRB_ISP_STARTED)) {
3746                 /* Check pending queue for command. */
3747                 sp = NULL;
3748                 for (link = pha->pending_cmds.first; link != NULL;
3749                     link = link->next) {
3750                         sp = link->base_address;
3751                         if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3752                                 /* Remove srb from q. */
3753                                 ql_remove_link(&pha->pending_cmds, &sp->cmd);
3754                                 break;
3755                         } else {
3756                                 sp = NULL;
3757                         }
3758                 }
3759                 REQUEST_RING_UNLOCK(ha);
 
3766                                 if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3767                                         /* Remove srb from q. */
3768                                         ql_remove_link(&lq->cmd, &sp->cmd);
3769                                         break;
3770                                 } else {
3771                                         sp = NULL;
3772                                 }
3773                         }
3774                 }
3775                 /* Release device lock */
3776                 DEVICE_QUEUE_UNLOCK(tq);
3777 
3778                 /* If command on target queue. */
3779                 if (sp != NULL) {
3780                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
3781 
3782                         /* Set return status */
3783                         pkt->pkt_reason = CS_ABORTED;
3784 
3785                         sp->cmd.next = NULL;
3786                         ql_done(&sp->cmd);
3787                         rval = FC_ABORTED;
3788                 } else {
3789                         EL(ha, "failed, FC_BADEXCHANGE\n");
3790                         rval = FC_BADEXCHANGE;
3791                 }
3792         } else if (sp->flags & SRB_ISP_COMPLETED) {
3793                 /* Release request ring and device queue locks. */
3794                 REQUEST_RING_UNLOCK(ha);
3795                 DEVICE_QUEUE_UNLOCK(tq);
3796                 EL(ha, "failed, already done, FC_FAILURE\n");
3797                 rval = FC_FAILURE;
3798         } else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
3799             (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
3800                 /*
3801                  * If we get here, the target data/response CTIO is with
3802                  * the firmware.  Since the firmware is supposed to
3803                  * terminate such I/Os with an error, we need not do
3804                  * anything.  If the firmware decides not to terminate
3805                  * those I/Os and simply keeps quiet, then we need to
3806                  * initiate cleanup here by calling ql_done.
3807                  */
3808                 REQUEST_RING_UNLOCK(ha);
3809                 DEVICE_QUEUE_UNLOCK(tq);
3810                 rval = FC_ABORTED;
3811         } else {
3812                 request_t       *ep = pha->request_ring_bp;
3813                 uint16_t        cnt;
3814 
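                /*
                 * The command was started but has not completed: find its
                 * IOCB on the request ring and mark the entry invalid so
                 * it is not processed, then ask the firmware to abort the
                 * command.
                 */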
3815                 if (sp->handle != 0) {
3816                         for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
3817                                 if (sp->handle == ddi_get32(
3818                                     pha->hba_buf.acc_handle, &ep->handle)) {
3819                                         ep->entry_type = INVALID_ENTRY_TYPE;
3820                                         break;
3821                                 }
3822                                 ep++;
3823                         }
3824                 }
3825 
3826                 /* Release request ring and device queue locks. */
3827                 REQUEST_RING_UNLOCK(ha);
3828                 DEVICE_QUEUE_UNLOCK(tq);
3829 
3830                 sp->flags |= SRB_ABORTING;
3831                 (void) ql_abort_command(ha, sp);
3832                 pkt->pkt_reason = CS_ABORTED;
3833                 rval = FC_ABORTED;
3834         }
3835 
3836         QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
3837 
3838         return (rval);
3839 }
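
/*
 * Illustrative sketch (not part of the original driver): aborting an
 * outstanding packet.  The fca_handle and pkt variables are assumptions
 * for the example; passing KM_SLEEP sets SRB_POLL so the abort is polled
 * for completion.
 *
 *        rval = ql_abort(fca_handle, pkt, KM_SLEEP);
 *
 * FC_ABORTED means the packet was removed from a queue or aborted in the
 * firmware, FC_BADEXCHANGE means it was not found (it may have already
 * completed), and FC_OFFLINE is returned while the loop is down.
 */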
3840 
3841 /*
3842  * ql_reset
3843  *      Reset link or hardware.
3844  *
3845  * Input:
3846  *      fca_handle = handle setup by ql_bind_port().
3847  *      cmd = reset type command.
3848  *
3849  * Returns:
3850  *      FC_SUCCESS - reset has successfully finished.
3851  *      FC_UNBOUND - the fca_handle specified is not bound.
3852  *      FC_FAILURE - reset failed.
3853  *
3854  * Context:
3855  *      Kernel context.
3856  */
3857 static int
3858 ql_reset(opaque_t fca_handle, uint32_t cmd)
3859 {
3860         ql_adapter_state_t      *ha;
3861         int                     rval = FC_SUCCESS, rval2;
3862 
3863         ha = ql_fca_handle_to_state(fca_handle);
3864         if (ha == NULL) {
3865                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3866                     (void *)fca_handle);
3867                 return (FC_UNBOUND);
3868         }
3869 
3870         QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
3871             ha->vp_index, cmd);
3872 
3873         switch (cmd) {
3874         case FC_FCA_CORE:
3875                 /* dump firmware core if specified. */
3876                 if (ha->vp_index == 0) {
3877                         if (ql_dump_firmware(ha) != QL_SUCCESS) {
3878                                 EL(ha, "failed, FC_FAILURE\n");
3879                                 rval = FC_FAILURE;
3880                         }
3881                 }
3882                 break;
3883         case FC_FCA_LINK_RESET:
3884                 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
3885                         if (ql_loop_reset(ha) != QL_SUCCESS) {
3886                                 EL(ha, "failed, FC_FAILURE-2\n");
3887                                 rval = FC_FAILURE;
3888                         }
3889                 }
3890                 break;
3891         case FC_FCA_RESET_CORE:
3892         case FC_FCA_RESET:
 
3943                                 ha->state |= FC_STATE_ONLINE;
3944                         }
3945                 }
3946 
3947                 TASK_DAEMON_LOCK(ha);
3948                 ha->task_daemon_flags |= FC_STATE_CHANGE;
3949                 TASK_DAEMON_UNLOCK(ha);
3950 
3951                 ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
3952 
3953                 break;
3954         default:
3955                 EL(ha, "unknown cmd=%xh\n", cmd);
3956                 break;
3957         }
3958 
3959         if (rval != FC_SUCCESS) {
3960                 EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
3961         } else {
3962                 /*EMPTY*/
3963                 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3964                     ha->vp_index);
3965         }
3966 
3967         return (rval);
3968 }
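
/*
 * Illustrative sketch (not part of the original driver): the reset
 * commands handled above.  The fca_handle variable is an assumption for
 * the example.
 *
 *        (void) ql_reset(fca_handle, FC_FCA_LINK_RESET);
 *        (void) ql_reset(fca_handle, FC_FCA_CORE);
 *
 * FC_FCA_LINK_RESET resets the loop (skipped if the loop is already
 * down), FC_FCA_CORE dumps the firmware core on the physical port, and
 * FC_FCA_RESET/FC_FCA_RESET_CORE perform a full adapter reset before
 * reporting the port online again.
 */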
3969 
3970 /*
3971  * ql_port_manage
3972  *      Perform port management or diagnostics.
3973  *
3974  * Input:
3975  *      fca_handle = handle setup by ql_bind_port().
3976  *      cmd = pointer to command structure.
3977  *
3978  * Returns:
3979  *      FC_SUCCESS - the request completed successfully.
3980  *      FC_FAILURE - the request did not complete successfully.
3981  *      FC_UNBOUND - the fca_handle specified is not bound.
3982  *
3983  * Context:
3984  *      Kernel context.
 
3988 {
3989         clock_t                 timer;
3990         uint16_t                index;
3991         uint32_t                *bp;
3992         port_id_t               d_id;
3993         ql_link_t               *link;
3994         ql_adapter_state_t      *ha, *pha;
3995         ql_tgt_t                *tq;
3996         dma_mem_t               buffer_xmt, buffer_rcv;
3997         size_t                  length;
3998         uint32_t                cnt;
3999         char                    buf[80];
4000         lbp_t                   *lb;
4001         ql_mbx_data_t           mr;
4002         app_mbx_cmd_t           *mcp;
4003         int                     i0;
4004         uint8_t                 *bptr;
4005         int                     rval2, rval = FC_SUCCESS;
4006         uint32_t                opcode;
4007         uint32_t                set_flags = 0;
4008 
4009         ha = ql_fca_handle_to_state(fca_handle);
4010         if (ha == NULL) {
4011                 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4012                     (void *)fca_handle);
4013                 return (FC_UNBOUND);
4014         }
4015         pha = ha->pha;
4016 
4017         QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4018             cmd->pm_cmd_code);
4019 
4020         ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4021 
4022         /*
4023          * Wait for all outstanding commands to complete
4024          */
4025         index = (uint16_t)ql_wait_outstanding(ha);
4026 
4027         if (index != MAX_OUTSTANDING_COMMANDS) {
4028                 ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4029                 ql_restart_queues(ha);
4030                 EL(ha, "failed, FC_TRAN_BUSY\n");
4031                 return (FC_TRAN_BUSY);
4032         }
4033 
4034         switch (cmd->pm_cmd_code) {
4035         case FC_PORT_BYPASS:
4036                 d_id.b24 = *cmd->pm_cmd_buf;
4037                 tq = ql_d_id_to_queue(ha, d_id);
4038                 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4039                         EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4040                         rval = FC_FAILURE;
4041                 }
4042                 break;
4043         case FC_PORT_UNBYPASS:
4044                 d_id.b24 = *cmd->pm_cmd_buf;
4045                 tq = ql_d_id_to_queue(ha, d_id);
4046                 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4047                         EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4048                         rval = FC_FAILURE;
4049                 }
4050                 break;
 
4100         case FC_PORT_GET_DUMP:
4101                 QL_DUMP_LOCK(pha);
4102                 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4103                         EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4104                             "length=%lxh\n", cmd->pm_data_len);
4105                         cmd->pm_data_len = pha->risc_dump_size;
4106                         rval = FC_FAILURE;
4107                 } else if (pha->ql_dump_state & QL_DUMPING) {
4108                         EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4109                         rval = FC_TRAN_BUSY;
4110                 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4111                         (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4112                         pha->ql_dump_state |= QL_DUMP_UPLOADED;
4113                 } else {
4114                         EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4115                         rval = FC_FAILURE;
4116                 }
4117                 QL_DUMP_UNLOCK(pha);
4118                 break;
4119         case FC_PORT_FORCE_DUMP:
4120                 PORTMANAGE_LOCK(ha);
4121                 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4122                         EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4123                         rval = FC_FAILURE;
4124                 }
4125                 PORTMANAGE_UNLOCK(ha);
4126                 break;
4127         case FC_PORT_DOWNLOAD_FW:
4128                 PORTMANAGE_LOCK(ha);
4129                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4130                         if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4131                             (uint32_t)cmd->pm_data_len,
4132                             ha->flash_fw_addr << 2) != QL_SUCCESS) {
4133                                 EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4134                                 rval = FC_FAILURE;
4135                         }
4136                         ql_reset_chip(ha);
4137                         set_flags |= ISP_ABORT_NEEDED;
4138                 } else {
4139                         /* Save copy of the firmware. */
4140                         if (pha->risc_code != NULL) {
4141                                 kmem_free(pha->risc_code, pha->risc_code_size);
4142                                 pha->risc_code = NULL;
4143                                 pha->risc_code_size = 0;
4144                         }
4145 
4146                         pha->risc_code = kmem_alloc(cmd->pm_data_len,
4147                             KM_SLEEP);
4148                         if (pha->risc_code != NULL) {
4149                                 pha->risc_code_size =
4150                                     (uint32_t)cmd->pm_data_len;
4151                                 bcopy(cmd->pm_data_buf, pha->risc_code,
4152                                     cmd->pm_data_len);
4153 
4154                                 /* Do abort to force reload. */
4155                                 ql_reset_chip(ha);
4156                                 if (ql_abort_isp(ha) != QL_SUCCESS) {
4157                                         kmem_free(pha->risc_code,
4158                                             pha->risc_code_size);
4159                                         pha->risc_code = NULL;
4160                                         pha->risc_code_size = 0;
4161                                         ql_reset_chip(ha);
4162                                         (void) ql_abort_isp(ha);
4163                                         EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4164                                             " FC_FAILURE\n");
4165                                         rval = FC_FAILURE;
4166                                 }
4167                         }
4168                 }
4169                 PORTMANAGE_UNLOCK(ha);
4170                 break;
4171         case FC_PORT_GET_DUMP_SIZE:
4172                 bp = (uint32_t *)cmd->pm_data_buf;
4173                 *bp = pha->risc_dump_size;
4174                 break;
4175         case FC_PORT_DIAG:
4176                 /*
4177                  * Prevents concurrent diags
4178                  */
4179                 PORTMANAGE_LOCK(ha);
4180 
4181                 /* Wait for suspension to end. */
4182                 for (timer = 0; timer < 3000 &&
4183                     pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4184                         ql_delay(ha, 10000);
4185                 }
4186 
4187                 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4188                         EL(ha, "failed, FC_TRAN_BUSY-2\n");
4189                         rval = FC_TRAN_BUSY;
4190                         PORTMANAGE_UNLOCK(ha);
4191                         break;
4192                 }
4193 
4194                 switch (cmd->pm_cmd_flags) {
4195                 case QL_DIAG_EXEFMW:
4196                         if (ql_start_firmware(ha) != QL_SUCCESS) {
4197                                 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4198                                 rval = FC_FAILURE;
4199                         }
4200                         break;
4201                 case QL_DIAG_CHKCMDQUE:
4202                         for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4203                             i0++) {
4204                                 cnt += (pha->outstanding_cmds[i0] != NULL);
4205                         }
4206                         if (cnt != 0) {
4207                                 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4208                                     "FC_FAILURE\n");
4209                                 rval = FC_FAILURE;
4210                         }
4211                         break;
4212                 case QL_DIAG_FMWCHKSUM:
4213                         if (ql_verify_checksum(ha) != QL_SUCCESS) {
4214                                 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4215                                     "FC_FAILURE\n");
4216                                 rval = FC_FAILURE;
4217                         }
4218                         break;
4219                 case QL_DIAG_SLFTST:
4220                         if (ql_online_selftest(ha) != QL_SUCCESS) {
4221                                 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4222                                 rval = FC_FAILURE;
 
4237                                     cmd->pm_stat_buf,
4238                                     (size_t)cmd->pm_stat_len);
4239                                 cmd->pm_stat_len =
4240                                     sizeof (ql_adapter_revlvl_t);
4241                         }
4242                         break;
4243                 case QL_DIAG_LPBMBX:
4244 
4245                         if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4246                                 EL(ha, "failed, QL_DIAG_LPBMBX "
4247                                     "FC_INVALID_REQUEST, pmlen=%lxh, "
4248                                     "reqd=%lxh\n", cmd->pm_data_len,
4249                                     sizeof (struct app_mbx_cmd));
4250                                 rval = FC_INVALID_REQUEST;
4251                                 break;
4252                         }
4253                         /*
4254                          * Don't do the wrap test on a 2200 when the
4255                          * firmware is running.
4256                          */
4257                         if (!CFG_IST(ha, CFG_CTRL_2200)) {
4258                                 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4259                                 mr.mb[1] = mcp->mb[1];
4260                                 mr.mb[2] = mcp->mb[2];
4261                                 mr.mb[3] = mcp->mb[3];
4262                                 mr.mb[4] = mcp->mb[4];
4263                                 mr.mb[5] = mcp->mb[5];
4264                                 mr.mb[6] = mcp->mb[6];
4265                                 mr.mb[7] = mcp->mb[7];
4266 
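                                /*
                                 * Save a copy of the outgoing mailbox
                                 * values so the data returned by the
                                 * wrap test can be compared with what
                                 * was sent.
                                 */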
4267                                 bcopy(&mr.mb[0], &mr.mb[10],
4268                                     sizeof (uint16_t) * 8);
4269 
4270                                 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4271                                         EL(ha, "failed, QL_DIAG_LPBMBX "
4272                                             "FC_FAILURE\n");
4273                                         rval = FC_FAILURE;
4274                                         break;
4275                                 } else {
4276                                         for (i0 = 1; i0 < 8; i0++) {
4277                                                 if (mr.mb[i0] !=
 
4315                         if (ql_get_dma_mem(ha, &buffer_xmt,
4316                             (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4317                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4318                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4319                                 rval = FC_NOMEM;
4320                                 break;
4321                         }
4322                         if (ql_get_dma_mem(ha, &buffer_rcv,
4323                             (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4324                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4325                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4326                                 rval = FC_NOMEM;
4327                                 break;
4328                         }
4329                         ddi_rep_put8(buffer_xmt.acc_handle,
4330                             (uint8_t *)cmd->pm_data_buf,
4331                             (uint8_t *)buffer_xmt.bp,
4332                             cmd->pm_data_len, DDI_DEV_AUTOINCR);
4333 
4334                         /* A 22xx adapter must be in loop mode for this test. */
4335                         if (CFG_IST(ha, CFG_CTRL_2200)) {
4336                                 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4337                                 if (ha->flags & POINT_TO_POINT ||
4338                                     (ha->task_daemon_flags & LOOP_DOWN &&
4339                                     *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4340                                         cnt = *bptr;
4341                                         *bptr = (uint8_t)
4342                                             (*bptr & ~(BIT_6|BIT_5|BIT_4));
4343                                         (void) ql_abort_isp(ha);
4344                                         *bptr = (uint8_t)cnt;
4345                                 }
4346                         }
4347 
4348                         /* Shutdown IP. */
4349                         if (pha->flags & IP_INITIALIZED) {
4350                                 (void) ql_shutdown_ip(pha);
4351                         }
4352 
4353                         lb = (lbp_t *)cmd->pm_cmd_buf;
4354                         lb->transfer_count =
4355                             (uint32_t)cmd->pm_data_len;
4356                         lb->transfer_segment_count = 0;
4357                         lb->receive_segment_count = 0;
4358                         lb->transfer_data_address =
4359                             buffer_xmt.cookie.dmac_address;
4360                         lb->receive_data_address =
4361                             buffer_rcv.cookie.dmac_address;
4362 
4363                         if (ql_loop_back(ha, 0, lb,
4364                             buffer_xmt.cookie.dmac_notused,
4365                             buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4366                                 bzero((void *)cmd->pm_stat_buf,
4367                                     cmd->pm_stat_len);
4368                                 ddi_rep_get8(buffer_rcv.acc_handle,
4369                                     (uint8_t *)cmd->pm_stat_buf,
4370                                     (uint8_t *)buffer_rcv.bp,
4371                                     cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4372                                 rval = FC_SUCCESS;
4373                         } else {
4374                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4375                                 rval = FC_FAILURE;
4376                         }
4377 
4378                         ql_free_phys(ha, &buffer_xmt);
4379                         ql_free_phys(ha, &buffer_rcv);
4380 
4381                         /* Needed to recover the f/w */
4382                         set_flags |= ISP_ABORT_NEEDED;
4383 
4384                         /* Restart IP if it was shutdown. */
4385                         if (pha->flags & IP_ENABLED &&
4386                             !(pha->flags & IP_INITIALIZED)) {
4387                                 (void) ql_initialize_ip(pha);
4388                                 ql_isp_rcvbuf(pha);
4389                         }
4390 
4391                         break;
4392                 case QL_DIAG_ECHO: {
4393                         /*
4394                          * Issue an echo command with a user-supplied
4395                          * data pattern and destination address.
4396                          */
4397                         echo_t          echo;           /* temp echo struct */
 
4427                                 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4428                                     " cmdl1=%lxh, statl2=%lxh\n",
4429                                     cmd->pm_cmd_len, cmd->pm_stat_len);
4430                                 rval = FC_TOOMANY;
4431                                 break;
4432                         }
4433                         /* add four bytes for the opcode */
4434                         echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4435 
4436                         /*
4437                          * Determine whether we are 32- or 64-bit
4438                          * addressed: we need to get the appropriate
4439                          * DMA mapping and set the command options for
4440                          * 64-bit (bit 6) or 32-bit (no bit 6)
4441                          * addressing.
4442                          * While we are at it, also ask for a real
4443                          * echo (bit 15).
4444                          */
4445                         echo.options = BIT_15;
4446                         if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4447                             !(CFG_IST(ha, CFG_CTRL_8081))) {
4448                                 echo.options = (uint16_t)
4449                                     (echo.options | BIT_6);
4450                         }
4451 
4452                         /*
4453                          * Set up the DMA mappings for the
4454                          * output and input data buffers.
4455                          * First the output buffer
4456                          */
4457                         if (ql_get_dma_mem(ha, &buffer_xmt,
4458                             (uint32_t)(cmd->pm_data_len + 4),
4459                             LITTLE_ENDIAN_DMA,
4460                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4461                                 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4462                                 rval = FC_NOMEM;
4463                                 break;
4464                         }
4465                         echo.transfer_data_address = buffer_xmt.cookie;
4466 
4467                         /* Next the input buffer */
 
4516                                 rval = FC_FAILURE;
4517                         }
4518 
4519                         /* Restart IP if it was shutdown. */
4520                         if (pha->flags & IP_ENABLED &&
4521                             !(pha->flags & IP_INITIALIZED)) {
4522                                 (void) ql_initialize_ip(pha);
4523                                 ql_isp_rcvbuf(pha);
4524                         }
4525                         /* free up our DMA buffers */
4526                         ql_free_phys(ha, &buffer_xmt);
4527                         ql_free_phys(ha, &buffer_rcv);
4528                         break;
4529                 }
4530                 default:
4531                         EL(ha, "unknown=%xh, FC_PORT_DIAG "
4532                             "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4533                         rval = FC_INVALID_REQUEST;
4534                         break;
4535                 }
4536                 PORTMANAGE_UNLOCK(ha);
4537                 break;
4538         case FC_PORT_LINK_STATE:
4539                 /* Check for a port name of all zeros (null name). */
4540                 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4541                     index++) {
4542                         if (cmd->pm_cmd_buf[index] != 0) {
4543                                 break;
4544                         }
4545                 }
4546 
4547                 /* If name not null. */
4548                 if (index < 8 && cmd->pm_cmd_len >= 8) {
4549                         /* Locate device queue. */
4550                         tq = NULL;
4551                         for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4552                             tq == NULL; index++) {
4553                                 for (link = ha->dev[index].first; link != NULL;
4554                                     link = link->next) {
4555                                         tq = link->base_address;
4556 
 
4561                                                 tq = NULL;
4562                                         }
4563                                 }
4564                         }
4565 
4566                         if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4567                                 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4568                                 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4569                         } else {
4570                                 cnt = FC_PORT_SPEED_MASK(ha->state) |
4571                                     FC_STATE_OFFLINE;
4572                                 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4573                                 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4574                         }
4575                 } else {
4576                         cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4577                         cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4578                 }
4579                 break;
4580         case FC_PORT_INITIALIZE:
4581                 if (cmd->pm_cmd_len >= 8) {
4582                         tq = NULL;
4583                         for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4584                             tq == NULL; index++) {
4585                                 for (link = ha->dev[index].first; link != NULL;
4586                                     link = link->next) {
4587                                         tq = link->base_address;
4588 
4589                                         if (bcmp((void *)&tq->port_name[0],
4590                                             (void *)cmd->pm_cmd_buf, 8) == 0) {
4591                                                 if (!VALID_DEVICE_ID(ha,
4592                                                     tq->loop_id)) {
4593                                                         tq = NULL;
4594                                                 }
4595                                                 break;
4596                                         } else {
4597                                                 tq = NULL;
4598                                         }
4599                                 }
4600                         }
4601 
4602                         if (tq == NULL || ql_target_reset(ha, tq,
4603                             ha->loop_reset_delay) != QL_SUCCESS) {
4604                                 EL(ha, "failed, FC_PORT_INITIALIZE "
4605                                     "FC_FAILURE\n");
4606                                 rval = FC_FAILURE;
4607                         }
4608                 } else {
4609                         EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4610                             "clen=%lxh\n", cmd->pm_cmd_len);
4611 
4612                         rval = FC_FAILURE;
4613                 }
4614                 break;
4615         case FC_PORT_RLS:
4616                 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4617                         EL(ha, "failed, buffer size passed: %lxh, "
4618                             "req: %lxh\n", cmd->pm_data_len,
4619                             (sizeof (fc_rls_acc_t)));
4620                         rval = FC_FAILURE;
4621                 } else if (LOOP_NOT_READY(pha)) {
4622                         EL(ha, "loop NOT ready\n");
4623                         bzero(cmd->pm_data_buf, cmd->pm_data_len);
4624                 } else if (ql_get_link_status(ha, ha->loop_id,
4625                     cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4626                         EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4627                         rval = FC_FAILURE;
4628 #ifdef _BIG_ENDIAN
4629                 } else {
4630                         fc_rls_acc_t            *rls;
4631 
4632                         rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4633                         LITTLE_ENDIAN_32(&rls->rls_link_fail);
4634                         LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4635                         LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4636                         LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4637 #endif /* _BIG_ENDIAN */
4638                 }
4639                 break;
4640         case FC_PORT_GET_NODE_ID:
4641                 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4642                     cmd->pm_data_buf) != QL_SUCCESS) {
4643                         EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4644                         rval = FC_FAILURE;
4645                 }
4646                 break;
4647         case FC_PORT_SET_NODE_ID:
4648                 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4649                     cmd->pm_data_buf) != QL_SUCCESS) {
4650                         EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4651                         rval = FC_FAILURE;
4652                 }
4653                 break;
4654         case FC_PORT_DOWNLOAD_FCODE:
4655                 PORTMANAGE_LOCK(ha);
4656                 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4657                         rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4658                             (uint32_t)cmd->pm_data_len);
4659                 } else {
4660                         if (cmd->pm_data_buf[0] == 4 &&
4661                             cmd->pm_data_buf[8] == 0 &&
4662                             cmd->pm_data_buf[9] == 0x10 &&
4663                             cmd->pm_data_buf[10] == 0 &&
4664                             cmd->pm_data_buf[11] == 0) {
4665                                 rval = ql_24xx_load_flash(ha,
4666                                     (uint8_t *)cmd->pm_data_buf,
4667                                     (uint32_t)cmd->pm_data_len,
4668                                     ha->flash_fw_addr << 2);
4669                         } else {
4670                                 rval = ql_24xx_load_flash(ha,
4671                                     (uint8_t *)cmd->pm_data_buf,
4672                                     (uint32_t)cmd->pm_data_len, 0);
4673                         }
4674                 }
4675 
4676                 if (rval != QL_SUCCESS) {
4677                         EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4678                         rval = FC_FAILURE;
4679                 } else {
4680                         rval = FC_SUCCESS;
4681                 }
4682                 ql_reset_chip(ha);
4683                 set_flags |= ISP_ABORT_NEEDED;
4684                 PORTMANAGE_UNLOCK(ha);
4685                 break;
4686         default:
4687                 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4688                 rval = FC_BADCMD;
4689                 break;
4690         }
4691 
4692         /* Wait for suspension to end. */
4693         ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4694         timer = 0;
4695 
4696         while (timer++ < 3000 &&
4697             ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4698                 ql_delay(ha, 10000);
4699         }
4700 
4701         ql_restart_queues(ha);
4702 
4703         if (rval != FC_SUCCESS) {
4704                 EL(ha, "failed, rval = %xh\n", rval);
4705         } else {
4706                 /*EMPTY*/
4707                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4708         }
4709 
4710         return (rval);
4711 }
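
/*
 * Illustrative sketch, not driver code: in the FC_PORT_RLS case above, the
 * firmware returns the RLS counters in little-endian order, so on big-endian
 * hosts each 32-bit field is byte-swapped in place via LITTLE_ENDIAN_32().
 * A hypothetical stand-alone equivalent of that in-place swap would be:
 *
 *	static void
 *	example_swap32(uint32_t *p)
 *	{
 *		uint32_t	v = *p;
 *
 *		*p = ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8) |
 *		    ((v & 0x00ff0000U) >> 8) | ((v & 0xff000000U) >> 24);
 *	}
 *
 * leaving rls_link_fail, rls_sync_loss, rls_sig_loss and rls_invalid_crc in
 * host byte order before the buffer is handed back to the caller.
 */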
4712 
4713 static opaque_t
4714 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4715 {
4716         port_id_t               id;
4717         ql_adapter_state_t      *ha;
4718         ql_tgt_t                *tq;
4719 
4720         id.r.rsvd_1 = 0;
4721         id.b24 = d_id.port_id;
4722 
4723         ha = ql_fca_handle_to_state(fca_handle);
4724         if (ha == NULL) {
4725                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4726                     (void *)fca_handle);
4727                 return (NULL);
4728         }
4729         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4730 
4731         tq = ql_d_id_to_queue(ha, id);
4732 
4733         if (tq == NULL) {
4734                 EL(ha, "failed, tq=NULL\n");
4735         } else {
4736                 /*EMPTY*/
4737                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4738         }
4739         return (tq);
4740 }
4741 
4742 /* ************************************************************************ */
4743 /*                      FCA Driver Local Support Functions.                 */
4744 /* ************************************************************************ */
4745 
4746 /*
4747  * ql_cmd_setup
4748  *      Verifies proper command.
4749  *
4750  * Input:
4751  *      fca_handle = handle setup by ql_bind_port().
4752  *      pkt = pointer to fc_packet.
4753  *      rval = pointer for return value.
4754  *
4755  * Returns:
4756  *      Adapter state pointer, NULL = failure.
4757  *
4758  * Context:
4759  *      Kernel context.
4760  */
4761 static ql_adapter_state_t *
4762 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
4763 {
4764         ql_adapter_state_t      *ha, *pha;
4765         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
4766         ql_tgt_t                *tq;
4767         port_id_t               d_id;
4768 
4769         pkt->pkt_resp_resid = 0;
4770         pkt->pkt_data_resid = 0;
4771 
4772         /* check that the handle is assigned by this FCA */
4773         ha = ql_fca_handle_to_state(fca_handle);
4774         if (ha == NULL) {
4775                 *rval = FC_UNBOUND;
4776                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4777                     (void *)fca_handle);
4778                 return (NULL);
4779         }
4780         pha = ha->pha;
4781 
4782         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
4783 
4784         if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
4785                 return (ha);
4786         }
4787 
4788         if (!(pha->flags & ONLINE)) {
4789                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
4790                 pkt->pkt_reason = FC_REASON_HW_ERROR;
4791                 *rval = FC_TRANSPORT_ERROR;
4792                 EL(ha, "failed, not online hf=%xh\n", pha->flags);
4793                 return (NULL);
4794         }
4795 
4796         /* Exit on loop down. */
4797         if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
4798             pha->task_daemon_flags & LOOP_DOWN &&
4799             pha->loop_down_timer <= pha->loop_down_abort_time) {
4800                 pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4801                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4802                 *rval = FC_OFFLINE;
 
4812                         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4813                         tq = ql_d_id_to_queue(ha, d_id);
4814 
4815                         pkt->pkt_fca_device = (opaque_t)tq;
4816                 }
4817 
4818                 if (tq != NULL) {
4819                         DEVICE_QUEUE_LOCK(tq);
4820                         if (tq->flags & (TQF_RSCN_RCVD |
4821                             TQF_NEED_AUTHENTICATION)) {
4822                                 *rval = FC_DEVICE_BUSY;
4823                                 DEVICE_QUEUE_UNLOCK(tq);
4824                                 EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
4825                                     tq->flags, tq->d_id.b24);
4826                                 return (NULL);
4827                         }
4828                         DEVICE_QUEUE_UNLOCK(tq);
4829                 }
4830         }
4831 
4832         /*
4833          * Check DMA pointers.
4834          */
4835         *rval = DDI_SUCCESS;
4836         if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
4837                 QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
4838                 *rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
4839                 if (*rval == DDI_SUCCESS) {
4840                         *rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
4841                 }
4842         }
4843 
4844         if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
4845             pkt->pkt_rsplen != 0) {
4846                 QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
4847                 *rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
4848                 if (*rval == DDI_SUCCESS) {
4849                         *rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
4850                 }
4851         }
4852 
4853         /*
4854          * Minimum branch conditional; change it with care.
4855          */
4856         if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
4857             (pkt->pkt_datalen != 0)) != 0) {
4858                 QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
4859                 *rval = ddi_check_dma_handle(pkt->pkt_data_dma);
4860                 if (*rval == DDI_SUCCESS) {
4861                         *rval = ddi_check_acc_handle(pkt->pkt_data_acc);
4862                 }
4863         }
4864 
4865         if (*rval != DDI_SUCCESS) {
4866                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
4867                 pkt->pkt_reason = FC_REASON_DMA_ERROR;
4868 
4869                 /* Do command callback. */
4870                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
4871                         ql_awaken_task_daemon(ha, sp, 0, 0);
4872                 }
4873                 *rval = FC_BADPACKET;
4874                 EL(ha, "failed, bad DMA pointers\n");
4875                 return (NULL);
4876         }
4877 
4878         if (sp->magic_number != QL_FCA_BRAND) {
4879                 *rval = FC_BADPACKET;
4880                 EL(ha, "failed, magic number=%xh\n", sp->magic_number);
4881                 return (NULL);
4882         }
4883         *rval = FC_SUCCESS;
4884 
4885         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4886 
4887         return (ha);
4888 }
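
/*
 * Illustrative sketch, not driver code: ql_cmd_setup() above validates each
 * of the command, response and data buffers by checking the DMA handle first
 * and the access handle second, and fails the packet with FC_BADPACKET if
 * either check does not return DDI_SUCCESS.  A hypothetical helper capturing
 * that ordering would be:
 *
 *	static int
 *	example_check_handles(ddi_dma_handle_t dma, ddi_acc_handle_t acc)
 *	{
 *		int	rv;
 *
 *		rv = ddi_check_dma_handle(dma);
 *		if (rv == DDI_SUCCESS) {
 *			rv = ddi_check_acc_handle(acc);
 *		}
 *		return (rv);
 *	}
 */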
4889 
4890 /*
4891  * ql_els_plogi
4892  *      Issue an extended link service port login request.
4893  *
4894  * Input:
4895  *      ha = adapter state pointer.
4896  *      pkt = pointer to fc_packet.
4897  *
4898  * Returns:
4899  *      FC_SUCCESS - the packet was accepted for transport.
4900  *      FC_TRANSPORT_ERROR - a transport error occurred.
4901  *
4902  * Context:
4903  *      Kernel context.
4904  */
4905 static int
4906 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4907 {
4908         ql_tgt_t                *tq = NULL;
4909         port_id_t               d_id;
4910         la_els_logi_t           acc;
4911         class_svc_param_t       *class3_param;
4912         int                     ret;
4913         int                     rval = FC_SUCCESS;
4914 
4915         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4916             pkt->pkt_cmd_fhdr.d_id);
4917 
4918         TASK_DAEMON_LOCK(ha);
4919         if (!(ha->task_daemon_flags & STATE_ONLINE)) {
4920                 TASK_DAEMON_UNLOCK(ha);
4921                 QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
4922                 return (FC_OFFLINE);
4923         }
4924         TASK_DAEMON_UNLOCK(ha);
4925 
4926         bzero(&acc, sizeof (acc));
4927         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4928 
4929         ret = QL_SUCCESS;
4930 
4931         if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
4932                 /*
4933                  * In a point-to-point topology the driver sends a PLOGI
4934                  * after determining that it holds the N_Port login initiative.
4935                  */
4936                 ret = ql_p2p_plogi(ha, pkt);
4937         }
4938         if (ret == QL_CONSUMED) {
4939                 return (ret);
4940         }
4941 
4942         switch (ret = ql_login_port(ha, d_id)) {
4943         case QL_SUCCESS:
4944                 tq = ql_d_id_to_queue(ha, d_id);
4945                 break;
4946 
4947         case QL_LOOP_ID_USED:
4948                 if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
4949                         tq = ql_d_id_to_queue(ha, d_id);
4950                 }
4951                 break;
 
4957         if (ret != QL_SUCCESS) {
4958                 /*
4959                  * Invalidate this entry so as to seek a fresh loop ID
4960                  * in case firmware reassigns it to something else
4961                  */
4962                 tq = ql_d_id_to_queue(ha, d_id);
4963                 if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
4964                         tq->loop_id = PORT_NO_LOOP_ID;
4965                 }
4966         } else if (tq) {
4967                 (void) ql_get_port_database(ha, tq, PDF_ADISC);
4968         }
4969 
4970         if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
4971             (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
4972 
4973                 /* Build ACC. */
4974                 acc.ls_code.ls_code = LA_ELS_ACC;
4975                 acc.common_service.fcph_version = 0x2006;
4976                 acc.common_service.cmn_features = 0x8800;
4977                 acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
4978                 acc.common_service.conc_sequences = 0xff;
4979                 acc.common_service.relative_offset = 0x03;
4980                 acc.common_service.e_d_tov = 0x7d0;
4981 
4982                 bcopy((void *)&tq->port_name[0],
4983                     (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4984                 bcopy((void *)&tq->node_name[0],
4985                     (void *)&acc.node_ww_name.raw_wwn[0], 8);
4986 
4987                 class3_param = (class_svc_param_t *)&acc.class_3;
4988                 class3_param->class_valid_svc_opt = 0x8000;
4989                 class3_param->recipient_ctl = tq->class3_recipient_ctl;
4990                 class3_param->rcv_data_size = tq->class3_rcv_data_size;
4991                 class3_param->conc_sequences = tq->class3_conc_sequences;
4992                 class3_param->open_sequences_per_exch =
4993                     tq->class3_open_sequences_per_exch;
4994 
4995                 if (ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY) {
4996                         acc.ls_code.ls_code = LA_ELS_RJT;
4997                         pkt->pkt_state = FC_PKT_TRAN_BSY;
4998                         pkt->pkt_reason = FC_REASON_XCHG_BSY;
4999                         EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
5000                         rval = FC_TRAN_BUSY;
5001                 } else {
5002                         DEVICE_QUEUE_LOCK(tq);
5003                         tq->logout_sent = 0;
5004                         tq->flags &= ~TQF_NEED_AUTHENTICATION;
5005                         if (CFG_IST(ha, CFG_CTRL_242581)) {
5006                                 tq->flags |= TQF_IIDMA_NEEDED;
5007                         }
5008                         DEVICE_QUEUE_UNLOCK(tq);
5009 
5010                         if (CFG_IST(ha, CFG_CTRL_242581)) {
5011                                 TASK_DAEMON_LOCK(ha);
5012                                 ha->task_daemon_flags |= TD_IIDMA_NEEDED;
5013                                 TASK_DAEMON_UNLOCK(ha);
5014                         }
5015 
5016                         pkt->pkt_state = FC_PKT_SUCCESS;
5017                 }
5018         } else {
5019                 /* Build RJT. */
5020                 acc.ls_code.ls_code = LA_ELS_RJT;
5021 
5022                 switch (ret) {
5023                 case QL_FUNCTION_TIMEOUT:
5024                         pkt->pkt_state = FC_PKT_TIMEOUT;
5025                         pkt->pkt_reason = FC_REASON_HW_ERROR;
5026                         break;
5027 
5028                 case QL_MEMORY_ALLOC_FAILED:
5029                         pkt->pkt_state = FC_PKT_LOCAL_BSY;
5030                         pkt->pkt_reason = FC_REASON_NOMEM;
 
5049         }
5050 
5051         if (tq != NULL) {
5052                 DEVICE_QUEUE_LOCK(tq);
5053                 tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
5054                 if (rval == FC_TRAN_BUSY) {
5055                         if (tq->d_id.b24 != BROADCAST_ADDR) {
5056                                 tq->flags |= TQF_NEED_AUTHENTICATION;
5057                         }
5058                 }
5059                 DEVICE_QUEUE_UNLOCK(tq);
5060         }
5061 
5062         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5063             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5064 
5065         if (rval != FC_SUCCESS) {
5066                 EL(ha, "failed, rval = %xh\n", rval);
5067         } else {
5068                 /*EMPTY*/
5069                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5070         }
5071         return (rval);
5072 }
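
/*
 * Illustrative sketch, not driver code: the ELS handlers in this file share
 * a common completion pattern -- build an ACC (or RJT) payload on the stack,
 * set pkt_state/pkt_reason, and copy the payload into the caller's response
 * buffer with ddi_rep_put8().  A hypothetical minimal handler shaped like
 * the ones below would be:
 *
 *	static int
 *	example_els_handler(ql_adapter_state_t *ha, fc_packet_t *pkt)
 *	{
 *		ql_acc_rjt_t	acc;
 *
 *		bzero(&acc, sizeof (acc));
 *		acc.ls_code.ls_code = LA_ELS_ACC;
 *		pkt->pkt_state = FC_PKT_SUCCESS;
 *
 *		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
 *		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
 *
 *		return (FC_SUCCESS);
 *	}
 */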
5073 
5074 /*
5075  * ql_p2p_plogi
5076  *      Start an extended link service port login request using
5077  *      an ELS Passthru iocb.
5078  *
5079  * Input:
5080  *      ha = adapter state pointer.
5081  *      pkt = pointer to fc_packet.
5082  *
5083  * Returns:
5084  *      QL_CONSUMED - the iocb was queued for transport.
5085  *
5086  * Context:
5087  *      Kernel context.
5088  */
5089 static int
5090 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5091 {
5092         uint16_t        id;
5093         ql_tgt_t        tmp;
5094         ql_tgt_t        *tq = &tmp;
5095         int             rval;
5096         port_id_t       d_id;
5097         ql_srb_t        *sp = (ql_srb_t *)pkt->pkt_fca_private;
5098 
5099         tq->d_id.b.al_pa = 0;
5100         tq->d_id.b.area = 0;
5101         tq->d_id.b.domain = 0;
5102 
5103         /*
5104          * Verify that the port database hasn't moved beneath our feet by
5105          * switching to the appropriate n_port_handle if necessary.  This is
5106          * less unpleasant than the error recovery if the wrong one is used.
5107          */
5108         for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5109                 tq->loop_id = id;
5110                 rval = ql_get_port_database(ha, tq, PDF_NONE);
5111                 EL(ha, "rval=%xh\n", rval);
5112                 /* check all the ones not logged in for possible use */
5113                 if (rval == QL_NOT_LOGGED_IN) {
5114                         if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5115                                 ha->n_port->n_port_handle = tq->loop_id;
5116                                 EL(ha, "n_port_handle =%xh, master state=%x\n",
5117                                     tq->loop_id, tq->master_state);
5118                                 break;
5119                         }
5120                         /*
5121                          * Use a 'port unavailable' entry only
5122                          * if we used it before.
5123                          */
5124                         if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5125                                 /* if the port_id matches, reuse it */
5126                                 if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5127                                         EL(ha, "n_port_handle =%xh,"
5128                                             "master state=%xh\n",
5129                                             tq->loop_id, tq->master_state);
5130                                         break;
5131                                 } else if (tq->loop_id ==
5132                                     ha->n_port->n_port_handle) {
5133                                     /* avoid a lint error */
5134                                         uint16_t *hndl;
5135                                         uint16_t val;
5136 
5137                                         hndl = &ha->n_port->n_port_handle;
5138                                         val = *hndl;
5139                                         val++;
5140                                         val++;
5141                                         *hndl = val;
5142                                 }
5143                         EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5144                             "master state=%x\n", rval, id, tq->loop_id,
5145                             tq->master_state);
5146                         }
5147 
5148                 }
5149                 if (rval == QL_SUCCESS) {
5150                         if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5151                                 ha->n_port->n_port_handle = tq->loop_id;
5152                                 EL(ha, "n_port_handle =%xh, master state=%x\n",
5153                                     tq->loop_id, tq->master_state);
5154                                 break;
5155                         }
5156                         EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5157                             "master state=%x\n", rval, id, tq->loop_id,
5158                             tq->master_state);
5159                 }
5160         }
5161         (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5162 
5163         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5164         tq = ql_d_id_to_queue(ha, d_id);
5165         ql_timeout_insert(ha, tq, sp);
5166         ql_start_iocb(ha, sp);
5167 
5168         return (QL_CONSUMED);
5169 }
5170 
5171 
5172 /*
5173  * ql_els_flogi
5174  *      Issue an extended link service fabric login request.
5175  *
5176  * Input:
5177  *      ha = adapter state pointer.
5178  *      pkt = pointer to fc_packet.
5179  *
5180  * Returns:
5181  *      FC_SUCCESS - the packet was accepted for transport.
5182  *      FC_TRANSPORT_ERROR - a transport error occurred.
5183  *
5184  * Context:
5185  *      Kernel context.
5186  */
5187 static int
5188 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5189 {
5190         ql_tgt_t                *tq = NULL;
5191         port_id_t               d_id;
5192         la_els_logi_t           acc;
5193         class_svc_param_t       *class3_param;
5194         int                     rval = FC_SUCCESS;
5195         int                     accept = 0;
5196 
5197         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5198             pkt->pkt_cmd_fhdr.d_id);
5199 
5200         bzero(&acc, sizeof (acc));
5201         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5202 
5203         if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5204                 /*
5205                  * A zero d_id in a FLOGI accept response in point-to-point
5206                  * topology triggers evaluation of the N_Port login initiative.
5207                  */
5208                 pkt->pkt_resp_fhdr.d_id = 0;
5209                 /*
5210                  * An N_Port already logged in with the firmware
5211                  * will have the only database entry.
5212                  */
5213                 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5214                         tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5215                 }
5216 
5217                 if (tq != NULL) {
5218                         /*
5219                          * If the target port has the initiative, send
5220                          * up a PLOGI about the new device.
5221                          */
5222                         if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5223                             (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5224                             &ha->init_ctrl_blk.cb24.port_name[0] :
5225                             &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5226                                 ha->send_plogi_timer = 3;
5227                         } else {
5228                                 ha->send_plogi_timer = 0;
5229                         }
5230                         pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5231                 } else {
5232                         /*
5233                          * An N_Port not logged in with the firmware will not
5234                          * have a database entry.  We accept anyway and rely
5235                          * on a PLOGI from the upper layers to set the d_id
5236                          * and s_id.
5237                          */
5238                         accept = 1;
5239                 }
5240         } else {
5241                 tq = ql_d_id_to_queue(ha, d_id);
5242         }
5243         if ((tq != NULL) || (accept != 0)) {
5244                 /* Build ACC. */
5245                 pkt->pkt_state = FC_PKT_SUCCESS;
5246                 class3_param = (class_svc_param_t *)&acc.class_3;
5247 
5248                 acc.ls_code.ls_code = LA_ELS_ACC;
5249                 acc.common_service.fcph_version = 0x2006;
5250                 if (ha->topology & QL_N_PORT) {
5251                         /* clear F_Port indicator */
5252                         acc.common_service.cmn_features = 0x0800;
5253                 } else {
5254                         acc.common_service.cmn_features = 0x1b00;
5255                 }
5256                 CFG_IST(ha, CFG_CTRL_24258081) ?
5257                     (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5258                     ha->init_ctrl_blk.cb24.max_frame_length[0],
5259                     ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5260                     (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5261                     ha->init_ctrl_blk.cb.max_frame_length[0],
5262                     ha->init_ctrl_blk.cb.max_frame_length[1]));
5263                 acc.common_service.conc_sequences = 0xff;
5264                 acc.common_service.relative_offset = 0x03;
5265                 acc.common_service.e_d_tov = 0x7d0;
5266                 if (accept) {
5267                         /* Use the saved N_Port WWNN and WWPN */
5268                         if (ha->n_port != NULL) {
5269                                 bcopy((void *)&ha->n_port->port_name[0],
5270                                     (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5271                                 bcopy((void *)&ha->n_port->node_name[0],
5272                                     (void *)&acc.node_ww_name.raw_wwn[0], 8);
5273                                 /* mark service options invalid */
5274                                 class3_param->class_valid_svc_opt = 0x0800;
5275                         } else {
5276                                 EL(ha, "ha->n_port is NULL\n");
5277                                 /* Build RJT. */
5278                                 acc.ls_code.ls_code = LA_ELS_RJT;
5279 
5280                                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5281                                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5282                         }
 
5294                             tq->class3_conc_sequences;
5295                         class3_param->open_sequences_per_exch =
5296                             tq->class3_open_sequences_per_exch;
5297                 }
5298         } else {
5299                 /* Build RJT. */
5300                 acc.ls_code.ls_code = LA_ELS_RJT;
5301 
5302                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5303                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5304                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5305         }
5306 
5307         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5308             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5309 
5310         if (rval != FC_SUCCESS) {
5311                 EL(ha, "failed, rval = %xh\n", rval);
5312         } else {
5313                 /*EMPTY*/
5314                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5315         }
5316         return (rval);
5317 }
5318 
5319 /*
5320  * ql_els_logo
5321  *      Issue an extended link service logout request.
5322  *
5323  * Input:
5324  *      ha = adapter state pointer.
5325  *      pkt = pointer to fc_packet.
5326  *
5327  * Returns:
5328  *      FC_SUCCESS - the packet was accepted for transport.
5329  *      FC_TRANSPORT_ERROR - a transport error occurred.
5330  *
5331  * Context:
5332  *      Kernel context.
5333  */
5334 static int
5335 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5336 {
5337         port_id_t       d_id;
5338         ql_tgt_t        *tq;
5339         la_els_logo_t   acc;
5340         int             rval = FC_SUCCESS;
5341 
5342         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5343             pkt->pkt_cmd_fhdr.d_id);
5344 
5345         bzero(&acc, sizeof (acc));
5346         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5347 
5348         tq = ql_d_id_to_queue(ha, d_id);
5349         if (tq) {
5350                 DEVICE_QUEUE_LOCK(tq);
5351                 if (tq->d_id.b24 == BROADCAST_ADDR) {
5352                         DEVICE_QUEUE_UNLOCK(tq);
5353                         return (FC_SUCCESS);
5354                 }
5355 
5356                 tq->flags |= TQF_NEED_AUTHENTICATION;
5357 
5358                 do {
5359                         DEVICE_QUEUE_UNLOCK(tq);
5360                         (void) ql_abort_device(ha, tq, 1);
5361 
5362                         /*
5363                          * Wait for commands to drain in F/W (doesn't
 
5371                 DEVICE_QUEUE_UNLOCK(tq);
5372         }
5373 
5374         if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5375                 /* Build ACC. */
5376                 acc.ls_code.ls_code = LA_ELS_ACC;
5377 
5378                 pkt->pkt_state = FC_PKT_SUCCESS;
5379         } else {
5380                 /* Build RJT. */
5381                 acc.ls_code.ls_code = LA_ELS_RJT;
5382 
5383                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5384                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5385                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5386         }
5387 
5388         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5389             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5390 
5391         if (rval != FC_SUCCESS) {
5392                 EL(ha, "failed, rval = %xh\n", rval);
5393         } else {
5394                 /*EMPTY*/
5395                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5396         }
5397         return (rval);
5398 }
5399 
5400 /*
5401  * ql_els_prli
5402  *      Issue an extended link service process login request.
5403  *
5404  * Input:
5405  *      ha = adapter state pointer.
5406  *      pkt = pointer to fc_packet.
5407  *
5408  * Returns:
5409  *      FC_SUCCESS - the packet was accepted for transport.
5410  *      FC_TRANSPORT_ERROR - a transport error occurred.
5411  *
5412  * Context:
5413  *      Kernel context.
5414  */
5415 static int
5416 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5417 {
5418         ql_tgt_t                *tq;
5419         port_id_t               d_id;
5420         la_els_prli_t           acc;
5421         prli_svc_param_t        *param;
5422         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
5423         int                     rval = FC_SUCCESS;
5424 
5425         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5426             pkt->pkt_cmd_fhdr.d_id);
5427 
5428         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5429 
5430         tq = ql_d_id_to_queue(ha, d_id);
5431         if (tq != NULL) {
5432                 (void) ql_get_port_database(ha, tq, PDF_NONE);
5433 
5434                 if ((ha->topology & QL_N_PORT) &&
5435                     (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5436                         ql_timeout_insert(ha, tq, sp);
5437                         ql_start_iocb(ha, sp);
5438                         rval = QL_CONSUMED;
5439                 } else {
5440                         /* Build ACC. */
5441                         bzero(&acc, sizeof (acc));
5442                         acc.ls_code = LA_ELS_ACC;
5443                         acc.page_length = 0x10;
5444                         acc.payload_length = tq->prli_payload_length;
5445 
5446                         param = (prli_svc_param_t *)&acc.service_params[0];
5447                         param->type = 0x08;
5448                         param->rsvd = 0x00;
5449                         param->process_assoc_flags = tq->prli_svc_param_word_0;
5450                         param->process_flags = tq->prli_svc_param_word_3;
5451 
5452                         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5453                             (uint8_t *)pkt->pkt_resp, sizeof (acc),
5454                             DDI_DEV_AUTOINCR);
5455 
5456                         pkt->pkt_state = FC_PKT_SUCCESS;
5457                 }
5458         } else {
5459                 la_els_rjt_t rjt;
5460 
5461                 /* Build RJT. */
5462                 bzero(&rjt, sizeof (rjt));
5463                 rjt.ls_code.ls_code = LA_ELS_RJT;
5464 
5465                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5466                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5467 
5468                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5469                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5470                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5471         }
5472 
5473         if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5474                 EL(ha, "failed, rval = %xh\n", rval);
5475         } else {
5476                 /*EMPTY*/
5477                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5478         }
5479         return (rval);
5480 }
5481 
5482 /*
5483  * ql_els_prlo
5484  *      Issue an extended link service process logout request.
5485  *
5486  * Input:
5487  *      ha = adapter state pointer.
5488  *      pkt = pointer to fc_packet.
5489  *
5490  * Returns:
5491  *      FC_SUCCESS - the packet was accepted for transport.
5492  *      FC_TRANSPORT_ERROR - a transport error occurred.
5493  *
5494  * Context:
5495  *      Kernel context.
5496  */
5497 /* ARGSUSED */
5498 static int
5499 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5500 {
5501         la_els_prli_t   acc;
5502         int             rval = FC_SUCCESS;
5503 
5504         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5505             pkt->pkt_cmd_fhdr.d_id);
5506 
5507         /* Build ACC. */
5508         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5509             (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5510 
5511         acc.ls_code = LA_ELS_ACC;
5512         acc.service_params[2] = 1;
5513 
5514         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5515             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5516 
5517         pkt->pkt_state = FC_PKT_SUCCESS;
5518 
5519         if (rval != FC_SUCCESS) {
5520                 EL(ha, "failed, rval = %xh\n", rval);
5521         } else {
5522                 /*EMPTY*/
5523                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5524         }
5525         return (rval);
5526 }
5527 
5528 /*
5529  * ql_els_adisc
5530  *      Issue an extended link service address discovery request.
5531  *
5532  * Input:
5533  *      ha = adapter state pointer.
5534  *      pkt = pointer to fc_packet.
5535  *
5536  * Returns:
5537  *      FC_SUCCESS - the packet was accepted for transport.
5538  *      FC_TRANSPORT_ERROR - a transport error occurred.
5539  *
5540  * Context:
5541  *      Kernel context.
5542  */
5543 static int
5544 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5545 {
5546         ql_dev_id_list_t        *list;
5547         uint32_t                list_size;
5548         ql_link_t               *link;
5549         ql_tgt_t                *tq;
5550         ql_lun_t                *lq;
5551         port_id_t               d_id;
5552         la_els_adisc_t          acc;
5553         uint16_t                index, loop_id;
5554         ql_mbx_data_t           mr;
5555         int                     rval = FC_SUCCESS;
5556 
5557         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5558 
5559         bzero(&acc, sizeof (acc));
5560         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5561 
5562         /*
5563          * MBC_GET_PORT_DATABASE causes the firmware to send an
5564          * ADISC out to the device.
5565          */
5566         index = ql_alpa_to_index[d_id.b.al_pa];
5567         tq = NULL;
5568         for (link = ha->dev[index].first; link != NULL; link = link->next) {
5569                 tq = link->base_address;
5570                 if (tq->d_id.b24 == d_id.b24) {
5571                         break;
5572                 } else {
5573                         tq = NULL;
5574                 }
5575         }
5576 
5577         if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
 
5632                 bcopy((void *)&tq->port_name[0],
5633                     (void *)&acc.port_wwn.raw_wwn[0], 8);
5634                 bcopy((void *)&tq->node_name[0],
5635                     (void *)&acc.node_wwn.raw_wwn[0], 8);
5636 
5637                 acc.nport_id.port_id = tq->d_id.b24;
5638 
5639                 pkt->pkt_state = FC_PKT_SUCCESS;
5640         } else {
5641                 /* Build RJT. */
5642                 acc.ls_code.ls_code = LA_ELS_RJT;
5643 
5644                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5645                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5646                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5647         }
5648 
5649         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5650             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5651 
5652         if (rval != FC_SUCCESS) {
5653                 EL(ha, "failed, rval = %xh\n", rval);
5654         } else {
5655                 /*EMPTY*/
5656                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5657         }
5658         return (rval);
5659 }
5660 
5661 /*
5662  * ql_els_linit
5663  *      Issue an extended link service loop initialize request.
5664  *
5665  * Input:
5666  *      ha = adapter state pointer.
5667  *      pkt = pointer to fc_packet.
5668  *
5669  * Returns:
5670  *      FC_SUCCESS - the packet was accepted for transport.
5671  *      FC_TRANSPORT_ERROR - a transport error occurred.
5672  *
5673  * Context:
5674  *      Kernel context.
5675  */
5676 static int
5677 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5678 {
5679         ddi_dma_cookie_t        *cp;
5680         uint32_t                cnt;
5681         conv_num_t              n;
5682         port_id_t               d_id;
5683         int                     rval = FC_SUCCESS;
5684 
5685         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5686 
5687         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5688         if (ha->topology & QL_SNS_CONNECTION) {
5689                 fc_linit_req_t els;
5690                 lfa_cmd_t lfa;
5691 
5692                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5693                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5694 
5695                 /* Setup LFA mailbox command data. */
5696                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5697 
5698                 lfa.resp_buffer_length[0] = 4;
5699 
5700                 cp = pkt->pkt_resp_cookie;
5701                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5702                         n.size64 = (uint64_t)cp->dmac_laddress;
5703                         LITTLE_ENDIAN_64(&n.size64);
5704                 } else {
5705                         n.size32[0] = LSD(cp->dmac_laddress);
5706                         LITTLE_ENDIAN_32(&n.size32[0]);
5707                         n.size32[1] = MSD(cp->dmac_laddress);
5708                         LITTLE_ENDIAN_32(&n.size32[1]);
 
5727                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5728                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5729                 } else {
5730                         pkt->pkt_state = FC_PKT_SUCCESS;
5731                 }
5732         } else {
5733                 fc_linit_resp_t rjt;
5734 
5735                 /* Build RJT. */
5736                 bzero(&rjt, sizeof (rjt));
5737                 rjt.ls_code.ls_code = LA_ELS_RJT;
5738 
5739                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5740                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5741 
5742                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5743                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5744                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5745         }
5746 
5747         if (rval != FC_SUCCESS) {
5748                 EL(ha, "failed, rval = %xh\n", rval);
5749         } else {
5750                 /*EMPTY*/
5751                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5752         }
5753         return (rval);
5754 }
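
/*
 * Illustrative sketch, an assumption about the header definitions rather
 * than a quote from them: ql_els_linit(), ql_els_lpc() and ql_els_lsts()
 * all use conv_num_t to lay the response-buffer DMA address down into the
 * byte array expected by the LFA mailbox command.  The conversions above
 * only work if conv_num_t overlays the same storage, roughly like:
 *
 *	typedef union {
 *		uint64_t	size64;
 *		uint32_t	size32[2];
 *		uint8_t		size8[8];
 *	} example_conv_num_t;
 *
 * so that, after LITTLE_ENDIAN_64()/LITTLE_ENDIAN_32() have been applied,
 * size8[0..7] holds the address least-significant byte first, ready to be
 * copied into lfa.resp_buffer_address[].
 */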
5755 
5756 /*
5757  * ql_els_lpc
5758  *      Issue an extended link service loop control request.
5759  *
5760  * Input:
5761  *      ha = adapter state pointer.
5762  *      pkt = pointer to fc_packet.
5763  *
5764  * Returns:
5765  *      FC_SUCCESS - the packet was accepted for transport.
5766  *      FC_TRANSPORT_ERROR - a transport error occurred.
5767  *
5768  * Context:
5769  *      Kernel context.
5770  */
5771 static int
5772 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5773 {
5774         ddi_dma_cookie_t        *cp;
5775         uint32_t                cnt;
5776         conv_num_t              n;
5777         port_id_t               d_id;
5778         int                     rval = FC_SUCCESS;
5779 
5780         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5781 
5782         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5783         if (ha->topology & QL_SNS_CONNECTION) {
5784                 ql_lpc_t els;
5785                 lfa_cmd_t lfa;
5786 
5787                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5788                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5789 
5790                 /* Setup LFA mailbox command data. */
5791                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5792 
5793                 lfa.resp_buffer_length[0] = 4;
5794 
5795                 cp = pkt->pkt_resp_cookie;
5796                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5797                         n.size64 = (uint64_t)(cp->dmac_laddress);
5798                         LITTLE_ENDIAN_64(&n.size64);
5799                 } else {
5800                         n.size32[0] = cp->dmac_address;
5801                         LITTLE_ENDIAN_32(&n.size32[0]);
5802                         n.size32[1] = 0;
5803                 }
5804 
5805                 /* Set buffer address. */
5806                 for (cnt = 0; cnt < 8; cnt++) {
5807                         lfa.resp_buffer_address[cnt] = n.size8[cnt];
5808                 }
5809 
5810                 lfa.subcommand_length[0] = 20;
5811                 n.size32[0] = d_id.b24;
5812                 LITTLE_ENDIAN_32(&n.size32[0]);
5813                 lfa.addr[0] = n.size8[0];
5814                 lfa.addr[1] = n.size8[1];
5815                 lfa.addr[2] = n.size8[2];
5816                 lfa.subcommand[1] = 0x71;
5817                 lfa.payload[4] = els.port_control;
5818                 bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5819 
5820                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5821                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5822                 } else {
5823                         pkt->pkt_state = FC_PKT_SUCCESS;
5824                 }
5825         } else {
5826                 ql_lpc_resp_t rjt;
5827 
5828                 /* Build RJT. */
5829                 bzero(&rjt, sizeof (rjt));
5830                 rjt.ls_code.ls_code = LA_ELS_RJT;
5831 
5832                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5833                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5834 
5835                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5836                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5837                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5838         }
5839 
5840         if (rval != FC_SUCCESS) {
5841                 EL(ha, "failed, rval = %xh\n", rval);
5842         } else {
5843                 /*EMPTY*/
5844                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5845         }
5846         return (rval);
5847 }
5848 
5849 /*
5850  * ql_els_lsts
5851  *      Issue an extended link service loop status request.
5852  *
5853  * Input:
5854  *      ha = adapter state pointer.
5855  *      pkt = pointer to fc_packet.
5856  *
5857  * Returns:
5858  *      FC_SUCCESS - the packet was accepted for transport.
5859  *      FC_TRANSPORT_ERROR - a transport error occurred.
5860  *
5861  * Context:
5862  *      Kernel context.
5863  */
5864 static int
5865 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5866 {
5867         ddi_dma_cookie_t        *cp;
5868         uint32_t                cnt;
5869         conv_num_t              n;
5870         port_id_t               d_id;
5871         int                     rval = FC_SUCCESS;
5872 
5873         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5874 
5875         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5876         if (ha->topology & QL_SNS_CONNECTION) {
5877                 fc_lsts_req_t els;
5878                 lfa_cmd_t lfa;
5879 
5880                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5881                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5882 
5883                 /* Setup LFA mailbox command data. */
5884                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5885 
5886                 lfa.resp_buffer_length[0] = 84;
5887 
5888                 cp = pkt->pkt_resp_cookie;
5889                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5890                         n.size64 = cp->dmac_laddress;
5891                         LITTLE_ENDIAN_64(&n.size64);
5892                 } else {
5893                         n.size32[0] = cp->dmac_address;
5894                         LITTLE_ENDIAN_32(&n.size32[0]);
5895                         n.size32[1] = 0;
5896                 }
 
5911                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5912                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5913                 } else {
5914                         pkt->pkt_state = FC_PKT_SUCCESS;
5915                 }
5916         } else {
5917                 fc_lsts_resp_t rjt;
5918 
5919                 /* Build RJT. */
5920                 bzero(&rjt, sizeof (rjt));
5921                 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5922 
5923                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5924                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5925 
5926                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5927                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5928                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5929         }
5930 
5931         if (rval != FC_SUCCESS) {
5932                 EL(ha, "failed=%xh\n", rval);
5933         } else {
5934                 /*EMPTY*/
5935                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5936         }
5937         return (rval);
5938 }
5939 
5940 /*
5941  * ql_els_scr
5942  *      Issue an extended link service state change registration request.
5943  *
5944  * Input:
5945  *      ha = adapter state pointer.
5946  *      pkt = pointer to fc_packet.
5947  *
5948  * Returns:
5949  *      FC_SUCCESS - the packet was accepted for transport.
5950  *      FC_TRANSPORT_ERROR - a transport error occurred.
5951  *
5952  * Context:
5953  *      Kernel context.
5954  */
5955 static int
5956 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5957 {
5958         fc_scr_resp_t   acc;
5959         int             rval = FC_SUCCESS;
5960 
5961         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5962 
5963         bzero(&acc, sizeof (acc));
5964         if (ha->topology & QL_SNS_CONNECTION) {
5965                 fc_scr_req_t els;
5966 
5967                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5968                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5969 
5970                 if (ql_send_change_request(ha, els.scr_func) ==
5971                     QL_SUCCESS) {
5972                         /* Build ACC. */
5973                         acc.scr_acc = LA_ELS_ACC;
5974 
5975                         pkt->pkt_state = FC_PKT_SUCCESS;
5976                 } else {
5977                         /* Build RJT. */
5978                         acc.scr_acc = LA_ELS_RJT;
5979 
5980                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5981                         pkt->pkt_reason = FC_REASON_HW_ERROR;
5982                         EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5983                 }
5984         } else {
5985                 /* Build RJT. */
5986                 acc.scr_acc = LA_ELS_RJT;
5987 
5988                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5989                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5990                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5991         }
5992 
5993         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5994             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5995 
5996         if (rval != FC_SUCCESS) {
5997                 EL(ha, "failed, rval = %xh\n", rval);
5998         } else {
5999                 /*EMPTY*/
6000                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6001         }
6002         return (rval);
6003 }
6004 
6005 /*
6006  * ql_els_rscn
6007  *      Issue an extended link service register state
6008  *      change notification request.
6009  *
6010  * Input:
6011  *      ha = adapter state pointer.
6012  *      pkt = pointer to fc_packet.
6013  *
6014  * Returns:
6015  *      FC_SUCCESS - the packet was accepted for transport.
6016  *      FC_TRANSPORT_ERROR - a transport error occurred.
6017  *
6018  * Context:
6019  *      Kernel context.
6020  */
6021 static int
6022 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6023 {
6024         ql_rscn_resp_t  acc;
6025         int             rval = FC_SUCCESS;
6026 
6027         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6028 
6029         bzero(&acc, sizeof (acc));
6030         if (ha->topology & QL_SNS_CONNECTION) {
6031                 /* Build ACC. */
6032                 acc.scr_acc = LA_ELS_ACC;
6033 
6034                 pkt->pkt_state = FC_PKT_SUCCESS;
6035         } else {
6036                 /* Build RJT. */
6037                 acc.scr_acc = LA_ELS_RJT;
6038 
6039                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6040                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6041                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6042         }
6043 
6044         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6045             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6046 
6047         if (rval != FC_SUCCESS) {
6048                 EL(ha, "failed, rval = %xh\n", rval);
6049         } else {
6050                 /*EMPTY*/
6051                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6052         }
6053         return (rval);
6054 }
6055 
6056 /*
6057  * ql_els_farp_req
6058  *      Issue FC Address Resolution Protocol (FARP)
6059  *      extended link service request.
6060  *
6061  *      Note: not supported.
6062  *
6063  * Input:
6064  *      ha = adapter state pointer.
6065  *      pkt = pointer to fc_packet.
6066  *
6067  * Returns:
6068  *      FC_SUCCESS - the packet was accepted for transport.
6069  *      FC_TRANSPORT_ERROR - a transport error occurred.
6070  *
6071  * Context:
6072  *      Kernel context.
6073  */
6074 static int
6075 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6076 {
6077         ql_acc_rjt_t    acc;
6078         int             rval = FC_SUCCESS;
6079 
6080         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6081 
6082         bzero(&acc, sizeof (acc));
6083 
6084         /* Build ACC. */
6085         acc.ls_code.ls_code = LA_ELS_ACC;
6086 
6087         pkt->pkt_state = FC_PKT_SUCCESS;
6088 
6089         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6090             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6091 
6092         if (rval != FC_SUCCESS) {
6093                 EL(ha, "failed, rval = %xh\n", rval);
6094         } else {
6095                 /*EMPTY*/
6096                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6097         }
6098         return (rval);
6099 }
6100 
6101 /*
6102  * ql_els_farp_reply
6103  *      Issue FC Address Resolution Protocol (FARP)
6104  *      extended link service reply.
6105  *
6106  *      Note: not supported.
6107  *
6108  * Input:
6109  *      ha = adapter state pointer.
6110  *      pkt = pointer to fc_packet.
6111  *
6112  * Returns:
6113  *      FC_SUCCESS - the packet was accepted for transport.
6114  *      FC_TRANSPORT_ERROR - a transport error occurred.
6115  *
6116  * Context:
6117  *      Kernel context.
6118  */
6119 /* ARGSUSED */
6120 static int
6121 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6122 {
6123         ql_acc_rjt_t    acc;
6124         int             rval = FC_SUCCESS;
6125 
6126         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6127 
6128         bzero(&acc, sizeof (acc));
6129 
6130         /* Build ACC. */
6131         acc.ls_code.ls_code = LA_ELS_ACC;
6132 
6133         pkt->pkt_state = FC_PKT_SUCCESS;
6134 
6135         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6136             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6137 
6138         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6139 
6140         return (rval);
6141 }
6142 
6143 static int
6144 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
6145 {
6146         uchar_t                 *rnid_acc;
6147         port_id_t               d_id;
6148         ql_link_t               *link;
6149         ql_tgt_t                *tq;
6150         uint16_t                index;
6151         la_els_rnid_acc_t       acc;
6152         la_els_rnid_t           *req;
6153         size_t                  req_len;
6154 
6155         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6156 
6157         req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6158         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6159         index = ql_alpa_to_index[d_id.b.al_pa];
6160 
6161         tq = NULL;
6162         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6163                 tq = link->base_address;
6164                 if (tq->d_id.b24 == d_id.b24) {
6165                         break;
6166                 } else {
6167                         tq = NULL;
6168                 }
6169         }
6170 
6171         /* Allocate memory for rnid status block */
6172         rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
6173 
6174         bzero(&acc, sizeof (acc));
6175 
6176         req = (la_els_rnid_t *)pkt->pkt_cmd;
6177         if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6178             (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
6179             (caddr_t)rnid_acc) != QL_SUCCESS)) {
6180 
6181                 kmem_free(rnid_acc, req_len);
6182                 acc.ls_code.ls_code = LA_ELS_RJT;
6183 
6184                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6185                     (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6186 
6187                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6188                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6189                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6190 
6191                 return (FC_FAILURE);
6192         }
6193 
6194         acc.ls_code.ls_code = LA_ELS_ACC;
6195         bcopy(rnid_acc, &acc.hdr, req_len);
6196         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6197             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6198 
6199         kmem_free(rnid_acc, req_len);
6200         pkt->pkt_state = FC_PKT_SUCCESS;
6201 
6202         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6203 
6204         return (FC_SUCCESS);
6205 }
6206 
6207 static int
6208 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6209 {
6210         fc_rls_acc_t            *rls_acc;
6211         port_id_t               d_id;
6212         ql_link_t               *link;
6213         ql_tgt_t                *tq;
6214         uint16_t                index;
6215         la_els_rls_acc_t        acc;
6216 
6217         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6218 
6219         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6220         index = ql_alpa_to_index[d_id.b.al_pa];
6221 
6222         tq = NULL;
6223         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6224                 tq = link->base_address;
6225                 if (tq->d_id.b24 == d_id.b24) {
6226                         break;
6227                 } else {
6228                         tq = NULL;
6229                 }
6230         }
6231 
6232         /* Allocate memory for link error status block */
6233         rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6234 
6235         bzero(&acc, sizeof (la_els_rls_acc_t));
6236 
6237         if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
 
6252         }
6253 
6254         LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6255         LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6256         LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6257         LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6258         LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6259 
6260         acc.ls_code.ls_code = LA_ELS_ACC;
6261         acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6262         acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6263         acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
6264         acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6265         acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6266         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6267             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6268 
6269         kmem_free(rls_acc, sizeof (*rls_acc));
6270         pkt->pkt_state = FC_PKT_SUCCESS;
6271 
6272         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6273 
6274         return (FC_SUCCESS);
6275 }
6276 
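/*
 * ql_busy_plogi
 *	Holds off PLOGI completion until the target's outstanding commands
 *	have drained and the callback queue no longer references the target,
 *	so that commands completed with PKT_PORT_OFFLINE after the PLOGI do
 *	not confuse the ULPs.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pkt:	pointer to fc_packet.
 *	tq:	target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS or FC_TRAN_BUSY.
 *
 * Context:
 *	Kernel context.
 */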
6277 static int
6278 ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
6279 {
6280         port_id_t       d_id;
6281         ql_srb_t        *sp;
6282         fc_unsol_buf_t  *ubp;
6283         ql_link_t       *link, *next_link;
6284         int             rval = FC_SUCCESS;
6285         int             cnt = 5;
6286 
6287         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6288 
6289         /*
6290          * We need to ensure that tq->outcnt == 0; otherwise any
6291          * command completed with PKT_PORT_OFFLINE after the PLOGI
6292          * will confuse the ULPs.
6293          */
6294 
6295         DEVICE_QUEUE_LOCK(tq);
6296         do {
6297                 /*
6298                  * Wait for the commands to drain. If they do not
6299                  * drain, the transport will retry the PLOGI after
6300                  * a few seconds.
6301                  */
6302                 if (tq->outcnt != 0) {
6303                         rval = FC_TRAN_BUSY;
6304                         DEVICE_QUEUE_UNLOCK(tq);
6305                         ql_delay(ha, 10000);
6306                         DEVICE_QUEUE_LOCK(tq);
6307                         cnt--;
6308                         if (!cnt) {
6309                                 cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
6310                                     " for %xh outcount %xh", QL_NAME,
6311                                     ha->instance, tq->d_id.b24, tq->outcnt);
6312                         }
6313                 } else {
6314                         rval = FC_SUCCESS;
6315                         break;
6316                 }
6317         } while (cnt > 0);
6318         DEVICE_QUEUE_UNLOCK(tq);
6319 
6320         /*
6321          * Return if busy or if the PLOGI was asynchronous.
6322          */
6323         if ((rval != FC_SUCCESS) ||
6324             (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
6325             pkt->pkt_comp)) {
6326                 QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
6327                     ha->instance);
6328                 return (rval);
6329         }
6330 
6331         /*
6332          * Give the task daemon sufficient time; hopefully, by the
6333          * time the transport retries the PLOGI, it will have
6334          * flushed the callback queue.
6335          */
6336         TASK_DAEMON_LOCK(ha);
6337         for (link = ha->callback_queue.first; link != NULL;
6338             link = next_link) {
6339                 next_link = link->next;
6340                 sp = link->base_address;
6341                 if (sp->flags & SRB_UB_CALLBACK) {
6342                         ubp = ha->ub_array[sp->handle];
6343                         d_id.b24 = ubp->ub_frame.s_id;
6344                 } else {
6345                         d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
6346                 }
6347                 if (tq->d_id.b24 == d_id.b24) {
6348                         cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
6349                             ha->instance, tq->d_id.b24);
6350                         rval = FC_TRAN_BUSY;
6351                         break;
6352                 }
6353         }
6354         TASK_DAEMON_UNLOCK(ha);
6355 
6356         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6357 
6358         return (rval);
6359 }
6360 
6361 /*
6362  * ql_login_port
6363  *      Logs in a device if not already logged in.
6364  *
6365  * Input:
6366  *      ha = adapter state pointer.
6367  *      d_id = 24 bit port ID.
6368  *      DEVICE_QUEUE_LOCK must be released.
6369  *
6370  * Returns:
6371  *      QL local function return status code.
6372  *
6373  * Context:
6374  *      Kernel context.
6375  */
6376 static int
6377 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6378 {
6379         ql_adapter_state_t      *vha;
6380         ql_link_t               *link;
6381         uint16_t                index;
6382         ql_tgt_t                *tq, *tq2;
6383         uint16_t                loop_id, first_loop_id, last_loop_id;
6384         int                     rval = QL_SUCCESS;
6385 
6386         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6387             d_id.b24);
6388 
6389         /* Get head queue index. */
6390         index = ql_alpa_to_index[d_id.b.al_pa];
6391 
6392         /* Check whether the device already has a queue. */
6393         tq = NULL;
6394         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6395                 tq = link->base_address;
6396                 if (tq->d_id.b24 == d_id.b24) {
6397                         loop_id = tq->loop_id;
6398                         break;
6399                 } else {
6400                         tq = NULL;
6401                 }
6402         }
6403 
6404         /* Stop issuing any I/O and unsolicited LOGOs. */
6405         if ((tq != NULL) && (!(ddi_in_panic()))) {
6406                 DEVICE_QUEUE_LOCK(tq);
6407                 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6408                 tq->flags &= ~TQF_RSCN_RCVD;
6409                 DEVICE_QUEUE_UNLOCK(tq);
6410         }
6411         if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6412             !(tq->flags & TQF_FABRIC_DEVICE)) {
6413                 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6414         }
6415 
6416         /* Special case for Nameserver */
6417         if (d_id.b24 == 0xFFFFFC) {
6418                 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
6419                     SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6420                 if (tq == NULL) {
6421                         ADAPTER_STATE_LOCK(ha);
6422                         tq = ql_dev_init(ha, d_id, loop_id);
6423                         ADAPTER_STATE_UNLOCK(ha);
6424                         if (tq == NULL) {
6425                                 EL(ha, "failed=%xh, d_id=%xh\n",
6426                                     QL_FUNCTION_FAILED, d_id.b24);
6427                                 return (QL_FUNCTION_FAILED);
6428                         }
6429                 }
6430                 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
6431                         rval = ql_login_fabric_port(ha, tq, loop_id);
6432                         if (rval == QL_SUCCESS) {
6433                                 tq->loop_id = loop_id;
6434                                 tq->flags |= TQF_FABRIC_DEVICE;
6435                                 (void) ql_get_port_database(ha, tq, PDF_NONE);
6436                         }
6437                 } else {
6438                         ha->topology = (uint8_t)
6439                             (ha->topology | QL_SNS_CONNECTION);
6440                 }
6441         /* Check whether the device is already logged in. */
6442         } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6443                 if (tq->flags & TQF_FABRIC_DEVICE) {
6444                         rval = ql_login_fabric_port(ha, tq, loop_id);
6445                         if (rval == QL_PORT_ID_USED) {
6446                                 rval = QL_SUCCESS;
6447                         }
6448                 } else if (LOCAL_LOOP_ID(loop_id)) {
6449                         rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6450                             (tq->flags & TQF_INITIATOR_DEVICE ?
6451                             LLF_NONE : LLF_PLOGI));
6452                         if (rval == QL_SUCCESS) {
6453                                 DEVICE_QUEUE_LOCK(tq);
6454                                 tq->loop_id = loop_id;
6455                                 DEVICE_QUEUE_UNLOCK(tq);
6456                         }
6457                 }
6458         } else if (ha->topology & QL_SNS_CONNECTION) {
6459                 /* Locate unused loop ID. */
6460                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6461                         first_loop_id = 0;
6462                         last_loop_id = LAST_N_PORT_HDL;
6463                 } else if (ha->topology & QL_F_PORT) {
6464                         first_loop_id = 0;
6465                         last_loop_id = SNS_LAST_LOOP_ID;
6466                 } else {
6467                         first_loop_id = SNS_FIRST_LOOP_ID;
6468                         last_loop_id = SNS_LAST_LOOP_ID;
6469                 }
6470 
6471                 /* Acquire adapter state lock. */
6472                 ADAPTER_STATE_LOCK(ha);
6473 
6474                 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6475                 if (tq == NULL) {
6476                         EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6477                             d_id.b24);
6478 
6479                         ADAPTER_STATE_UNLOCK(ha);
6480 
 
6517                         case QL_PORT_ID_USED:
6518                                 /*
6519                                  * Use the firmware handle and try to
6520                                  * log in again.
6521                                  */
6522                                 ADAPTER_STATE_LOCK(ha);
6523                                 ha->pha->free_loop_id--;
6524                                 ADAPTER_STATE_UNLOCK(ha);
6525                                 loop_id = tq->loop_id;
6526                                 break;
6527 
6528                         case QL_SUCCESS:
6529                                 tq->flags |= TQF_FABRIC_DEVICE;
6530                                 (void) ql_get_port_database(ha,
6531                                     tq, PDF_NONE);
6532                                 index = 1;
6533                                 break;
6534 
6535                         case QL_LOOP_ID_USED:
6536                                 tq->loop_id = PORT_NO_LOOP_ID;
6537                                 loop_id = ha->pha->free_loop_id++;
6538                                 break;
6539 
6540                         case QL_ALL_IDS_IN_USE:
6541                                 tq->loop_id = PORT_NO_LOOP_ID;
6542                                 index = 1;
6543                                 break;
6544 
6545                         default:
6546                                 tq->loop_id = PORT_NO_LOOP_ID;
6547                                 index = 1;
6548                                 break;
6549                         }
6550 
6551                         ADAPTER_STATE_LOCK(ha);
6552                 }
6553 
6554                 ADAPTER_STATE_UNLOCK(ha);
6555         } else {
6556                 rval = QL_FUNCTION_FAILED;
6557         }
6558 
6559         if (rval != QL_SUCCESS) {
6560                 EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6561         } else {
6562                 EL(ha, "d_id=%xh, loop_id=%xh, "
6563                     "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6564                     tq->loop_id, tq->port_name[0], tq->port_name[1],
6565                     tq->port_name[2], tq->port_name[3], tq->port_name[4],
6566                     tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6567         }
6568         return (rval);
6569 }
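/*
 * Illustrative sketch (not part of the driver): a typical caller builds a
 * port_id_t from the frame header and then logs the port in, as is done
 * from the polled-mode panic path in ql_start_cmd() below:
 *
 *	port_id_t	d_id;
 *
 *	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
 *	(void) ql_login_port(ha, d_id);
 *
 * DEVICE_QUEUE_LOCK must not be held across the call (see header above).
 */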
6570 
6571 /*
6572  * ql_login_fabric_port
6573  *      Issue login fabric port mailbox command.
6574  *
6575  * Input:
6576  *      ha:             adapter state pointer.
6577  *      tq:             target queue pointer.
6578  *      loop_id:        FC Loop ID.
6579  *
6580  * Returns:
6581  *      ql local function return status code.
6582  *
6583  * Context:
6584  *      Kernel context.
6585  */
6586 static int
6587 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6588 {
6589         int             rval;
6590         int             index;
6591         int             retry = 0;
6592         port_id_t       d_id;
6593         ql_tgt_t        *newq;
6594         ql_mbx_data_t   mr;
6595 
6596         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6597             tq->d_id.b24);
6598 
6599         /*
6600          * QL_PARAMETER_ERROR also means the firmware was not
6601          * able to allocate a PCB entry due to resource issues
6602          * or a collision.
6603          */
6604         do {
6605                 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6606                 if ((rval == QL_PARAMETER_ERROR) ||
6607                     ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6608                     mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6609                         retry++;
6610                         drv_usecwait(10 * MILLISEC);
6611                 } else {
6612                         break;
6613                 }
6614         } while (retry < 5);
6615 
6616         switch (rval) {
6617         case QL_SUCCESS:
6618                 tq->loop_id = loop_id;
6619                 break;
6620 
6621         case QL_PORT_ID_USED:
6622                 /*
6623                  * This loop ID should NOT be in use by the driver.
6624                  */
6625                 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6626 
6627                 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6628                         cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6629                             "dup loop_id=%xh, d_id=%xh", ha->instance,
6630                             newq->loop_id, newq->d_id.b24);
6631                         ql_send_logo(ha, newq, NULL);
6632                 }
6633 
6634                 tq->loop_id = mr.mb[1];
6635                 break;
6636 
6637         case QL_LOOP_ID_USED:
6638                 d_id.b.al_pa = LSB(mr.mb[2]);
6639                 d_id.b.area = MSB(mr.mb[2]);
6640                 d_id.b.domain = LSB(mr.mb[1]);
6641 
6642                 newq = ql_d_id_to_queue(ha, d_id);
6643                 if (newq && (newq->loop_id != loop_id)) {
6644                         /*
6645                          * This should NEVER happen, but this code is
6646                          * needed to bail out when the worst case does
6647                          * happen - as it used to before.
6648                          */
6649                         QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6650                             "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6651                             "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6652                             ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6653                             newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6654                             newq->d_id.b24, loop_id);
6655 
6656                         if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6657                                 ADAPTER_STATE_LOCK(ha);
6658 
6659                                 index = ql_alpa_to_index[newq->d_id.b.al_pa];
6660                                 ql_add_link_b(&ha->dev[index], &newq->device);
6661 
6662                                 newq->d_id.b24 = d_id.b24;
6663 
6664                                 index = ql_alpa_to_index[d_id.b.al_pa];
6665                                 ql_add_link_b(&ha->dev[index], &newq->device);
6666 
6667                                 ADAPTER_STATE_UNLOCK(ha);
6668                         }
6669 
6670                         (void) ql_get_port_database(ha, newq, PDF_NONE);
6671 
6672                 }
 
6674                 /*
6675                  * Invalidate the loop ID so that we
6676                  * can obtain a new one.
6677                  */
6678                 tq->loop_id = PORT_NO_LOOP_ID;
6679                 break;
6680 
6681         case QL_ALL_IDS_IN_USE:
6682                 rval = QL_FUNCTION_FAILED;
6683                 EL(ha, "no loop id's available\n");
6684                 break;
6685 
6686         default:
6687                 if (rval == QL_COMMAND_ERROR) {
6688                         switch (mr.mb[1]) {
6689                         case 2:
6690                         case 3:
6691                                 rval = QL_MEMORY_ALLOC_FAILED;
6692                                 break;
6693 
6694                         case 4:
6695                                 rval = QL_FUNCTION_TIMEOUT;
6696                                 break;
6697                         case 7:
6698                                 rval = QL_FABRIC_NOT_INITIALIZED;
6699                                 break;
6700                         default:
6701                                 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6702                                 break;
6703                         }
6704                 } else {
6705                         cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6706                             " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6707                             ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6708                 }
6709                 break;
6710         }
6711 
6712         if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6713             rval != QL_LOOP_ID_USED) {
6714                 EL(ha, "failed=%xh\n", rval);
6715         } else {
6716                 /*EMPTY*/
6717                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6718         }
6719         return (rval);
6720 }
6721 
6722 /*
6723  * ql_logout_port
6724  *      Logs out a device if possible.
6725  *
6726  * Input:
6727  *      ha:     adapter state pointer.
6728  *      d_id:   24 bit port ID.
6729  *
6730  * Returns:
6731  *      QL local function return status code.
6732  *
6733  * Context:
6734  *      Kernel context.
6735  */
6736 static int
6737 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6738 {
6739         ql_link_t       *link;
6740         ql_tgt_t        *tq;
6741         uint16_t        index;
6742 
6743         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6744 
6745         /* Get head queue index. */
6746         index = ql_alpa_to_index[d_id.b.al_pa];
6747 
6748         /* Get device queue. */
6749         tq = NULL;
6750         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6751                 tq = link->base_address;
6752                 if (tq->d_id.b24 == d_id.b24) {
6753                         break;
6754                 } else {
6755                         tq = NULL;
6756                 }
6757         }
6758 
6759         if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6760                 (void) ql_logout_fabric_port(ha, tq);
6761                 tq->loop_id = PORT_NO_LOOP_ID;
6762         }
6763 
6764         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6765 
6766         return (QL_SUCCESS);
6767 }
6768 
6769 /*
6770  * ql_dev_init
6771  *      Initialize/allocate device queue.
6772  *
6773  * Input:
6774  *      ha:             adapter state pointer.
6775  *      d_id:           device destination ID
6776  *      loop_id:        device loop ID
6777  *      ADAPTER_STATE_LOCK must be already obtained.
6778  *
6779  * Returns:
6780  *      NULL = failure
6781  *
6782  * Context:
6783  *      Kernel context.
6784  */
6785 ql_tgt_t *
6786 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6787 {
6788         ql_link_t       *link;
6789         uint16_t        index;
6790         ql_tgt_t        *tq;
6791 
6792         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6793             ha->instance, d_id.b24, loop_id);
6794 
6795         index = ql_alpa_to_index[d_id.b.al_pa];
6796 
6797         /* If device queue exists, set proper loop ID. */
6798         tq = NULL;
6799         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6800                 tq = link->base_address;
6801                 if (tq->d_id.b24 == d_id.b24) {
6802                         tq->loop_id = loop_id;
6803 
6804                         /* Reset port down retry count. */
6805                         tq->port_down_retry_count = ha->port_down_retry_count;
6806                         tq->qfull_retry_count = ha->qfull_retry_count;
6807 
6808                         break;
6809                 } else {
6810                         tq = NULL;
6811                 }
6812         }
6813 
6814         /* If the device does not have a queue, allocate one. */
6815         if (tq == NULL) {
6816                 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6817                 if (tq != NULL) {
6818                         /*
6819                          * mutex to protect the device queue,
6820                          * does not block interrupts.
6821                          */
6822                         mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6823                             (ha->iflags & IFLG_INTR_AIF) ?
6824                             (void *)(uintptr_t)ha->intr_pri :
6825                             (void *)(uintptr_t)ha->iblock_cookie);
6826 
6827                         tq->d_id.b24 = d_id.b24;
6828                         tq->loop_id = loop_id;
6829                         tq->device.base_address = tq;
6830                         tq->iidma_rate = IIDMA_RATE_INIT;
6831 
6832                         /* Reset port down retry count. */
6833                         tq->port_down_retry_count = ha->port_down_retry_count;
6834                         tq->qfull_retry_count = ha->qfull_retry_count;
6835 
6836                         /* Add device to device queue. */
6837                         ql_add_link_b(&ha->dev[index], &tq->device);
6838                 }
6839         }
6840 
6841         if (tq == NULL) {
6842                 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6843         } else {
6844                 /*EMPTY*/
6845                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6846         }
6847         return (tq);
6848 }
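/*
 * Illustrative sketch (not part of the driver): device queues created by
 * ql_dev_init() are hashed by AL_PA, so looking up an existing queue for
 * a given d_id follows the pattern used throughout this file:
 *
 *	index = ql_alpa_to_index[d_id.b.al_pa];
 *	tq = NULL;
 *	for (link = ha->dev[index].first; link != NULL; link = link->next) {
 *		tq = link->base_address;
 *		if (tq->d_id.b24 == d_id.b24) {
 *			break;
 *		}
 *		tq = NULL;
 *	}
 */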
6849 
6850 /*
6851  * ql_dev_free
6852  *      Remove the queue from the device list and free its resources.
6853  *
6854  * Input:
6855  *      ha:     adapter state pointer.
6856  *      tq:     target queue pointer.
6857  *      ADAPTER_STATE_LOCK must be already obtained.
6858  *
6859  * Context:
6860  *      Kernel context.
6861  */
6862 void
6863 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6864 {
6865         ql_link_t       *link;
6866         uint16_t        index;
6867         ql_lun_t        *lq;
6868 
6869         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6870 
6871         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6872                 lq = link->base_address;
6873                 if (lq->cmd.first != NULL) {
6874                         return;
6875                 }
6876         }
6877 
6878         if (tq->outcnt == 0) {
6879                 /* Get head queue index. */
6880                 index = ql_alpa_to_index[tq->d_id.b.al_pa];
6881                 for (link = ha->dev[index].first; link != NULL;
6882                     link = link->next) {
6883                         if (link->base_address == tq) {
6884                                 ql_remove_link(&ha->dev[index], link);
6885 
6886                                 link = tq->lun_queues.first;
6887                                 while (link != NULL) {
6888                                         lq = link->base_address;
6889                                         link = link->next;
6890 
6891                                         ql_remove_link(&tq->lun_queues,
6892                                             &lq->link);
6893                                         kmem_free(lq, sizeof (ql_lun_t));
6894                                 }
6895 
6896                                 mutex_destroy(&tq->mutex);
6897                                 kmem_free(tq, sizeof (ql_tgt_t));
6898                                 break;
6899                         }
6900                 }
6901         }
6902 
6903         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6904 }
6905 
6906 /*
6907  * ql_lun_queue
6908  *      Allocate a LUN queue if one does not exist.
6909  *
6910  * Input:
6911  *      ha:     adapter state pointer.
6912  *      tq:     target queue.
6913  *      lun:    LUN number.
6914  *
6915  * Returns:
6916  *      NULL = failure
6917  *
6918  * Context:
6919  *      Kernel context.
6920  */
6921 static ql_lun_t *
6922 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6923 {
6924         ql_lun_t        *lq;
6925         ql_link_t       *link;
6926 
6927         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6928 
6929         /* Fast path. */
6930         if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6931                 QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6932                 return (tq->last_lun_queue);
6933         }
6934 
6935         if (lun >= MAX_LUNS) {
6936                 EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6937                 return (NULL);
6938         }
6939         /* If the LUN queue already exists, return it. */
6940         lq = NULL;
6941         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6942                 lq = link->base_address;
6943                 if (lq->lun_no == lun) {
6944                         QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6945                         tq->last_lun_queue = lq;
6946                         return (lq);
6947                 }
6948         }
6949 
6950         /* The queue does not exist; allocate a new LUN queue. */
6951         lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6952 
6953         /* Initialize LUN queue. */
6954         if (lq != NULL) {
6955                 lq->link.base_address = lq;
6956 
6957                 lq->lun_no = lun;
6958                 lq->target_queue = tq;
6959 
6960                 DEVICE_QUEUE_LOCK(tq);
6961                 ql_add_link_b(&tq->lun_queues, &lq->link);
6962                 DEVICE_QUEUE_UNLOCK(tq);
6963                 tq->last_lun_queue = lq;
6964         }
6965 
6966         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6967 
6968         return (lq);
6969 }
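/*
 * Illustrative sketch (not part of the driver): callers attach the LUN
 * queue to the SRB before starting a command, as ql_fcp_scsi_cmd() does
 * below:
 *
 *	if (tq != NULL &&
 *	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
 *		... build the IOCB and call ql_start_cmd(ha, tq, pkt, sp) ...
 *	}
 */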
6970 
6971 /*
6972  * ql_fcp_scsi_cmd
6973  *      Process fibre channel (FCP) SCSI protocol commands.
6974  *
6975  * Input:
6976  *      ha = adapter state pointer.
6977  *      pkt = pointer to fc_packet.
6978  *      sp = srb pointer.
6979  *
6980  * Returns:
6981  *      FC_SUCCESS - the packet was accepted for transport.
6982  *      FC_TRANSPORT_ERROR - a transport error occurred.
6983  *
6984  * Context:
6985  *      Kernel context.
6986  */
6987 static int
6988 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6989 {
6990         port_id_t       d_id;
6991         ql_tgt_t        *tq;
6992         uint64_t        *ptr;
6993         uint16_t        lun;
6994 
6995         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6996 
6997         tq = (ql_tgt_t *)pkt->pkt_fca_device;
6998         if (tq == NULL) {
6999                 d_id.r.rsvd_1 = 0;
7000                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7001                 tq = ql_d_id_to_queue(ha, d_id);
7002         }
7003 
7004         sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
7005         lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7006             hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7007 
7008         if (tq != NULL &&
7009             (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
7010 
7011                 /*
7012                  * zero out FCP response; 24 Bytes
7013                  */
7014                 ptr = (uint64_t *)pkt->pkt_resp;
7015                 *ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
7016 
7017                 /* Handle task management function. */
7018                 if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
7019                     sp->fcp->fcp_cntl.cntl_clr_aca |
7020                     sp->fcp->fcp_cntl.cntl_reset_tgt |
7021                     sp->fcp->fcp_cntl.cntl_reset_lun |
7022                     sp->fcp->fcp_cntl.cntl_clr_tsk |
7023                     sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
7024                         ql_task_mgmt(ha, tq, pkt, sp);
7025                 } else {
7026                         ha->pha->xioctl->IosRequested++;
7027                         ha->pha->xioctl->BytesRequested += (uint32_t)
7028                             sp->fcp->fcp_data_len;
7029 
7030                         /*
7031                          * Setup for commands with data transfer
7032                          */
7033                         sp->iocb = ha->fcp_cmd;
7034                         sp->req_cnt = 1;
7035                         if (sp->fcp->fcp_data_len != 0) {
7036                                 /*
7037                                  * FCP data is bound to pkt_data_dma
7038                                  */
7039                                 if (sp->fcp->fcp_cntl.cntl_write_data) {
7040                                         (void) ddi_dma_sync(pkt->pkt_data_dma,
7041                                             0, 0, DDI_DMA_SYNC_FORDEV);
7042                                 }
7043 
7044                                 /* Setup IOCB count. */
7045                                 if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
7046                                     (!CFG_IST(ha, CFG_CTRL_8021) ||
7047                                     sp->sg_dma.dma_handle == NULL)) {
7048                                         uint32_t        cnt;
7049 
7050                                         cnt = pkt->pkt_data_cookie_cnt -
7051                                             ha->cmd_segs;
7052                                         sp->req_cnt = (uint16_t)
7053                                             (cnt / ha->cmd_cont_segs);
7054                                         if (cnt % ha->cmd_cont_segs) {
7055                                                 sp->req_cnt = (uint16_t)
7056                                                     (sp->req_cnt + 2);
7057                                         } else {
7058                                                 sp->req_cnt++;
7059                                         }
7060                                 }
7061                         }
7062                         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7063 
7064                         return (ql_start_cmd(ha, tq, pkt, sp));
7065                 }
7066         } else {
7067                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7068                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7069 
7070                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7071                         ql_awaken_task_daemon(ha, sp, 0, 0);
7072         }
7073 
7074         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7075 
7076         return (FC_SUCCESS);
7077 }
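/*
 * Worked example for the request IOCB count computed in ql_fcp_scsi_cmd()
 * above (illustrative; the segment counts are hypothetical): with
 * ha->cmd_segs = 2 data segments in the command IOCB and
 * ha->cmd_cont_segs = 5 segments per continuation IOCB, a transfer with
 * pkt_data_cookie_cnt = 12 leaves cnt = 12 - 2 = 10 segments for
 * continuation entries; 10 / 5 = 2 with no remainder, so req_cnt becomes
 * 2 + 1 = 3 (the command IOCB plus two continuation IOCBs). With 13
 * cookies, cnt = 11 gives 11 / 5 = 2 with a remainder, so req_cnt becomes
 * 2 + 2 = 4.
 */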
7078 
7079 /*
7080  * ql_task_mgmt
7081  *      Task management function processor.
7082  *
7083  * Input:
7084  *      ha:     adapter state pointer.
7085  *      tq:     target queue pointer.
7086  *      pkt:    pointer to fc_packet.
7087  *      sp:     SRB pointer.
7088  *
7089  * Context:
7090  *      Kernel context.
7091  */
7092 static void
7093 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7094     ql_srb_t *sp)
7095 {
7096         fcp_rsp_t               *fcpr;
7097         struct fcp_rsp_info     *rsp;
7098         uint16_t                lun;
7099 
7100         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7101 
7102         fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7103         rsp = (struct fcp_rsp_info *)(pkt->pkt_resp + sizeof (fcp_rsp_t));
7104 
7105         bzero(fcpr, pkt->pkt_rsplen);
7106 
7107         fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7108         fcpr->fcp_response_len = 8;
7109         lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7110             hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7111 
7112         if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7113                 if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7114                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7115                 }
7116         } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7117                 if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7118                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7119                 }
7120         } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7121                 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7122                     QL_SUCCESS) {
7123                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7124                 }
7125         } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7126                 if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7127                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7128                 }
7129         } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7130                 if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7131                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7132                 }
7133         } else {
7134                 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7135         }
7136 
7137         pkt->pkt_state = FC_PKT_SUCCESS;
7138 
7139         /* Do command callback. */
7140         if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7141                 ql_awaken_task_daemon(ha, sp, 0, 0);
7142         }
7143 
7144         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7145 }
7146 
7147 /*
7148  * ql_fcp_ip_cmd
7149  *      Process fibre channel Internet protocol (IP) commands.
7150  *
7151  * Input:
7152  *      ha:     adapter state pointer.
7153  *      pkt:    pointer to fc_packet.
7154  *      sp:     SRB pointer.
7155  *
7156  * Returns:
7157  *      FC_SUCCESS - the packet was accepted for transport.
7158  *      FC_TRANSPORT_ERROR - a transport error occurred.
7159  *
7160  * Context:
7161  *      Kernel context.
7162  */
7163 static int
7164 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7165 {
7166         port_id_t       d_id;
7167         ql_tgt_t        *tq;
7168 
7169         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7170 
7171         tq = (ql_tgt_t *)pkt->pkt_fca_device;
7172         if (tq == NULL) {
7173                 d_id.r.rsvd_1 = 0;
7174                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7175                 tq = ql_d_id_to_queue(ha, d_id);
7176         }
7177 
7178         if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7179                 /*
7180                  * IP data is bound to pkt_cmd_dma
7181                  */
7182                 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7183                     0, 0, DDI_DMA_SYNC_FORDEV);
7184 
7185                 /* Setup IOCB count. */
7186                 sp->iocb = ha->ip_cmd;
7187                 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7188                         uint32_t        cnt;
7189 
7190                         cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7191                         sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7192                         if (cnt % ha->cmd_cont_segs) {
7193                                 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7194                         } else {
7195                                 sp->req_cnt++;
7196                         }
7197                 } else {
7198                         sp->req_cnt = 1;
7199                 }
7200                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7201 
7202                 return (ql_start_cmd(ha, tq, pkt, sp));
7203         } else {
7204                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7205                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7206 
7207                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7208                         ql_awaken_task_daemon(ha, sp, 0, 0);
7209         }
7210 
7211         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7212 
7213         return (FC_SUCCESS);
7214 }
7215 
7216 /*
7217  * ql_fc_services
7218  *      Process fibre channel services (name server).
7219  *
7220  * Input:
7221  *      ha:     adapter state pointer.
7222  *      pkt:    pointer to fc_packet.
7223  *
7224  * Returns:
7225  *      FC_SUCCESS - the packet was accepted for transport.
7226  *      FC_TRANSPORT_ERROR - a transport error occurred.
7227  *
7228  * Context:
7229  *      Kernel context.
7230  */
7231 static int
7232 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7233 {
7234         uint32_t        cnt;
7235         fc_ct_header_t  hdr;
7236         la_els_rjt_t    rjt;
7237         port_id_t       d_id;
7238         ql_tgt_t        *tq;
7239         ql_srb_t        *sp;
7240         int             rval;
7241 
7242         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7243 
7244         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7245             (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7246 
7247         bzero(&rjt, sizeof (rjt));
7248 
7249         /* Do some sanity checks */
7250         cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7251             sizeof (fc_ct_header_t));
7252         if (cnt > (uint32_t)pkt->pkt_rsplen) {
7253                 EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7254                     pkt->pkt_rsplen);
7255                 return (FC_ELS_MALFORMED);
7256         }
7257 
7258         switch (hdr.ct_fcstype) {
7259         case FCSTYPE_DIRECTORY:
7260         case FCSTYPE_MGMTSERVICE:
7261                 /* An FCA must make sure that the header is in big endian */
7262                 ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7263 
7264                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7265                 tq = ql_d_id_to_queue(ha, d_id);
7266                 sp = (ql_srb_t *)pkt->pkt_fca_private;
7267                 if (tq == NULL ||
7268                     (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7269                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
7270                         pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7271                         rval = QL_SUCCESS;
7272                         break;
7273                 }
7274 
7275                 /*
7276                  * Services data is bound to pkt_cmd_dma
7277                  */
7278                 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7279                     DDI_DMA_SYNC_FORDEV);
7280 
7281                 sp->flags |= SRB_MS_PKT;
7282                 sp->retry_count = 32;
7283 
7284                 /* Setup IOCB count. */
7285                 sp->iocb = ha->ms_cmd;
7286                 if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7287                         cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7288                         sp->req_cnt =
7289                             (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7290                         if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7291                                 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7292                         } else {
7293                                 sp->req_cnt++;
7294                         }
7295                 } else {
7296                         sp->req_cnt = 1;
7297                 }
7298                 rval = ql_start_cmd(ha, tq, pkt, sp);
7299 
7300                 QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7301                     ha->instance, rval);
7302 
7303                 return (rval);
7304 
7305         default:
7306                 EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7307                 rval = QL_FUNCTION_PARAMETER_ERROR;
7308                 break;
7309         }
7310 
7311         if (rval != QL_SUCCESS) {
7312                 /* Build RJT. */
7313                 rjt.ls_code.ls_code = LA_ELS_RJT;
7314                 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7315 
7316                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7317                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7318 
7319                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7320                 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7321                 EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7322         }
7323 
7324         /* Do command callback. */
7325         if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7326                 ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7327                     0, 0);
7328         }
7329 
7330         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7331 
7332         return (FC_SUCCESS);
7333 }
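/*
 * Note on the sanity check in ql_fc_services() above (illustrative):
 * ct_aiusize is treated as a count of 4-byte words, so the total CT
 * payload size in bytes is ct_aiusize * 4 plus sizeof (fc_ct_header_t).
 * For a hypothetical ct_aiusize of 4, the response buffer must be at
 * least 16 + sizeof (fc_ct_header_t) bytes, otherwise the packet is
 * rejected with FC_ELS_MALFORMED.
 */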
7334 
7335 /*
7336  * ql_cthdr_endian
7337  *      Change the endianness of the CT passthrough header and payload.
7338  *
7339  * Input:
7340  *      acc_handle:     DMA buffer access handle.
7341  *      ct_hdr:         Pointer to header.
7342  *      restore:        Restore first flag.
7343  *
7344  * Context:
7345  *      Interrupt or Kernel context, no mailbox commands allowed.
7346  */
7347 void
7348 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7349     boolean_t restore)
7350 {
 
7420  * ql_start_cmd
7421  *      Finishes starting fibre channel protocol (FCP) command.
7422  *
7423  * Input:
7424  *      ha:     adapter state pointer.
7425  *      tq:     target queue pointer.
7426  *      pkt:    pointer to fc_packet.
7427  *      sp:     SRB pointer.
7428  *
7429  * Context:
7430  *      Kernel context.
7431  */
7432 static int
7433 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7434     ql_srb_t *sp)
7435 {
7436         int             rval = FC_SUCCESS;
7437         time_t          poll_wait = 0;
7438         ql_lun_t        *lq = sp->lun_queue;
7439 
7440         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7441 
7442         sp->handle = 0;
7443 
7444         /* Set poll for finish. */
7445         if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7446                 sp->flags |= SRB_POLL;
7447                 if (pkt->pkt_timeout == 0) {
7448                         pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7449                 }
7450         }
7451 
7452         /* Acquire device queue lock. */
7453         DEVICE_QUEUE_LOCK(tq);
7454 
7455         /*
7456          * If we need authentication, report the device as busy
7457          * to the upper layers so that they retry later.
7458          */
7459         if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7460                 DEVICE_QUEUE_UNLOCK(tq);
 
7476                 sp->isp_timeout = 0;
7477         }
7478 
7479         /* If a polling command, set up the wait time. */
7480         if (sp->flags & SRB_POLL) {
7481                 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7482                         poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7483                 } else {
7484                         poll_wait = pkt->pkt_timeout;
7485                 }
7486         }
7487 
7488         if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7489             (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7490                 /* Set ending status. */
7491                 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7492 
7493                 /* Call done routine to handle completions. */
7494                 sp->cmd.next = NULL;
7495                 DEVICE_QUEUE_UNLOCK(tq);
7496                 ql_done(&sp->cmd);
7497         } else {
7498                 if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7499                         int do_lip = 0;
7500 
7501                         DEVICE_QUEUE_UNLOCK(tq);
7502 
7503                         ADAPTER_STATE_LOCK(ha);
7504                         if ((do_lip = ha->pha->lip_on_panic) == 0) {
7505                                 ha->pha->lip_on_panic++;
7506                         }
7507                         ADAPTER_STATE_UNLOCK(ha);
7508 
7509                         if (!do_lip) {
7510 
7511                                 /*
7512                                  * That Qlogic F/W performs PLOGI, PRLI, etc
7513                                  * is helpful here. If a PLOGI fails for some
7514                                  * reason, you would get CS_PORT_LOGGED_OUT
7515                                  * or some such error; and we should get a
7516                                  * careful polled mode login kicked off inside
 
7524                         }
7525 
7526                         ql_start_iocb(ha, sp);
7527                 } else {
7528                         /* Add the command to the device queue */
7529                         if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7530                                 ql_add_link_t(&lq->cmd, &sp->cmd);
7531                         } else {
7532                                 ql_add_link_b(&lq->cmd, &sp->cmd);
7533                         }
7534 
7535                         sp->flags |= SRB_IN_DEVICE_QUEUE;
7536 
7537                         /* Check whether next message can be processed */
7538                         ql_next(ha, lq);
7539                 }
7540         }
7541 
7542         /* If polling, wait for finish. */
7543         if (poll_wait) {
7544                 if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
7545                         int     res;
7546 
7547                         res = ql_abort((opaque_t)ha, pkt, 0);
7548                         if (res != FC_SUCCESS && res != FC_ABORTED) {
7549                                 DEVICE_QUEUE_LOCK(tq);
7550                                 ql_remove_link(&lq->cmd, &sp->cmd);
7551                                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7552                                 DEVICE_QUEUE_UNLOCK(tq);
7553                         }
7554                 }
7555 
7556                 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7557                         EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7558                         rval = FC_TRANSPORT_ERROR;
7559                 }
7560 
7561                 if (ddi_in_panic()) {
7562                         if (pkt->pkt_state != FC_PKT_SUCCESS) {
7563                                 port_id_t d_id;
7564 
7565                                 /*
7566                                  * A successful login implies by design
7567                                  * that PRLI also succeeded for disks.
7568                                  * Note also that there is no special
7569                                  * mailbox command to send a PRLI.
7570                                  */
7571                                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7572                                 (void) ql_login_port(ha, d_id);
7573                         }
7574                 }
7575 
7576                 /*
7577                  * This should only happen during CPR dumping
7578                  */
7579                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7580                     pkt->pkt_comp) {
7581                         sp->flags &= ~SRB_POLL;
7582                         (*pkt->pkt_comp)(pkt);
7583                 }
7584         }
7585 
7586         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7587 
7588         return (rval);
7589 }
7590 
7591 /*
7592  * ql_poll_cmd
7593  *      Polls commands for completion.
7594  *
7595  * Input:
7596  *      ha = adapter state pointer.
7597  *      sp = SRB command pointer.
7598  *      poll_wait = poll wait time in seconds.
7599  *
7600  * Returns:
7601  *      QL local function return status code.
7602  *
7603  * Context:
7604  *      Kernel context.
7605  */
7606 static int
7607 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7608 {
7609         int                     rval = QL_SUCCESS;
7610         time_t                  msecs_left = poll_wait * 100;   /* 10ms inc */
7611         ql_adapter_state_t      *ha = vha->pha;
7612 
7613         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7614 
7615         while (sp->flags & SRB_POLL) {
7616 
7617                 if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7618                     ha->idle_timer >= 15 || ddi_in_panic()) {
7619 
7620                         /* If waiting for restart, do it now. */
7621                         if (ha->port_retry_timer != 0) {
7622                                 ADAPTER_STATE_LOCK(ha);
7623                                 ha->port_retry_timer = 0;
7624                                 ADAPTER_STATE_UNLOCK(ha);
7625 
7626                                 TASK_DAEMON_LOCK(ha);
7627                                 ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7628                                 TASK_DAEMON_UNLOCK(ha);
7629                         }
7630 
7631                         if (INTERRUPT_PENDING(ha)) {
7632                                 (void) ql_isr((caddr_t)ha);
7633                                 INTR_LOCK(ha);
7634                                 ha->intr_claimed = TRUE;
7635                                 INTR_UNLOCK(ha);
7636                         }
7637 
7638                         /*
7639                          * Call task thread function in case the
7640                          * daemon is not running.
7641                          */
7642                         TASK_DAEMON_LOCK(ha);
7643 
7644                         if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7645                             QL_TASK_PENDING(ha)) {
7646                                 ha->task_daemon_flags |= TASK_THREAD_CALLED;
7647                                 ql_task_thread(ha);
7648                                 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7649                         }
7650 
7651                         TASK_DAEMON_UNLOCK(ha);
7652                 }
7653 
7654                 if (msecs_left < 10) {
7655                         rval = QL_FUNCTION_TIMEOUT;
7656                         break;
7657                 }
7658 
7659                 /*
7660                  * The polling interval is 10 milliseconds. Increasing
7661                  * the polling interval to seconds, since disk I/O
7662                  * timeout values are ~60 seconds, is tempting, but CPR
7663                  * dump time would increase, and so would the crash
7664                  * dump time. Don't toy with this setting without due
7665                  * consideration for all the scenarios that will be
7666                  * impacted.
7667                  */
7668                 ql_delay(ha, 10000);
7669                 msecs_left -= 10;
7670         }
7671 
7672         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7673 
7674         return (rval);
7675 }
7676 
7677 /*
7678  * ql_next
7679  *      Retrieve and process next job in the device queue.
7680  *
7681  * Input:
7682  *      ha:     adapter state pointer.
7683  *      lq:     LUN queue pointer.
7684  *      DEVICE_QUEUE_LOCK must be already obtained.
7685  *
7686  * Output:
7687  *      Releases DEVICE_QUEUE_LOCK upon exit.
7688  *
7689  * Context:
7690  *      Interrupt or Kernel context, no mailbox commands allowed.
7691  */
7692 void
7693 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7694 {
7695         ql_srb_t                *sp;
7696         ql_link_t               *link;
7697         ql_tgt_t                *tq = lq->target_queue;
7698         ql_adapter_state_t      *ha = vha->pha;
7699 
7700         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7701 
7702         if (ddi_in_panic()) {
7703                 DEVICE_QUEUE_UNLOCK(tq);
7704                 QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7705                     ha->instance);
7706                 return;
7707         }
7708 
7709         while ((link = lq->cmd.first) != NULL) {
7710                 sp = link->base_address;
7711 
7712                 /* Exit if commands cannot be started. */
7713                 if (DRIVER_SUSPENDED(ha) ||
7714                     (ha->flags & ONLINE) == 0 ||
7715                     !VALID_DEVICE_ID(ha, tq->loop_id) ||
7716                     sp->flags & SRB_ABORT ||
7717                     tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7718                     TQF_QUEUE_SUSPENDED)) {
7719                         EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7720                             "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7721                             ha->task_daemon_flags, tq->flags, sp->flags,
7722                             ha->flags, tq->loop_id);
7723                         break;
7724                 }
7725 
7726                 /*
7727                  * For FCP commands, do not submit another command
7728                  * if an untagged command is already pending for
7729                  * the LUN, or if the LUN execution throttle has
7730                  * been reached.
7731                  */
7732                 if (sp->flags & SRB_FCP_CMD_PKT) {
7733                         if (lq->flags & LQF_UNTAGGED_PENDING ||
7734                             lq->lun_outcnt >= ha->execution_throttle) {
7735                                 QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7736                                     "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7737                                     tq->d_id.b24, lq->flags, lq->lun_outcnt);
7738                                 break;
7739                         }
7740                         if (sp->fcp->fcp_cntl.cntl_qtype ==
7741                             FCP_QTYPE_UNTAGGED) {
7742                                 /*
7743                                  * Set the untagged-flag for the LUN
7744                                  * so that no more untagged commands
7745                                  * can be submitted for this LUN.
7746                                  */
7747                                 lq->flags |= LQF_UNTAGGED_PENDING;
7748                         }
7749 
7750                         /* Count command as sent. */
7751                         lq->lun_outcnt++;
7752                 }
7753 
7754                 /* Remove srb from device queue. */
7755                 ql_remove_link(&lq->cmd, &sp->cmd);
7756                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7757 
7758                 tq->outcnt++;
7759 
7760                 ql_start_iocb(vha, sp);
7761         }
7762 
7763         /* Release device queue lock. */
7764         DEVICE_QUEUE_UNLOCK(tq);
7765 
7766         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7767 }
7768 
7769 /*
7770  * ql_done
7771  *      Process completed commands.
7772  *
7773  * Input:
7774  *      link:   first command link in chain.
7775  *
7776  * Context:
7777  *      Interrupt or Kernel context, no mailbox commands allowed.
7778  */
7779 void
7780 ql_done(ql_link_t *link)
7781 {
7782         ql_adapter_state_t      *ha;
7783         ql_link_t               *next_link;
7784         ql_srb_t                *sp;
7785         ql_tgt_t                *tq;
7786         ql_lun_t                *lq;
7787 
7788         QL_PRINT_3(CE_CONT, "started\n");
7789 
7790         for (; link != NULL; link = next_link) {
7791                 next_link = link->next;
7792                 sp = link->base_address;
7793                 ha = sp->ha;
7794 
7795                 if (sp->flags & SRB_UB_CALLBACK) {
7796                         QL_UB_LOCK(ha);
7797                         if (sp->flags & SRB_UB_IN_ISP) {
7798                                 if (ha->ub_outcnt != 0) {
7799                                         ha->ub_outcnt--;
7800                                 }
7801                                 QL_UB_UNLOCK(ha);
7802                                 ql_isp_rcvbuf(ha);
7803                                 QL_UB_LOCK(ha);
7804                         }
7805                         QL_UB_UNLOCK(ha);
7806                         ql_awaken_task_daemon(ha, sp, 0, 0);
7807                 } else {
7808                         /* Free outstanding command slot. */
7809                         if (sp->handle != 0) {
7810                                 ha->outstanding_cmds[
7811                                     sp->handle & OSC_INDEX_MASK] = NULL;
7812                                 sp->handle = 0;
7813                                 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7814                         }
7815 
7816                         /* Acquire device queue lock. */
7817                         lq = sp->lun_queue;
7818                         tq = lq->target_queue;
7819                         DEVICE_QUEUE_LOCK(tq);
7820 
7821                         /* Decrement outstanding commands on device. */
7822                         if (tq->outcnt != 0) {
7823                                 tq->outcnt--;
7824                         }
7825 
7826                         if (sp->flags & SRB_FCP_CMD_PKT) {
7827                                 if (sp->fcp->fcp_cntl.cntl_qtype ==
7828                                     FCP_QTYPE_UNTAGGED) {
7829                                         /*
7830                                          * Clear the flag for this LUN so that
7831                                          * untagged commands can be submitted
7832                                          * for it.
7833                                          */
7834                                         lq->flags &= ~LQF_UNTAGGED_PENDING;
 
7857                                 EL(ha, "fast abort modify change\n");
7858                                 sp->flags &= ~(SRB_RETRY);
7859                                 sp->pkt->pkt_reason = CS_TIMEOUT;
7860                         }
7861 
7862                         /* Place request back on top of target command queue */
7863                         if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7864                             !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7865                             sp->flags & SRB_RETRY &&
7866                             (sp->flags & SRB_WATCHDOG_ENABLED &&
7867                             sp->wdg_q_time > 1)) {
7868                                 sp->flags &= ~(SRB_ISP_STARTED |
7869                                     SRB_ISP_COMPLETED | SRB_RETRY);
7870 
7871                                 /* Reset watchdog timer */
7872                                 sp->wdg_q_time = sp->init_wdg_q_time;
7873 
7874                                 /* Issue marker command on reset status. */
7875                                 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7876                                     (sp->pkt->pkt_reason == CS_RESET ||
7877                                     (CFG_IST(ha, CFG_CTRL_24258081) &&
7878                                     sp->pkt->pkt_reason == CS_ABORTED))) {
7879                                         (void) ql_marker(ha, tq->loop_id, 0,
7880                                             MK_SYNC_ID);
7881                                 }
7882 
7883                                 ql_add_link_t(&lq->cmd, &sp->cmd);
7884                                 sp->flags |= SRB_IN_DEVICE_QUEUE;
7885                                 ql_next(ha, lq);
7886                         } else {
7887                                 /* Remove command from watchdog queue. */
7888                                 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7889                                         ql_remove_link(&tq->wdg, &sp->wdg);
7890                                         sp->flags &= ~SRB_WATCHDOG_ENABLED;
7891                                 }
7892 
7893                                 if (lq->cmd.first != NULL) {
7894                                         ql_next(ha, lq);
7895                                 } else {
7896                                         /* Release LU queue specific lock. */
7897                                         DEVICE_QUEUE_UNLOCK(tq);
7898                                         if (ha->pha->pending_cmds.first !=
7899                                             NULL) {
7900                                                 ql_start_iocb(ha, NULL);
7901                                         }
7902                                 }
7903 
7904                                 /* Sync buffers if required.  */
7905                                 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7906                                         (void) ddi_dma_sync(
7907                                             sp->pkt->pkt_resp_dma,
7908                                             0, 0, DDI_DMA_SYNC_FORCPU);
7909                                 }
7910 
7911                                 /* Map ISP completion codes. */
7912                                 sp->pkt->pkt_expln = FC_EXPLN_NONE;
7913                                 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7914                                 switch (sp->pkt->pkt_reason) {
7915                                 case CS_COMPLETE:
7916                                         sp->pkt->pkt_state = FC_PKT_SUCCESS;
7917                                         break;
7918                                 case CS_RESET:
7919                                         /* Issue marker command. */
7920                                         if (!(ha->task_daemon_flags &
7921                                             LOOP_DOWN)) {
7922                                                 (void) ql_marker(ha,
7923                                                     tq->loop_id, 0,
7924                                                     MK_SYNC_ID);
7925                                         }
7926                                         sp->pkt->pkt_state =
7927                                             FC_PKT_PORT_OFFLINE;
7928                                         sp->pkt->pkt_reason =
7929                                             FC_REASON_ABORTED;
7930                                         break;
7931                                 case CS_RESOUCE_UNAVAILABLE:
7932                                         sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7933                                         sp->pkt->pkt_reason =
7934                                             FC_REASON_PKT_BUSY;
7935                                         break;
7936 
7937                                 case CS_TIMEOUT:
7938                                         sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7939                                         sp->pkt->pkt_reason =
7940                                             FC_REASON_HW_ERROR;
7941                                         break;
7942                                 case CS_DATA_OVERRUN:
7943                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7944                                         sp->pkt->pkt_reason =
7945                                             FC_REASON_OVERRUN;
 
7964                                         break;
7965 
7966                                 case CS_ABORTED:
7967                                         DEVICE_QUEUE_LOCK(tq);
7968                                         if (tq->flags & (TQF_RSCN_RCVD |
7969                                             TQF_NEED_AUTHENTICATION)) {
7970                                                 sp->pkt->pkt_state =
7971                                                     FC_PKT_PORT_OFFLINE;
7972                                                 sp->pkt->pkt_reason =
7973                                                     FC_REASON_LOGIN_REQUIRED;
7974                                         } else {
7975                                                 sp->pkt->pkt_state =
7976                                                     FC_PKT_LOCAL_RJT;
7977                                                 sp->pkt->pkt_reason =
7978                                                     FC_REASON_ABORTED;
7979                                         }
7980                                         DEVICE_QUEUE_UNLOCK(tq);
7981                                         break;
7982 
7983                                 case CS_TRANSPORT:
7984                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7985                                         sp->pkt->pkt_reason =
7986                                             FC_PKT_TRAN_ERROR;
7987                                         break;
7988 
7989                                 case CS_DATA_UNDERRUN:
7990                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7991                                         sp->pkt->pkt_reason =
7992                                             FC_REASON_UNDERRUN;
7993                                         break;
7994                                 case CS_DMA_ERROR:
7995                                 case CS_BAD_PAYLOAD:
7996                                 case CS_UNKNOWN:
7997                                 case CS_CMD_FAILED:
7998                                 default:
7999                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8000                                         sp->pkt->pkt_reason =
8001                                             FC_REASON_HW_ERROR;
8002                                         break;
8003                                 }
8004 
8005                                 /* Now call the pkt completion callback */
8006                                 if (sp->flags & SRB_POLL) {
8007                                         sp->flags &= ~SRB_POLL;
8008                                 } else if (sp->pkt->pkt_comp) {
8009                                         if (sp->pkt->pkt_tran_flags &
8010                                             FC_TRAN_IMMEDIATE_CB) {
8011                                                 (*sp->pkt->pkt_comp)(sp->pkt);
8012                                         } else {
8013                                                 ql_awaken_task_daemon(ha, sp,
8014                                                     0, 0);
8015                                         }
8016                                 }
8017                         }
8018                 }
8019         }
8020 
8021         QL_PRINT_3(CE_CONT, "done\n");
8022 }
8023 
8024 /*
8025  * ql_awaken_task_daemon
8026  *      Adds command completion callback to callback queue and/or
8027  *      awakens task daemon thread.
8028  *
8029  * Input:
8030  *      ha:             adapter state pointer.
8031  *      sp:             srb pointer.
8032  *      set_flags:      task daemon flags to set.
8033  *      reset_flags:    task daemon flags to reset.
8034  *
8035  * Context:
8036  *      Interrupt or Kernel context, no mailbox commands allowed.
8037  */
8038 void
8039 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8040     uint32_t set_flags, uint32_t reset_flags)
8041 {
8042         ql_adapter_state_t      *ha = vha->pha;
8043 
8044         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8045 
8046         /* Acquire task daemon lock. */
8047         TASK_DAEMON_LOCK(ha);
8048 
8049         if (set_flags & ISP_ABORT_NEEDED) {
8050                 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
8051                         set_flags &= ~ISP_ABORT_NEEDED;
8052                 }
8053         }
8054 
8055         ha->task_daemon_flags |= set_flags;
8056         ha->task_daemon_flags &= ~reset_flags;
8057 
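             /*
              * If the task daemon is suspended, do the completion work in
              * the caller's context; otherwise queue the srb on the
              * callback queue and wake the sleeping daemon.
              */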
8058         if (QL_DAEMON_SUSPENDED(ha)) {
8059                 if (sp != NULL) {
8060                         TASK_DAEMON_UNLOCK(ha);
8061 
8062                         /* Do callback. */
8063                         if (sp->flags & SRB_UB_CALLBACK) {
8064                                 ql_unsol_callback(sp);
8065                         } else {
8066                                 (*sp->pkt->pkt_comp)(sp->pkt);
8067                         }
8068                 } else {
8069                         if (!(curthread->t_flag & T_INTR_THREAD) &&
8070                             !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
8071                                 ha->task_daemon_flags |= TASK_THREAD_CALLED;
8072                                 ql_task_thread(ha);
8073                                 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
8074                         }
8075 
8076                         TASK_DAEMON_UNLOCK(ha);
8077                 }
8078         } else {
8079                 if (sp != NULL) {
8080                         ql_add_link_b(&ha->callback_queue, &sp->cmd);
8081                 }
8082 
8083                 if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
8084                         cv_broadcast(&ha->cv_task_daemon);
8085                 }
8086                 TASK_DAEMON_UNLOCK(ha);
8087         }
8088 
8089         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8090 }
8091 
8092 /*
8093  * ql_task_daemon
8094  *      Thread that is awakened by the driver when
8095  *      background work needs to be done.
8096  *
8097  * Input:
8098  *      arg = adapter state pointer.
8099  *
8100  * Context:
8101  *      Kernel context.
8102  */
8103 static void
8104 ql_task_daemon(void *arg)
8105 {
8106         ql_adapter_state_t      *ha = (void *)arg;
8107 
8108         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8109 
8110         CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
8111             "ql_task_daemon");
8112 
8113         /* Acquire task daemon lock. */
8114         TASK_DAEMON_LOCK(ha);
8115 
8116         ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
8117 
8118         while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8119                 ql_task_thread(ha);
8120 
8121                 QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
8122 
8123                 /*
8124                  * Before we wait on the condition variable, check
8125                  * whether STOP_FLG is set, telling us to terminate.
8126                  */
8127                 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8128                         break;
8129                 }
8130 
8131                 /*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
8132                 CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
8133 
8134                 ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8135 
8136                 /* If killed, stop task daemon */
8137                 if (cv_wait_sig(&ha->cv_task_daemon,
8138                     &ha->task_daemon_mutex) == 0) {
8139                         ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
8140                 }
8141 
8142                 ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8143 
8144                 /*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
8145                 CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
8146 
8147                 QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
8148         }
8149 
8150         ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
8151             TASK_DAEMON_ALIVE_FLG);
8152 
8153         /*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
8154         CALLB_CPR_EXIT(&ha->cprinfo);
8155 
8156         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8157 
8158         thread_exit();
8159 }
8160 
8161 /*
8162  * ql_task_thread
8163  *      Work loop run by the task daemon thread.
8164  *
8165  * Input:
8166  *      ha = adapter state pointer.
8167  *      TASK_DAEMON_LOCK must be acquired prior to call.
8168  *
8169  * Context:
8170  *      Kernel context.
8171  */
8172 static void
8173 ql_task_thread(ql_adapter_state_t *ha)
8174 {
8175         int                     loop_again;
8176         ql_srb_t                *sp;
8177         ql_head_t               *head;
8178         ql_link_t               *link;
8179         caddr_t                 msg;
8180         ql_adapter_state_t      *vha;
8181 
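             /* Keep looping until a full pass finds no more work to do. */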
8182         do {
8183                 QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8184                     ha->instance, ha->task_daemon_flags);
8185 
8186                 loop_again = FALSE;
8187 
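                     /* Stall the daemon while the adapter is not at full power (D0). */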
8188                 QL_PM_LOCK(ha);
8189                 if (ha->power_level != PM_LEVEL_D0) {
8190                         QL_PM_UNLOCK(ha);
8191                         ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8192                         break;
8193                 }
8194                 QL_PM_UNLOCK(ha);
8195 
8196                 /* IDC event. */
8197                 if (ha->task_daemon_flags & IDC_EVENT) {
8198                         ha->task_daemon_flags &= ~IDC_EVENT;
8199                         TASK_DAEMON_UNLOCK(ha);
8200                         ql_process_idc_event(ha);
8201                         TASK_DAEMON_LOCK(ha);
8202                         loop_again = TRUE;
8203                 }
8204 
8205                 if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8206                     (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8207                     (ha->flags & ONLINE) == 0) {
8208                         ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8209                         break;
8210                 }
8211                 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8212 
8213                 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8214                         TASK_DAEMON_UNLOCK(ha);
8215                         if (ha->log_parity_pause == B_TRUE) {
8216                                 (void) ql_flash_errlog(ha,
8217                                     FLASH_ERRLOG_PARITY_ERR, 0,
8218                                     MSW(ha->parity_stat_err),
8219                                     LSW(ha->parity_stat_err));
8220                                 ha->log_parity_pause = B_FALSE;
8221                         }
8222                         ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8223                         TASK_DAEMON_LOCK(ha);
8224                         loop_again = TRUE;
8225                 }
8226 
8227                 /* Idle Check. */
8228                 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8229                         ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8230                         if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8231                                 TASK_DAEMON_UNLOCK(ha);
8232                                 ql_idle_check(ha);
8233                                 TASK_DAEMON_LOCK(ha);
8234                                 loop_again = TRUE;
8235                         }
8236                 }
8237 
8238                 /* Crystal+ port#0 bypass transition */
8239                 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8240                         ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8241                         TASK_DAEMON_UNLOCK(ha);
8242                         (void) ql_initiate_lip(ha);
8243                         TASK_DAEMON_LOCK(ha);
8244                         loop_again = TRUE;
8245                 }
8246 
8247                 /* Abort queues needed. */
8248                 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8249                         ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8250                         TASK_DAEMON_UNLOCK(ha);
8251                         ql_abort_queues(ha);
8252                         TASK_DAEMON_LOCK(ha);
8253                 }
8254 
8255                 /* Not suspended, awaken waiting routines. */
8256                 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8257                     ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8258                         ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8259                         cv_broadcast(&ha->cv_dr_suspended);
8260                         loop_again = TRUE;
8261                 }
8262 
8263                 /* Handle RSCN changes. */
8264                 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8265                         if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8266                                 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8267                                 TASK_DAEMON_UNLOCK(ha);
8268                                 (void) ql_handle_rscn_update(vha);
8269                                 TASK_DAEMON_LOCK(ha);
8270                                 loop_again = TRUE;
8271                         }
8272                 }
8273 
8274                 /* Handle state changes. */
8275                 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8276                         if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8277                             !(ha->task_daemon_flags &
8278                             TASK_DAEMON_POWERING_DOWN)) {
8279                                 /* Report state change. */
8280                                 EL(vha, "state change = %xh\n", vha->state);
8281                                 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8282 
8283                                 if (vha->task_daemon_flags &
8284                                     COMMAND_WAIT_NEEDED) {
8285                                         vha->task_daemon_flags &=
8286                                             ~COMMAND_WAIT_NEEDED;
8287                                         if (!(ha->task_daemon_flags &
8288                                             COMMAND_WAIT_ACTIVE)) {
8289                                                 ha->task_daemon_flags |=
8290                                                     COMMAND_WAIT_ACTIVE;
8291                                                 TASK_DAEMON_UNLOCK(ha);
8292                                                 ql_cmd_wait(ha);
8293                                                 TASK_DAEMON_LOCK(ha);
8294                                                 ha->task_daemon_flags &=
8295                                                     ~COMMAND_WAIT_ACTIVE;
8296                                         }
8297                                 }
8298 
8299                                 msg = NULL;
8300                                 if (FC_PORT_STATE_MASK(vha->state) ==
8301                                     FC_STATE_OFFLINE) {
8302                                         if (vha->task_daemon_flags &
8303                                             STATE_ONLINE) {
8304                                                 if (ha->topology &
8305                                                     QL_LOOP_CONNECTION) {
8306                                                         msg = "Loop OFFLINE";
8307                                                 } else {
8308                                                         msg = "Link OFFLINE";
8309                                                 }
8310                                         }
8311                                         vha->task_daemon_flags &=
8312                                             ~STATE_ONLINE;
8313                                 } else if (FC_PORT_STATE_MASK(vha->state) ==
8314                                     FC_STATE_LOOP) {
8315                                         if (!(vha->task_daemon_flags &
8316                                             STATE_ONLINE)) {
8317                                                 msg = "Loop ONLINE";
8318                                         }
8319                                         vha->task_daemon_flags |= STATE_ONLINE;
8320                                 } else if (FC_PORT_STATE_MASK(vha->state) ==
8321                                     FC_STATE_ONLINE) {
8322                                         if (!(vha->task_daemon_flags &
8323                                             STATE_ONLINE)) {
8324                                                 msg = "Link ONLINE";
8325                                         }
8326                                         vha->task_daemon_flags |= STATE_ONLINE;
8327                                 } else {
8328                                         msg = "Unknown Link state";
8329                                 }
8330 
8331                                 if (msg != NULL) {
8332                                         cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8333                                             "%s", QL_NAME, ha->instance,
8334                                             vha->vp_index, msg);
8335                                 }
8336 
8337                                 if (vha->flags & FCA_BOUND) {
8338                                         QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8339                                             "cb state=%xh\n", ha->instance,
8340                                             vha->vp_index, vha->state);
8341                                         TASK_DAEMON_UNLOCK(ha);
8342                                         (vha->bind_info.port_statec_cb)
8343                                             (vha->bind_info.port_handle,
8344                                             vha->state);
8345                                         TASK_DAEMON_LOCK(ha);
8346                                 }
8347                                 loop_again = TRUE;
8348                         }
8349                 }
8350 
8351                 if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8352                     !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8353                         EL(ha, "processing LIP reset\n");
8354                         ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8355                         TASK_DAEMON_UNLOCK(ha);
8356                         for (vha = ha; vha != NULL; vha = vha->vp_next) {
8357                                 if (vha->flags & FCA_BOUND) {
8358                                         QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8359                                             "cb reset\n", ha->instance,
8360                                             vha->vp_index);
8361                                         (vha->bind_info.port_statec_cb)
8362                                             (vha->bind_info.port_handle,
8363                                             FC_STATE_TARGET_PORT_RESET);
8364                                 }
8365                         }
8366                         TASK_DAEMON_LOCK(ha);
8367                         loop_again = TRUE;
8368                 }
8369 
8370                 if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8371                     FIRMWARE_UP)) {
8372                         /*
8373                          * The firmware needs more unsolicited
8374                          * buffers. We cannot allocate new buffers
8375                          * unless the ULP module requests them.
8376                          * All we can do here is return received
8377                          * buffers from the pool that has already
8378                          * been allocated.
8379                          */
8380                         ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8381                         TASK_DAEMON_UNLOCK(ha);
8382                         ql_isp_rcvbuf(ha);
8383                         TASK_DAEMON_LOCK(ha);
8384                         loop_again = TRUE;
8385                 }
8386 
8387                 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8388                         TASK_DAEMON_UNLOCK(ha);
8389                         (void) ql_abort_isp(ha);
8390                         TASK_DAEMON_LOCK(ha);
8391                         loop_again = TRUE;
8392                 }
8393 
8394                 if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8395                     COMMAND_WAIT_NEEDED))) {
8396                         if (QL_IS_SET(ha->task_daemon_flags,
8397                             RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8398                                 ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8399                                 if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8400                                         ha->task_daemon_flags |= RESET_ACTIVE;
8401                                         TASK_DAEMON_UNLOCK(ha);
8402                                         for (vha = ha; vha != NULL;
8403                                             vha = vha->vp_next) {
8404                                                 ql_rst_aen(vha);
8405                                         }
8406                                         TASK_DAEMON_LOCK(ha);
8407                                         ha->task_daemon_flags &= ~RESET_ACTIVE;
8408                                         loop_again = TRUE;
8409                                 }
8410                         }
8411 
8412                         if (QL_IS_SET(ha->task_daemon_flags,
8413                             LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8414                                 if (!(ha->task_daemon_flags &
8415                                     LOOP_RESYNC_ACTIVE)) {
8416                                         ha->task_daemon_flags |=
8417                                             LOOP_RESYNC_ACTIVE;
8418                                         TASK_DAEMON_UNLOCK(ha);
8419                                         (void) ql_loop_resync(ha);
8420                                         TASK_DAEMON_LOCK(ha);
8421                                         loop_again = TRUE;
8422                                 }
8423                         }
8424                 }
8425 
8426                 /* Port retry needed. */
8427                 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8428                         ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8429                         ADAPTER_STATE_LOCK(ha);
8430                         ha->port_retry_timer = 0;
8431                         ADAPTER_STATE_UNLOCK(ha);
8432 
8433                         TASK_DAEMON_UNLOCK(ha);
8434                         ql_restart_queues(ha);
8435                         TASK_DAEMON_LOCK(ha);
8436                         loop_again = B_TRUE;
8437                 }
8438 
8439                 /* iiDMA setting needed? */
8440                 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8441                         ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8442 
8443                         TASK_DAEMON_UNLOCK(ha);
8444                         ql_iidma(ha);
8445                         TASK_DAEMON_LOCK(ha);
8446                         loop_again = B_TRUE;
8447                 }
8448 
8449                 if (ha->task_daemon_flags & SEND_PLOGI) {
8450                         ha->task_daemon_flags &= ~SEND_PLOGI;
8451                         TASK_DAEMON_UNLOCK(ha);
8452                         (void) ql_n_port_plogi(ha);
8453                         TASK_DAEMON_LOCK(ha);
8454                 }
8455 
8456                 head = &ha->callback_queue;
8457                 if (head->first != NULL) {
8458                         sp = head->first->base_address;
8459                         link = &sp->cmd;
8460 
8461                         /* Dequeue command. */
8462                         ql_remove_link(head, link);
8463 
8464                         /* Release task daemon lock. */
8465                         TASK_DAEMON_UNLOCK(ha);
8466 
8467                         /* Do callback. */
8468                         if (sp->flags & SRB_UB_CALLBACK) {
8469                                 ql_unsol_callback(sp);
8470                         } else {
8471                                 (*sp->pkt->pkt_comp)(sp->pkt);
8472                         }
8473 
8474                         /* Acquire task daemon lock. */
8475                         TASK_DAEMON_LOCK(ha);
8476 
8477                         loop_again = TRUE;
8478                 }
8479 
8480         } while (loop_again);
8481 }
8482 
8483 /*
8484  * ql_idle_check
8485  *      Tests that the adapter is alive and well.
8486  *
8487  * Input:
8488  *      ha:     adapter state pointer.
8489  *
8490  * Context:
8491  *      Kernel context.
8492  */
8493 static void
8494 ql_idle_check(ql_adapter_state_t *ha)
8495 {
8496         ddi_devstate_t  state;
8497         int             rval;
8498         ql_mbx_data_t   mr;
8499 
8500         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8501 
8502         /* Firmware Ready Test. */
8503         rval = ql_get_firmware_state(ha, &mr);
8504         if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8505             (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8506                 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8507                 state = ddi_get_devstate(ha->dip);
8508                 if (state == DDI_DEVSTATE_UP) {
8509                         /*EMPTY*/
8510                         ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8511                             DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8512                 }
8513                 TASK_DAEMON_LOCK(ha);
8514                 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8515                         EL(ha, "fstate_ready, isp_abort_needed\n");
8516                         ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8517                 }
8518                 TASK_DAEMON_UNLOCK(ha);
8519         }
8520 
8521         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8522 }
8523 
8524 /*
8525  * ql_unsol_callback
8526  *      Handle unsolicited buffer callbacks.
8527  *
8528  * Input:
8529  *      ha = adapter state pointer.
8530  *      sp = srb pointer.
8531  *
8532  * Context:
8533  *      Kernel context.
8534  */
8535 static void
8536 ql_unsol_callback(ql_srb_t *sp)
8537 {
8538         fc_affected_id_t        *af;
8539         fc_unsol_buf_t          *ubp;
8540         uchar_t                 r_ctl;
8541         uchar_t                 ls_code;
8542         ql_tgt_t                *tq;
8543         ql_adapter_state_t      *ha = sp->ha, *pha = sp->ha->pha;
8544 
8545         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8546 
8547         ubp = ha->ub_array[sp->handle];
8548         r_ctl = ubp->ub_frame.r_ctl;
8549         ls_code = ubp->ub_buffer[0];
8550 
8551         if (sp->lun_queue == NULL) {
8552                 tq = NULL;
8553         } else {
8554                 tq = sp->lun_queue->target_queue;
8555         }
8556 
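             /*
              * If the buffer is being freed or the adapter is powering down,
              * just return the buffer to the FCA.
              */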
8557         QL_UB_LOCK(ha);
8558         if (sp->flags & SRB_UB_FREE_REQUESTED ||
8559             pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8560                 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8561                     SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8562                 sp->flags |= SRB_UB_IN_FCA;
8563                 QL_UB_UNLOCK(ha);
8564                 return;
8565         }
8566 
8567         /* Process RSCN */
8568         if (sp->flags & SRB_UB_RSCN) {
8569                 int sendup = 1;
8570 
8571                 /*
8572                  * Defer RSCN posting until commands return
8573                  */
8574                 QL_UB_UNLOCK(ha);
8575 
8576                 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8577 
8578                 /* Abort outstanding commands */
8579                 sendup = ql_process_rscn(ha, af);
8580                 if (sendup == 0) {
8581 
8582                         TASK_DAEMON_LOCK(ha);
8583                         ql_add_link_b(&pha->callback_queue, &sp->cmd);
8584                         TASK_DAEMON_UNLOCK(ha);
8585 
8586                         /*
8587                          * Wait for commands to drain in F/W (doesn't take
8588                          * more than a few milliseconds)
8589                          */
8590                         ql_delay(ha, 10000);
8591 
8592                         QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8593                             "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
8594                             af->aff_format, af->aff_d_id);
8595                         return;
8596                 }
8597 
8598                 QL_UB_LOCK(ha);
8599 
8600                 EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8601                     af->aff_format, af->aff_d_id);
8602         }
8603 
8604         /* Process UNSOL LOGO */
8605         if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8606                 QL_UB_UNLOCK(ha);
8607 
8608                 if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8609                         TASK_DAEMON_LOCK(ha);
8610                         ql_add_link_b(&pha->callback_queue, &sp->cmd);
8611                         TASK_DAEMON_UNLOCK(ha);
8612                         QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8613                             "\n", ha->instance, ha->vp_index, tq->d_id.b24);
8614                         return;
8615                 }
8616 
8617                 QL_UB_LOCK(ha);
8618                 EL(ha, "sending unsol logout for %xh to transport\n",
8619                     ubp->ub_frame.s_id);
8620         }
8621 
8622         sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8623             SRB_UB_FCP);
8624 
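             /* Sync IP (IS8802 SNAP) receive buffer data for the CPU before the callback. */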
8625         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8626                 (void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8627                     ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8628         }
8629         QL_UB_UNLOCK(ha);
8630 
8631         (ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8632             ubp, sp->ub_type);
8633 
8634         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8635 }
8636 
8637 /*
8638  * ql_send_logo
8639  *
8640  * Input:
8641  *      ha:     adapter state pointer.
8642  *      tq:     target queue pointer.
8643  *      done_q: done queue pointer.
8644  *
8645  * Context:
8646  *      Interrupt or Kernel context, no mailbox commands allowed.
8647  */
8648 void
8649 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
8650 {
8651         fc_unsol_buf_t          *ubp;
8652         ql_srb_t                *sp;
8653         la_els_logo_t           *payload;
8654         ql_adapter_state_t      *ha = vha->pha;
8655 
8656         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8657             tq->d_id.b24);
8658 
8659         if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
8660                 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8661                 return;
8662         }
8663 
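             /*
              * Only emulate a LOGO when the device has no RSCN or PLOGI in
              * progress, no logout has already been sent, and the loop is up.
              */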
8664         if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8665             tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8666 
8667                 /* Locate a buffer to use. */
8668                 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8669                 if (ubp == NULL) {
8670                         EL(vha, "Failed, get_unsolicited_buffer\n");
8671                         return;
8672                 }
8673 
8674                 DEVICE_QUEUE_LOCK(tq);
8675                 tq->flags |= TQF_NEED_AUTHENTICATION;
8676                 tq->logout_sent++;
8677                 DEVICE_QUEUE_UNLOCK(tq);
8678 
8679                 EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);
8680 
8681                 sp = ubp->ub_fca_private;
8682 
8683                 /* Set header. */
8684                 ubp->ub_frame.d_id = vha->d_id.b24;
8685                 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8686                 ubp->ub_frame.s_id = tq->d_id.b24;
8687                 ubp->ub_frame.rsvd = 0;
8688                 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8689                     F_CTL_SEQ_INITIATIVE;
8690                 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8691                 ubp->ub_frame.seq_cnt = 0;
8692                 ubp->ub_frame.df_ctl = 0;
8693                 ubp->ub_frame.seq_id = 0;
8694                 ubp->ub_frame.rx_id = 0xffff;
8695                 ubp->ub_frame.ox_id = 0xffff;
8696 
8697                 /* set payload. */
8698                 payload = (la_els_logo_t *)ubp->ub_buffer;
8699                 bzero(payload, sizeof (la_els_logo_t));
8700                 /* Make sure ls_code in payload is always big endian */
8701                 ubp->ub_buffer[0] = LA_ELS_LOGO;
8702                 ubp->ub_buffer[1] = 0;
8703                 ubp->ub_buffer[2] = 0;
8704                 ubp->ub_buffer[3] = 0;
8705                 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8706                     &payload->nport_ww_name.raw_wwn[0], 8);
8707                 payload->nport_id.port_id = tq->d_id.b24;
8708 
8709                 QL_UB_LOCK(ha);
8710                 sp->flags |= SRB_UB_CALLBACK;
8711                 QL_UB_UNLOCK(ha);
8712                 if (tq->lun_queues.first != NULL) {
8713                         sp->lun_queue = (tq->lun_queues.first)->base_address;
8714                 } else {
8715                         sp->lun_queue = ql_lun_queue(vha, tq, 0);
8716                 }
8717                 if (done_q) {
8718                         ql_add_link_b(done_q, &sp->cmd);
8719                 } else {
8720                         ql_awaken_task_daemon(ha, sp, 0, 0);
8721                 }
8722         }
8723 
8724         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8725 }
8726 
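     /*
      * ql_process_logo_for_device
      *      Determines whether a LOGO for the device can be sent up to
      *      the transport now, or must be deferred because commands are
      *      still outstanding in the firmware or on the callback queue.
      *
      * Input:
      *      ha:     adapter state pointer.
      *      tq:     target queue pointer.
      *
      * Returns:
      *      1 = send the LOGO up, 0 = defer it.
      *
      * Context:
      *      Kernel context.
      */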
8727 static int
8728 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8729 {
8730         port_id_t       d_id;
8731         ql_srb_t        *sp;
8732         ql_link_t       *link;
8733         int             sendup = 1;
8734 
8735         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8736 
8737         DEVICE_QUEUE_LOCK(tq);
8738         if (tq->outcnt) {
8739                 DEVICE_QUEUE_UNLOCK(tq);
8740                 sendup = 0;
8741                 (void) ql_abort_device(ha, tq, 1);
8742                 ql_delay(ha, 10000);
8743         } else {
8744                 DEVICE_QUEUE_UNLOCK(tq);
8745                 TASK_DAEMON_LOCK(ha);
8746 
8747                 for (link = ha->pha->callback_queue.first; link != NULL;
8748                     link = link->next) {
8749                         sp = link->base_address;
8750                         if (sp->flags & SRB_UB_CALLBACK) {
8751                                 continue;
8752                         }
8753                         d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8754 
8755                         if (tq->d_id.b24 == d_id.b24) {
8756                                 sendup = 0;
8757                                 break;
8758                         }
8759                 }
8760 
8761                 TASK_DAEMON_UNLOCK(ha);
8762         }
8763 
8764         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8765 
8766         return (sendup);
8767 }
8768 
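     /*
      * ql_send_plogi
      *      Emulates a PLOGI from the target by building an unsolicited
      *      ELS buffer from the cached login parameters and queuing it
      *      for delivery to the transport.
      *
      * Input:
      *      ha:     adapter state pointer.
      *      tq:     target queue pointer.
      *      done_q: done queue pointer.
      *
      * Returns:
      *      QL_SUCCESS or QL_FUNCTION_FAILED.
      */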
8769 static int
8770 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8771 {
8772         fc_unsol_buf_t          *ubp;
8773         ql_srb_t                *sp;
8774         la_els_logi_t           *payload;
8775         class_svc_param_t       *class3_param;
8776 
8777         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8778 
8779         if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8780             LOOP_DOWN)) {
8781                 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8782                 return (QL_FUNCTION_FAILED);
8783         }
8784 
8785         /* Locate a buffer to use. */
8786         ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8787         if (ubp == NULL) {
8788                 EL(ha, "Failed\n");
8789                 return (QL_FUNCTION_FAILED);
8790         }
8791 
8792         QL_PRINT_3(CE_CONT, "(%d): Emulating PLOGI from = %xh\n",
8793             ha->instance, tq->d_id.b24);
8794 
8795         EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8796 
8797         sp = ubp->ub_fca_private;
8798 
8799         /* Set header. */
8800         ubp->ub_frame.d_id = ha->d_id.b24;
8801         ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8802         ubp->ub_frame.s_id = tq->d_id.b24;
8803         ubp->ub_frame.rsvd = 0;
8804         ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8805             F_CTL_SEQ_INITIATIVE;
8806         ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8807         ubp->ub_frame.seq_cnt = 0;
8808         ubp->ub_frame.df_ctl = 0;
8809         ubp->ub_frame.seq_id = 0;
8810         ubp->ub_frame.rx_id = 0xffff;
8811         ubp->ub_frame.ox_id = 0xffff;
8812 
8813         /* set payload. */
8814         payload = (la_els_logi_t *)ubp->ub_buffer;
8815         bzero(payload, sizeof (la_els_logi_t));
8816 
8817         payload->ls_code.ls_code = LA_ELS_PLOGI;
8818         payload->common_service.fcph_version = 0x2006;
8819         payload->common_service.cmn_features = 0x8800;
8820 
8821         CFG_IST(ha, CFG_CTRL_24258081) ?
8822             (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8823             ha->init_ctrl_blk.cb24.max_frame_length[0],
8824             ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8825             (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8826             ha->init_ctrl_blk.cb.max_frame_length[0],
8827             ha->init_ctrl_blk.cb.max_frame_length[1]));
8828 
8829         payload->common_service.conc_sequences = 0xff;
8830         payload->common_service.relative_offset = 0x03;
8831         payload->common_service.e_d_tov = 0x7d0;
8832 
8833         bcopy((void *)&tq->port_name[0],
8834             (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8835 
8836         bcopy((void *)&tq->node_name[0],
8837             (void *)&payload->node_ww_name.raw_wwn[0], 8);
8838 
8839         class3_param = (class_svc_param_t *)&payload->class_3;
8840         class3_param->class_valid_svc_opt = 0x8000;
8841         class3_param->recipient_ctl = tq->class3_recipient_ctl;
8842         class3_param->rcv_data_size = tq->class3_rcv_data_size;
8843         class3_param->conc_sequences = tq->class3_conc_sequences;
8844         class3_param->open_sequences_per_exch =
8845             tq->class3_open_sequences_per_exch;
8846 
8847         QL_UB_LOCK(ha);
8848         sp->flags |= SRB_UB_CALLBACK;
8849         QL_UB_UNLOCK(ha);
8850 
8851         ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8852 
8853         if (done_q) {
8854                 ql_add_link_b(done_q, &sp->cmd);
8855         } else {
8856                 ql_awaken_task_daemon(ha, sp, 0, 0);
8857         }
8858 
8859         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8860 
8861         return (QL_SUCCESS);
8862 }
8863 
8864 /*
8865  * Abort outstanding commands in the firmware, clear internally
8866  * queued commands in the driver, and synchronize the target with
8867  * the firmware.
8868  */
8869 int
8870 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8871 {
8872         ql_link_t       *link, *link2;
8873         ql_lun_t        *lq;
8874         int             rval = QL_SUCCESS;
8875         ql_srb_t        *sp;
8876         ql_head_t       done_q = { NULL, NULL };
8877 
8878         QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8879 
8880         /*
8881          * First, clear internally queued commands.
8882          */
8883         DEVICE_QUEUE_LOCK(tq);
8884         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8885                 lq = link->base_address;
8886 
8887                 link2 = lq->cmd.first;
8888                 while (link2 != NULL) {
8889                         sp = link2->base_address;
8890                         link2 = link2->next;
8891 
8892                         if (sp->flags & SRB_ABORT) {
8893                                 continue;
8894                         }
8895 
8896                         /* Remove srb from device command queue. */
8897                         ql_remove_link(&lq->cmd, &sp->cmd);
8898                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8899 
8900                         /* Set ending status. */
8901                         sp->pkt->pkt_reason = CS_ABORTED;
8902 
8903                         /* Call done routine to handle completions. */
8904                         ql_add_link_b(&done_q, &sp->cmd);
8905                 }
8906         }
8907         DEVICE_QUEUE_UNLOCK(tq);
8908 
8909         if (done_q.first != NULL) {
8910                 ql_done(done_q.first);
8911         }
8912 
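             /* If draining, also abort the commands outstanding in the firmware for this target. */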
8913         if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8914                 rval = ql_abort_target(ha, tq, 0);
8915         }
8916 
8917         if (rval != QL_SUCCESS) {
8918                 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8919         } else {
8920                 /*EMPTY*/
8921                 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8922                     ha->vp_index);
8923         }
8924 
8925         return (rval);
8926 }
8927 
8928 /*
8929  * ql_rcv_rscn_els
8930  *      Processes received RSCN extended link service.
8931  *
8932  * Input:
8933  *      ha:     adapter state pointer.
8934  *      mb:     array containing input mailbox registers.
8935  *      done_q: done queue pointer.
8936  *
8937  * Context:
8938  *      Interrupt or Kernel context, no mailbox commands allowed.
8939  */
8940 void
8941 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8942 {
8943         fc_unsol_buf_t          *ubp;
8944         ql_srb_t                *sp;
8945         fc_rscn_t               *rn;
8946         fc_affected_id_t        *af;
8947         port_id_t               d_id;
8948 
8949         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8950 
8951         /* Locate a buffer to use. */
8952         ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8953         if (ubp != NULL) {
8954                 sp = ubp->ub_fca_private;
8955 
8956                 /* Set header. */
8957                 ubp->ub_frame.d_id = ha->d_id.b24;
8958                 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8959                 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8960                 ubp->ub_frame.rsvd = 0;
8961                 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8962                     F_CTL_SEQ_INITIATIVE;
8963                 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8964                 ubp->ub_frame.seq_cnt = 0;
8965                 ubp->ub_frame.df_ctl = 0;
8966                 ubp->ub_frame.seq_id = 0;
8967                 ubp->ub_frame.rx_id = 0xffff;
8968                 ubp->ub_frame.ox_id = 0xffff;
8969 
 
8978                 d_id.b.area = MSB(mb[2]);
8979                 d_id.b.domain = LSB(mb[1]);
8980                 af->aff_d_id = d_id.b24;
8981                 af->aff_format = MSB(mb[1]);
8982 
8983                 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8984                     af->aff_d_id);
8985 
8986                 ql_update_rscn(ha, af);
8987 
8988                 QL_UB_LOCK(ha);
8989                 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8990                 QL_UB_UNLOCK(ha);
8991                 ql_add_link_b(done_q, &sp->cmd);
8992         }
8993 
8994         if (ubp == NULL) {
8995                 EL(ha, "Failed, get_unsolicited_buffer\n");
8996         } else {
8997                 /*EMPTY*/
8998                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8999         }
9000 }
9001 
9002 /*
9003  * ql_update_rscn
9004  *      Update devices from received RSCN.
9005  *
9006  * Input:
9007  *      ha:     adapter state pointer.
9008  *      af:     pointer to RSCN data.
9009  *
9010  * Context:
9011  *      Interrupt or Kernel context, no mailbox commands allowed.
9012  */
9013 static void
9014 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9015 {
9016         ql_link_t       *link;
9017         uint16_t        index;
9018         ql_tgt_t        *tq;
9019 
9020         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9021 
9022         if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9023                 port_id_t d_id;
9024 
9025                 d_id.r.rsvd_1 = 0;
9026                 d_id.b24 = af->aff_d_id;
9027 
9028                 tq = ql_d_id_to_queue(ha, d_id);
9029                 if (tq) {
9030                         EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9031                         DEVICE_QUEUE_LOCK(tq);
9032                         tq->flags |= TQF_RSCN_RCVD;
9033                         DEVICE_QUEUE_UNLOCK(tq);
9034                 }
9035                 QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9036                     ha->instance);
9037 
9038                 return;
9039         }
9040 
9041         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9042                 for (link = ha->dev[index].first; link != NULL;
9043                     link = link->next) {
9044                         tq = link->base_address;
9045 
9046                         switch (af->aff_format) {
9047                         case FC_RSCN_FABRIC_ADDRESS:
9048                                 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9049                                         EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9050                                             tq->d_id.b24);
9051                                         DEVICE_QUEUE_LOCK(tq);
9052                                         tq->flags |= TQF_RSCN_RCVD;
9053                                         DEVICE_QUEUE_UNLOCK(tq);
9054                                 }
9055                                 break;
9056 
9057                         case FC_RSCN_AREA_ADDRESS:
9058                                 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9059                                         EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9060                                             tq->d_id.b24);
9061                                         DEVICE_QUEUE_LOCK(tq);
9062                                         tq->flags |= TQF_RSCN_RCVD;
9063                                         DEVICE_QUEUE_UNLOCK(tq);
9064                                 }
9065                                 break;
9066 
9067                         case FC_RSCN_DOMAIN_ADDRESS:
9068                                 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9069                                         EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9070                                             tq->d_id.b24);
9071                                         DEVICE_QUEUE_LOCK(tq);
9072                                         tq->flags |= TQF_RSCN_RCVD;
9073                                         DEVICE_QUEUE_UNLOCK(tq);
9074                                 }
9075                                 break;
9076 
9077                         default:
9078                                 break;
9079                         }
9080                 }
9081         }
9082         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9083 }
9084 
9085 /*
9086  * ql_process_rscn
9087  *
9088  * Input:
9089  *      ha:     adapter state pointer.
9090  *      af:     RSCN payload pointer.
9091  *
9092  * Context:
9093  *      Kernel context.
9094  */
9095 static int
9096 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9097 {
9098         int             sendit;
9099         int             sendup = 1;
9100         ql_link_t       *link;
9101         uint16_t        index;
9102         ql_tgt_t        *tq;
9103 
9104         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9105 
9106         if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9107                 port_id_t d_id;
9108 
9109                 d_id.r.rsvd_1 = 0;
9110                 d_id.b24 = af->aff_d_id;
9111 
9112                 tq = ql_d_id_to_queue(ha, d_id);
9113                 if (tq) {
9114                         sendup = ql_process_rscn_for_device(ha, tq);
9115                 }
9116 
9117                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9118 
9119                 return (sendup);
9120         }
9121 
9122         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9123                 for (link = ha->dev[index].first; link != NULL;
9124                     link = link->next) {
9125 
9126                         tq = link->base_address;
9127                         if (tq == NULL) {
9128                                 continue;
9129                         }
9130 
9131                         switch (af->aff_format) {
9132                         case FC_RSCN_FABRIC_ADDRESS:
9133                                 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9134                                         sendit = ql_process_rscn_for_device(
9135                                             ha, tq);
9136                                         if (sendup) {
9137                                                 sendup = sendit;
 
9152                                 break;
9153 
9154                         case FC_RSCN_DOMAIN_ADDRESS:
9155                                 if ((tq->d_id.b24 & 0xff0000) ==
9156                                     af->aff_d_id) {
9157                                         sendit = ql_process_rscn_for_device(
9158                                             ha, tq);
9159 
9160                                         if (sendup) {
9161                                                 sendup = sendit;
9162                                         }
9163                                 }
9164                                 break;
9165 
9166                         default:
9167                                 break;
9168                         }
9169                 }
9170         }
9171 
9172         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9173 
9174         return (sendup);
9175 }
9176 
9177 /*
9178  * ql_process_rscn_for_device
      *	Processes an RSCN for a single target queue.
9179  *
9180  * Input:
9181  *	ha:	adapter state pointer.
9182  *	tq:	target queue pointer.
9183  *
      * Returns:
      *	non-zero if the RSCN should be reported up to the FC transport.
      *
9184  * Context:
9185  *	Kernel context.
9186  */
9187 static int
9188 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9189 {
9190         int sendup = 1;
9191 
9192         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9193 
9194         DEVICE_QUEUE_LOCK(tq);
9195 
9196         /*
9197          * Let FCP-2 compliant devices continue I/Os
9198          * with their low level recoveries.
9199          */
9200         if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9201             (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9202                 /*
9203                  * Cause ADISC to go out
9204                  */
9205                 DEVICE_QUEUE_UNLOCK(tq);
9206 
9207                 (void) ql_get_port_database(ha, tq, PDF_NONE);
9208 
9209                 DEVICE_QUEUE_LOCK(tq);
9210                 tq->flags &= ~TQF_RSCN_RCVD;
9211 
9212         } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9213                 if (tq->d_id.b24 != BROADCAST_ADDR) {
9214                         tq->flags |= TQF_NEED_AUTHENTICATION;
9215                 }
9216 
9217                 DEVICE_QUEUE_UNLOCK(tq);
9218 
9219                 (void) ql_abort_device(ha, tq, 1);
9220 
9221                 DEVICE_QUEUE_LOCK(tq);
9222 
9223                 if (tq->outcnt) {
9224                         sendup = 0;
9225                 } else {
9226                         tq->flags &= ~TQF_RSCN_RCVD;
9227                 }
9228         } else {
9229                 tq->flags &= ~TQF_RSCN_RCVD;
9230         }
9231 
9232         if (sendup) {
9233                 if (tq->d_id.b24 != BROADCAST_ADDR) {
9234                         tq->flags |= TQF_NEED_AUTHENTICATION;
9235                 }
9236         }
9237 
9238         DEVICE_QUEUE_UNLOCK(tq);
9239 
9240         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9241 
9242         return (sendup);
9243 }
9244 
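     /*
      * ql_handle_rscn_update
      *	Updates the driver's device queues from the firmware d_id list
      *	after an RSCN and reports newly discovered devices by sending
      *	a PLOGI up to the transport.
      *
      * Input:
      *	ha:	adapter state pointer.
      *
      * Returns:
      *	ql local function return status code.
      *
      * Context:
      *	Kernel context.
      */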
9245 static int
9246 ql_handle_rscn_update(ql_adapter_state_t *ha)
9247 {
9248         int                     rval;
9249         ql_tgt_t                *tq;
9250         uint16_t                index, loop_id;
9251         ql_dev_id_list_t        *list;
9252         uint32_t                list_size;
9253         port_id_t               d_id;
9254         ql_mbx_data_t           mr;
9255         ql_head_t               done_q = { NULL, NULL };
9256 
9257         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9258 
9259         list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9260         list = kmem_zalloc(list_size, KM_SLEEP);
9261         if (list == NULL) {
9262                 rval = QL_MEMORY_ALLOC_FAILED;
9263                 EL(ha, "kmem_zalloc failed=%xh\n", rval);
9264                 return (rval);
9265         }
9266 
9267         /*
9268          * Get data from RISC code d_id list to init each device queue.
9269          */
9270         rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9271         if (rval != QL_SUCCESS) {
9272                 kmem_free(list, list_size);
9273                 EL(ha, "get_id_list failed=%xh\n", rval);
9274                 return (rval);
9275         }
9276 
9277         /* Acquire adapter state lock. */
9278         ADAPTER_STATE_LOCK(ha);
9279 
9280         /* Check for new devices */
9281         for (index = 0; index < mr.mb[1]; index++) {
9282                 ql_dev_list(ha, list, index, &d_id, &loop_id);
9283 
9284                 if (VALID_DEVICE_ID(ha, loop_id)) {
9285                         d_id.r.rsvd_1 = 0;
9286 
9287                         tq = ql_d_id_to_queue(ha, d_id);
9288                         if (tq != NULL) {
9289                                 continue;
9290                         }
9291 
9292                         tq = ql_dev_init(ha, d_id, loop_id);
9293 
9294                         /* Test for fabric device. */
9295                         if (d_id.b.domain != ha->d_id.b.domain ||
9296                             d_id.b.area != ha->d_id.b.area) {
9297                                 tq->flags |= TQF_FABRIC_DEVICE;
9298                         }
9299 
9300                         ADAPTER_STATE_UNLOCK(ha);
9301                         if (ql_get_port_database(ha, tq, PDF_NONE) !=
9302                             QL_SUCCESS) {
9303                                 tq->loop_id = PORT_NO_LOOP_ID;
9304                         }
9305                         ADAPTER_STATE_LOCK(ha);
9306 
9307                         /*
9308                          * Send up a PLOGI about the new device
9309                          */
9310                         if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9311                                 (void) ql_send_plogi(ha, tq, &done_q);
9312                         }
9313                 }
9314         }
9315 
9316         /* Release adapter state lock. */
9317         ADAPTER_STATE_UNLOCK(ha);
9318 
9319         if (done_q.first != NULL) {
9320                 ql_done(done_q.first);
9321         }
9322 
9323         kmem_free(list, list_size);
9324 
9325         if (rval != QL_SUCCESS) {
9326                 EL(ha, "failed=%xh\n", rval);
9327         } else {
9328                 /*EMPTY*/
9329                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9330         }
9331 
9332         return (rval);
9333 }
9334 
9335 /*
9336  * ql_free_unsolicited_buffer
9337  *      Frees allocated buffer.
9338  *
9339  * Input:
9340  *      ha = adapter state pointer.
9341  *      ubp = unsolicited buffer pointer.
9342  *      ADAPTER_STATE_LOCK must be already obtained.
9343  *
9344  * Context:
9345  *      Kernel context.
9346  */
9347 static void
9348 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9349 {
9350         ql_srb_t        *sp;
9351         int             status;
9352 
9353         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9354 
9355         sp = ubp->ub_fca_private;
9356         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9357                 /* Disconnect IP from system buffers. */
9358                 if (ha->flags & IP_INITIALIZED) {
9359                         ADAPTER_STATE_UNLOCK(ha);
9360                         status = ql_shutdown_ip(ha);
9361                         ADAPTER_STATE_LOCK(ha);
9362                         if (status != QL_SUCCESS) {
9363                                 cmn_err(CE_WARN,
9364                                     "!Qlogic %s(%d): Failed to shutdown IP",
9365                                     QL_NAME, ha->instance);
9366                                 return;
9367                         }
9368 
9369                         ha->flags &= ~IP_ENABLED;
9370                 }
9371 
9372                 ql_free_phys(ha, &sp->ub_buffer);
9373         } else {
9374                 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9375         }
9376 
9377         kmem_free(sp, sizeof (ql_srb_t));
9378         kmem_free(ubp, sizeof (fc_unsol_buf_t));
9379 
9380         if (ha->ub_allocated != 0) {
9381                 ha->ub_allocated--;
9382         }
9383 
9384         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9385 }
9386 
9387 /*
9388  * ql_get_unsolicited_buffer
9389  *      Locates a free unsolicited buffer.
9390  *
9391  * Input:
9392  *      ha = adapter state pointer.
9393  *      type = buffer type.
9394  *
9395  * Returns:
9396  *      Unsolicited buffer pointer.
9397  *
9398  * Context:
9399  *      Interrupt or Kernel context, no mailbox commands allowed.
9400  */
9401 fc_unsol_buf_t *
9402 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9403 {
9404         fc_unsol_buf_t  *ubp;
9405         ql_srb_t        *sp;
9406         uint16_t        index;
9407 
9408         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9409 
9410         /* Locate a buffer to use. */
9411         ubp = NULL;
9412 
9413         QL_UB_LOCK(ha);
9414         for (index = 0; index < QL_UB_LIMIT; index++) {
9415                 ubp = ha->ub_array[index];
9416                 if (ubp != NULL) {
9417                         sp = ubp->ub_fca_private;
9418                         if ((sp->ub_type == type) &&
9419                             (sp->flags & SRB_UB_IN_FCA) &&
9420                             (!(sp->flags & (SRB_UB_CALLBACK |
9421                             SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9422                                 sp->flags |= SRB_UB_ACQUIRED;
9423                                 ubp->ub_resp_flags = 0;
9424                                 break;
9425                         }
9426                         ubp = NULL;
9427                 }
9428         }
9429         QL_UB_UNLOCK(ha);
9430 
9431         if (ubp) {
9432                 ubp->ub_resp_token = NULL;
9433                 ubp->ub_class = FC_TRAN_CLASS3;
9434         }
9435 
9436         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9437 
9438         return (ubp);
9439 }
9440 
9441 /*
9442  * ql_ub_frame_hdr
9443  *      Processes received unsolicited buffers from ISP.
9444  *
9445  * Input:
9446  *      ha:     adapter state pointer.
9447  *      tq:     target queue pointer.
9448  *      index:  unsolicited buffer array index.
9449  *      done_q: done queue pointer.
9450  *
9451  * Returns:
9452  *      ql local function return status code.
9453  *
9454  * Context:
9455  *      Interrupt or Kernel context, no mailbox commands allowed.
9456  */
9457 int
9458 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9459     ql_head_t *done_q)
9460 {
9461         fc_unsol_buf_t  *ubp;
9462         ql_srb_t        *sp;
9463         uint16_t        loop_id;
9464         int             rval = QL_FUNCTION_FAILED;
9465 
9466         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9467 
9468         QL_UB_LOCK(ha);
9469         if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9470                 EL(ha, "Invalid buffer index=%xh\n", index);
9471                 QL_UB_UNLOCK(ha);
9472                 return (rval);
9473         }
9474 
9475         sp = ubp->ub_fca_private;
9476         if (sp->flags & SRB_UB_FREE_REQUESTED) {
9477                 EL(ha, "buffer freed index=%xh\n", index);
9478                 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9479                     SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9480 
9481                 sp->flags |= SRB_UB_IN_FCA;
9482 
9483                 QL_UB_UNLOCK(ha);
9484                 return (rval);
9485         }
9486 
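             /*
              * If this is the expected IP (IS8802 SNAP) receive buffer and it
              * is still owned by the ISP, build the FC frame header for this
              * sequence segment and queue the buffer for callback.
              */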
9487         if ((sp->handle == index) &&
9488             (sp->flags & SRB_UB_IN_ISP) &&
9489             (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9490             (!(sp->flags & SRB_UB_ACQUIRED))) {
9491                 /* set broadcast D_ID */
9492                 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
9493                     BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9494                 if (tq->ub_loop_id == loop_id) {
9495                         if (ha->topology & QL_FL_PORT) {
9496                                 ubp->ub_frame.d_id = 0x000000;
9497                         } else {
9498                                 ubp->ub_frame.d_id = 0xffffff;
9499                         }
9500                 } else {
9501                         ubp->ub_frame.d_id = ha->d_id.b24;
9502                 }
9503                 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9504                 ubp->ub_frame.rsvd = 0;
9505                 ubp->ub_frame.s_id = tq->d_id.b24;
9506                 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9507                 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9508                 ubp->ub_frame.df_ctl = 0;
9509                 ubp->ub_frame.seq_id = tq->ub_seq_id;
9510                 ubp->ub_frame.rx_id = 0xffff;
9511                 ubp->ub_frame.ox_id = 0xffff;
9512                 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9513                     sp->ub_size : tq->ub_sequence_length;
9514                 ubp->ub_frame.ro = tq->ub_frame_ro;
9515 
9516                 tq->ub_sequence_length = (uint16_t)
9517                     (tq->ub_sequence_length - ubp->ub_bufsize);
9518                 tq->ub_frame_ro += ubp->ub_bufsize;
9519                 tq->ub_seq_cnt++;
9520 
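                     /*
                      * Mark this frame's place in the sequence: the last frame
                      * gets F_CTL_END_SEQ, the first frame gets F_CTL_FIRST_SEQ
                      * and, for a multi-frame sequence, df_ctl flagging the
                      * network header.
                      */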
9521                 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9522                         if (tq->ub_seq_cnt == 1) {
9523                                 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9524                                     F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9525                         } else {
9526                                 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9527                                     F_CTL_END_SEQ;
9528                         }
9529                         tq->ub_total_seg_cnt = 0;
9530                 } else if (tq->ub_seq_cnt == 1) {
9531                         ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9532                             F_CTL_FIRST_SEQ;
9533                         ubp->ub_frame.df_ctl = 0x20;
9534                 }
9535 
9536                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9537                     ha->instance, ubp->ub_frame.d_id);
9538                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9539                     ha->instance, ubp->ub_frame.s_id);
9540                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9541                     ha->instance, ubp->ub_frame.seq_cnt);
9542                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9543                     ha->instance, ubp->ub_frame.seq_id);
9544                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9545                     ha->instance, ubp->ub_frame.ro);
9546                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9547                     ha->instance, ubp->ub_frame.f_ctl);
9548                 QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9549                     ha->instance, ubp->ub_bufsize);
9550                 QL_DUMP_3(ubp->ub_buffer, 8,
9551                     ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9552 
9553                 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9554                 ql_add_link_b(done_q, &sp->cmd);
9555                 rval = QL_SUCCESS;
9556         } else {
9557                 if (sp->handle != index) {
9558                         EL(ha, "Bad index=%xh, expect=%xh\n", index,
9559                             sp->handle);
9560                 }
9561                 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9562                         EL(ha, "buffer was already in driver, index=%xh\n",
9563                             index);
9564                 }
9565                 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
9566                         EL(ha, "buffer was not an IP buffer, index=%xh\n",
9567                             index);
9568                 }
9569                 if (sp->flags & SRB_UB_ACQUIRED) {
9570                         EL(ha, "buffer was being used by driver, index=%xh\n",
9571                             index);
9572                 }
9573         }
9574         QL_UB_UNLOCK(ha);
9575 
9576         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9577 
9578         return (rval);
9579 }
9580 
9581 /*
9582  * ql_timer
9583  *      One second timer function.
9584  *
9585  * Input:
9586  *      ql_hba.first = first link in adapter list.
9587  *
9588  * Context:
9589  *      Interrupt context, no mailbox commands allowed.
9590  */
9591 static void
9592 ql_timer(void *arg)
9593 {
9594         ql_link_t               *link;
9595         uint32_t                set_flags;
9596         uint32_t                reset_flags;
9597         ql_adapter_state_t      *ha = NULL, *vha;
9598 
9599         QL_PRINT_6(CE_CONT, "started\n");
9600 
9601         /* Acquire global state lock. */
9602         GLOBAL_STATE_LOCK();
9603         if (ql_timer_timeout_id == NULL) {
9604                 /* Release global state lock. */
9605                 GLOBAL_STATE_UNLOCK();
9606                 return;
9607         }
9608 
9609         for (link = ql_hba.first; link != NULL; link = link->next) {
9610                 ha = link->base_address;
9611 
9612                 /* Skip adapter if suspended or stalled. */
9613                 ADAPTER_STATE_LOCK(ha);
9614                 if (ha->flags & ADAPTER_SUSPENDED ||
9615                     ha->task_daemon_flags & DRIVER_STALL) {
9616                         ADAPTER_STATE_UNLOCK(ha);
9617                         continue;
9618                 }
9619                 ha->flags |= ADAPTER_TIMER_BUSY;
9620                 ADAPTER_STATE_UNLOCK(ha);
9621 
9622                 QL_PM_LOCK(ha);
9623                 if (ha->power_level != PM_LEVEL_D0) {
9624                         QL_PM_UNLOCK(ha);
9625 
9626                         ADAPTER_STATE_LOCK(ha);
9627                         ha->flags &= ~ADAPTER_TIMER_BUSY;
9628                         ADAPTER_STATE_UNLOCK(ha);
9629                         continue;
9630                 }
9631                 ha->busy++;
9632                 QL_PM_UNLOCK(ha);
9633 
9634                 set_flags = 0;
9635                 reset_flags = 0;
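                     /*
                      * Task daemon flag changes are accumulated here and
                      * applied once at the end of this adapter's pass
                      * through the timer.
                      */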
9636 
9637                 /* Port retry timer handler. */
9638                 if (LOOP_READY(ha)) {
9639                         ADAPTER_STATE_LOCK(ha);
9640                         if (ha->port_retry_timer != 0) {
9641                                 ha->port_retry_timer--;
9642                                 if (ha->port_retry_timer == 0) {
9643                                         set_flags |= PORT_RETRY_NEEDED;
9644                                 }
9645                         }
9646                         ADAPTER_STATE_UNLOCK(ha);
9647                 }
9648 
9649                 /* Loop down timer handler. */
9650                 if (LOOP_RECONFIGURE(ha) == 0) {
9651                         if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9652                                 ha->loop_down_timer--;
9653                                 /*
9654                                  * give the firmware loop down dump flag
9655                                  * a chance to work.
9656                                  */
9657                                 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9658                                         if (CFG_IST(ha,
9659                                             CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9660                                                 (void) ql_binary_fw_dump(ha,
9661                                                     TRUE);
9662                                         }
9663                                         EL(ha, "loop_down_reset, "
9664                                             "isp_abort_needed\n");
9665                                         set_flags |= ISP_ABORT_NEEDED;
9666                                 }
9667                         }
9668                         if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9669                                 /* Command abort time handler. */
9670                                 if (ha->loop_down_timer ==
9671                                     ha->loop_down_abort_time) {
9672                                         ADAPTER_STATE_LOCK(ha);
9673                                         ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9674                                         ADAPTER_STATE_UNLOCK(ha);
9675                                         set_flags |= ABORT_QUEUES_NEEDED;
9676                                         EL(ha, "loop_down_abort_time, "
9677                                             "abort_queues_needed\n");
9678                                 }
9679 
9680                                 /* Watchdog timer handler. */
9681                                 if (ha->watchdog_timer == 0) {
9682                                         ha->watchdog_timer = WATCHDOG_TIME;
9683                                 } else if (LOOP_READY(ha)) {
9684                                         ha->watchdog_timer--;
9685                                         if (ha->watchdog_timer == 0) {
9686                                                 for (vha = ha; vha != NULL;
9687                                                     vha = vha->vp_next) {
9688                                                         ql_watchdog(vha,
9689                                                             &set_flags,
9690                                                             &reset_flags);
9691                                                 }
9692                                                 ha->watchdog_timer =
9693                                                     WATCHDOG_TIME;
9694                                         }
9695                                 }
9696                         }
9697                 }
9698 
9699                 /* Idle timer handler. */
9700                 if (!DRIVER_SUSPENDED(ha)) {
9701                         if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9702 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9703                                 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9704 #endif
9705                                 ha->idle_timer = 0;
9706                         }
9707                         if (ha->send_plogi_timer != 0) {
9708                                 ha->send_plogi_timer--;
9709                                 if (ha->send_plogi_timer == 0) {
9710                                         set_flags |= SEND_PLOGI;
9711                                 }
9712                         }
9713                 }
9714                 ADAPTER_STATE_LOCK(ha);
9715                 if (ha->idc_restart_timer != 0) {
9716                         ha->idc_restart_timer--;
9717                         if (ha->idc_restart_timer == 0) {
9718                                 ha->idc_restart_cnt = 0;
9719                                 reset_flags |= DRIVER_STALL;
9720                         }
9721                 }
9722                 if (ha->idc_flash_acc_timer != 0) {
9723                         ha->idc_flash_acc_timer--;
9724                         if (ha->idc_flash_acc_timer == 0 &&
9725                             ha->idc_flash_acc != 0) {
9726                                 ha->idc_flash_acc = 1;
9727                                 ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9728                                 ha->idc_mb[1] = 0;
9729                                 ha->idc_mb[2] = IDC_OPC_DRV_START;
9730                                 set_flags |= IDC_EVENT;
9731                         }
9732                 }
9733                 ADAPTER_STATE_UNLOCK(ha);
9734 
9735                 if (set_flags != 0 || reset_flags != 0) {
9736                         ql_awaken_task_daemon(ha, NULL, set_flags,
9737                             reset_flags);
9738                 }
9739 
9740                 if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9741                         ql_blink_led(ha);
9742                 }
9743 
9744                 /* Update the IO stats */
9745                 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9746                         ha->xioctl->IOInputMByteCnt +=
9747                             (ha->xioctl->IOInputByteCnt / 0x100000);
9748                         ha->xioctl->IOInputByteCnt %= 0x100000;
9749                 }
9750 
9751                 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9752                         ha->xioctl->IOOutputMByteCnt +=
9753                             (ha->xioctl->IOOutputByteCnt / 0x100000);
9754                         ha->xioctl->IOOutputByteCnt %= 0x100000;
9755                 }
9756 
9757                 if (CFG_IST(ha, CFG_CTRL_8021)) {
9758                         (void) ql_8021_idc_handler(ha);
9759                 }
9760 
9761                 ADAPTER_STATE_LOCK(ha);
9762                 ha->flags &= ~ADAPTER_TIMER_BUSY;
9763                 ADAPTER_STATE_UNLOCK(ha);
9764 
9765                 QL_PM_LOCK(ha);
9766                 ha->busy--;
9767                 QL_PM_UNLOCK(ha);
9768         }
9769 
9770         /* Restart timer, if not being stopped. */
9771         if (ql_timer_timeout_id != NULL) {
9772                 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9773         }
9774 
9775         /* Release global state lock. */
9776         GLOBAL_STATE_UNLOCK();
9777 
9778         QL_PRINT_6(CE_CONT, "done\n");
9779 }
9780 
9781 /*
9782  * ql_timeout_insert
9783  *      Function used to insert a command block onto the
9784  *      watchdog timer queue.
9785  *
9786  *      Note: Must ensure that pkt_timeout is not zero
9787  *                      before calling ql_timeout_insert.
9788  *
9789  * Input:
9790  *      ha:     adapter state pointer.
9791  *      tq:     target queue pointer.
9792  *      sp:     SRB pointer.
9793  *      DEVICE_QUEUE_LOCK must be already obtained.
9794  *
9795  * Context:
9796  *      Kernel context.
9797  */
9798 /* ARGSUSED */
9799 static void
9800 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9801 {
9802         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9803 
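             /*
              * isp_timeout is a 16-bit value, so only packets with a non-zero
              * timeout that fits in 16 bits are placed on the watchdog queue.
              */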
9804         if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9805                 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9806                 /*
9807                  * The timeout must be rounded up to a whole number of
9808                  * WATCHDOG_TIME ticks.  As an example, consider a 1 second
9809                  * timeout: if WATCHDOG_TIME is 1 and no rounding were done,
9810                  * the command could expire on the very next watchdog call,
9811                  * which might come almost immediately.
9812                  */
9813                 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9814                     WATCHDOG_TIME;
9815                 /*
9816                  * Added an additional 10 to account for the
9817                  * firmware timer drift which can occur with
9818                  * very long timeout values.
9819                  */
9820                 sp->wdg_q_time += 10;
9821 
9822                 /*
9823                  * Add 6 more to ensure the watchdog does not time out at the
9824                  * time as ISP RISC code timeout.
9825                  */
9826                 sp->wdg_q_time += 6;
9827 
9828                 /* Save initial time for resetting watchdog time. */
9829                 sp->init_wdg_q_time = sp->wdg_q_time;
9830 
9831                 /* Insert command onto watchdog queue. */
9832                 ql_add_link_b(&tq->wdg, &sp->wdg);
9833 
9834                 sp->flags |= SRB_WATCHDOG_ENABLED;
9835         } else {
9836                 sp->isp_timeout = 0;
9837                 sp->wdg_q_time = 0;
9838                 sp->init_wdg_q_time = 0;
9839         }
9840 
9841         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9842 }
9843 
9844 /*
9845  * ql_watchdog
9846  *      Timeout handler that runs in interrupt context. The
9847  *      ql_adapter_state_t * argument is the parameter set up when the
9848  *      timeout was initialized (state structure pointer).
9849  *      Updates command timeout values and aborts any command
9850  *      whose timeout has expired.
9851  *
9852  * Input:
9853  *      ha:             adapter state pointer.
9854  *      set_flags:      task daemon flags to set.
9855  *      reset_flags:    task daemon flags to reset.
9856  *
9857  * Context:
9858  *      Interrupt context, no mailbox commands allowed.
9859  */
9860 static void
9861 ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
9862 {
9863         ql_srb_t        *sp;
9864         ql_link_t       *link;
9865         ql_link_t       *next_cmd;
9866         ql_link_t       *next_device;
9867         ql_tgt_t        *tq;
9868         ql_lun_t        *lq;
9869         uint16_t        index;
9870         int             q_sane;
9871 
9872         QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);
9873 
9874         /* Loop through all targets. */
9875         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9876                 for (link = ha->dev[index].first; link != NULL;
9877                     link = next_device) {
9878                         tq = link->base_address;
9879 
9880                         /* Try to acquire device queue lock. */
9881                         if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
9882                                 next_device = NULL;
9883                                 continue;
9884                         }
9885 
9886                         next_device = link->next;
9887 
9888                         if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
9889                             (tq->port_down_retry_count == 0)) {
9890                                 /* Release device queue lock. */
9891                                 DEVICE_QUEUE_UNLOCK(tq);
9892                                 continue;
9893                         }
9894 
9895                         /* Find out if this device is in a sane state. */
9896                         if (tq->flags & (TQF_RSCN_RCVD |
9897                             TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
9898                                 q_sane = 0;
9899                         } else {
9900                                 q_sane = 1;
9901                         }
9902                         /* Loop through commands on watchdog queue. */
9903                         for (link = tq->wdg.first; link != NULL;
9904                             link = next_cmd) {
9905                                 next_cmd = link->next;
9906                                 sp = link->base_address;
9907                                 lq = sp->lun_queue;
9908 
9909                                 /*
9910                                  * For SCSI commands, if everything seems to
9911                                  * be going fine and this packet is stuck
9912                                  * because of throttling at LUN or target
9913                                  * level then do not decrement the
9914                                  * sp->wdg_q_time
9915                                  */
9916                                 if (ha->task_daemon_flags & STATE_ONLINE &&
9917                                     (sp->flags & SRB_ISP_STARTED) == 0 &&
9918                                     q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9919                                     lq->lun_outcnt >= ha->execution_throttle) {
9920                                         continue;
9921                                 }
9922 
9923                                 if (sp->wdg_q_time != 0) {
9924                                         sp->wdg_q_time--;
9925 
9926                                         /* Timeout? */
9927                                         if (sp->wdg_q_time != 0) {
9928                                                 continue;
9929                                         }
9930 
9931                                         ql_remove_link(&tq->wdg, &sp->wdg);
9932                                         sp->flags &= ~SRB_WATCHDOG_ENABLED;
9933 
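                                             /*
                                              * If the ISP already has the
                                              * command, ql_cmd_timeout() may
                                              * schedule an ISP abort; abandon
                                              * scanning the remaining queues
                                              * on this pass.
                                              */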
9934                                         if (sp->flags & SRB_ISP_STARTED) {
9935                                                 ql_cmd_timeout(ha, tq, sp,
9936                                                     set_flags, reset_flags);
9937 
9938                                                 DEVICE_QUEUE_UNLOCK(tq);
9939                                                 tq = NULL;
9940                                                 next_cmd = NULL;
9941                                                 next_device = NULL;
9942                                                 index = DEVICE_HEAD_LIST_SIZE;
9943                                         } else {
9944                                                 ql_cmd_timeout(ha, tq, sp,
9945                                                     set_flags, reset_flags);
9946                                         }
9947                                 }
9948                         }
9949 
9950                         /* Release device queue lock. */
9951                         if (tq != NULL) {
9952                                 DEVICE_QUEUE_UNLOCK(tq);
9953                         }
9954                 }
9955         }
9956 
9957         QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
9958 }
9959 
9960 /*
9961  * ql_cmd_timeout
9962  *      Command timeout handler.
9963  *
9964  * Input:
9965  *      ha:             adapter state pointer.
9966  *      tq:             target queue pointer.
9967  *      sp:             SRB pointer.
9968  *      set_flags:      task daemon flags to set.
9969  *      reset_flags:    task daemon flags to reset.
9970  *
9971  * Context:
9972  *      Interrupt context, no mailbox commands allowed.
9973  */
9974 /* ARGSUSED */
9975 static void
9976 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
9977     uint32_t *set_flags, uint32_t *reset_flags)
9978 {
9979         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9980 
9981         if (!(sp->flags & SRB_ISP_STARTED)) {
9982 
9983                 EL(ha, "command timed out in driver = %ph\n", (void *)sp);
9984 
9985                 REQUEST_RING_LOCK(ha);
9986 
9987                 /* if it's on a queue */
9988                 if (sp->cmd.head) {
9989                         /*
9990                          * The pending_cmds queue needs to be
9991                          * protected by the ring lock
9992                          */
9993                         ql_remove_link(sp->cmd.head, &sp->cmd);
9994                 }
9995                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9996 
9997                 /* Release request ring lock and device queue lock. */
9998                 REQUEST_RING_UNLOCK(ha);
9999                 DEVICE_QUEUE_UNLOCK(tq);
10000 
10001                 /* Set timeout status */
10002                 sp->pkt->pkt_reason = CS_TIMEOUT;
10003 
10004                 /* Ensure no retry */
10005                 sp->flags &= ~SRB_RETRY;
10006 
10007                 /* Call done routine to handle completion. */
10008                 ql_done(&sp->cmd);
10009 
10010                 DEVICE_QUEUE_LOCK(tq);
10011         } else if (CFG_IST(ha, CFG_CTRL_8021)) {
10012                 int             rval;
10013                 uint32_t        index;
10014 
10015                 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10016                     "spf=%xh\n", (void *)sp,
10017                     (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10018                     sp->handle & OSC_INDEX_MASK, sp->flags);
10019 
10020                 DEVICE_QUEUE_UNLOCK(tq);
10021 
10022                 INTR_LOCK(ha);
10023                 ha->pha->xioctl->ControllerErrorCount++;
10024                 if (sp->handle) {
10025                         ha->pha->timeout_cnt++;
10026                         index = sp->handle & OSC_INDEX_MASK;
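                              /*
                               * Invalidate the request ring entry and clear the
                               * outstanding command slot before issuing the
                               * abort mailbox command.
                               */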
10027                         if (ha->pha->outstanding_cmds[index] == sp) {
10028                                 sp->request_ring_ptr->entry_type =
10029                                     INVALID_ENTRY_TYPE;
10030                                 sp->request_ring_ptr->entry_count = 0;
10031                                 ha->pha->outstanding_cmds[index] = 0;
10032                         }
10033                         INTR_UNLOCK(ha);
10034 
10035                         rval = ql_abort_command(ha, sp);
10036                         if (rval == QL_FUNCTION_TIMEOUT ||
10037                             rval == QL_LOCK_TIMEOUT ||
10038                             rval == QL_FUNCTION_PARAMETER_ERROR ||
10039                             ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
10040                                 *set_flags |= ISP_ABORT_NEEDED;
10041                                 EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
10042                                     "needed\n", rval, ha->pha->timeout_cnt);
10043                         }
10044 
10045                         sp->handle = 0;
10046                         sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10047                 } else {
10048                         INTR_UNLOCK(ha);
10049                 }
10050 
10051                 /* Set timeout status */
10052                 sp->pkt->pkt_reason = CS_TIMEOUT;
10053 
10054                 /* Ensure no retry */
10055                 sp->flags &= ~SRB_RETRY;
10056 
10057                 /* Call done routine to handle completion. */
10058                 ql_done(&sp->cmd);
10059 
10060                 DEVICE_QUEUE_LOCK(tq);
10061 
10062         } else {
10063                 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10064                     "spf=%xh, isp_abort_needed\n", (void *)sp,
10065                     (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10066                     sp->handle & OSC_INDEX_MASK, sp->flags);
10067 
10068                 /* Release device queue lock. */
10069                 DEVICE_QUEUE_UNLOCK(tq);
10070 
10071                 INTR_LOCK(ha);
10072                 ha->pha->xioctl->ControllerErrorCount++;
10073                 INTR_UNLOCK(ha);
10074 
10075                 /* Set ISP needs to be reset */
10076                 sp->flags |= SRB_COMMAND_TIMEOUT;
10077 
10078                 if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
10079                         (void) ql_binary_fw_dump(ha, TRUE);
10080                 }
10081 
10082                 *set_flags |= ISP_ABORT_NEEDED;
10083 
10084                 DEVICE_QUEUE_LOCK(tq);
10085         }
10086 
10087         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10088 }
10089 
10090 /*
10091  * ql_rst_aen
10092  *      Processes asynchronous reset.
10093  *
10094  * Input:
10095  *      ha = adapter state pointer.
10096  *
10097  * Context:
10098  *      Kernel context.
10099  */
10100 static void
10101 ql_rst_aen(ql_adapter_state_t *ha)
10102 {
10103         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10104 
10105         /* Issue marker command. */
10106         (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10107 
10108         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10109 }
10110 
10111 /*
10112  * ql_cmd_wait
10113  *      Stall driver until all outstanding commands are returned.
10114  *
10115  * Input:
10116  *      ha = adapter state pointer.
10117  *
10118  * Context:
10119  *      Kernel context.
10120  */
10121 void
10122 ql_cmd_wait(ql_adapter_state_t *ha)
10123 {
10124         uint16_t                index;
10125         ql_link_t               *link;
10126         ql_tgt_t                *tq;
10127         ql_adapter_state_t      *vha;
10128 
10129         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10130 
10131         /* Wait for all outstanding commands to be returned. */
10132         (void) ql_wait_outstanding(ha);
10133 
10134         /*
10135          * clear out internally queued commands
10136          */
10137         for (vha = ha; vha != NULL; vha = vha->vp_next) {
10138                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10139                         for (link = vha->dev[index].first; link != NULL;
10140                             link = link->next) {
10141                                 tq = link->base_address;
10142                                 if (tq &&
10143                                     (!(tq->prli_svc_param_word_3 &
10144                                     PRLI_W3_RETRY))) {
10145                                         (void) ql_abort_device(vha, tq, 0);
10146                                 }
10147                         }
10148                 }
10149         }
10150 
10151         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10152 }
10153 
10154 /*
10155  * ql_wait_outstanding
10156  *      Wait for all outstanding commands to complete.
10157  *
10158  * Input:
10159  *      ha = adapter state pointer.
10160  *
10161  * Returns:
10162  *      MAX_OUTSTANDING_COMMANDS when all commands have completed, else
       *      the outstanding_cmds index of a command that did not complete.
10163  *
10164  * Context:
10165  *      Kernel context.
10166  */
10167 static uint16_t
10168 ql_wait_outstanding(ql_adapter_state_t *ha)
10169 {
10170         ql_srb_t        *sp;
10171         uint16_t        index, count;
10172 
10173         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10174 
10175         count = ql_osc_wait_count;
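      /*
       * Poll each outstanding command slot, restarting the scan whenever
       * pending IOCBs are started or a slot is still busy, until either
       * everything completes or ql_osc_wait_count delays are exhausted.
       */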
10176         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10177                 if (ha->pha->pending_cmds.first != NULL) {
10178                         ql_start_iocb(ha, NULL);
10179                         index = 1;
10180                 }
10181                 if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
10182                     (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
10183                         if (count-- != 0) {
10184                                 ql_delay(ha, 10000);
10185                                 index = 0;
10186                         } else {
10187                                 EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
10188                                     (void *)sp, index, sp->handle);
10189                                 break;
10190                         }
10191                 }
10192         }
10193 
10194         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10195 
10196         return (index);
10197 }
10198 
10199 /*
10200  * ql_restart_queues
10201  *      Restart device queues.
10202  *
10203  * Input:
10204  *      ha = adapter state pointer.
10205  *      DEVICE_QUEUE_LOCK must be released.
10206  *
10207  * Context:
10208  *      Interrupt or Kernel context, no mailbox commands allowed.
10209  */
10210 static void
10211 ql_restart_queues(ql_adapter_state_t *ha)
10212 {
10213         ql_link_t               *link, *link2;
10214         ql_tgt_t                *tq;
10215         ql_lun_t                *lq;
10216         uint16_t                index;
10217         ql_adapter_state_t      *vha;
10218 
10219         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10220 
10221         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10222                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10223                         for (link = vha->dev[index].first; link != NULL;
10224                             link = link->next) {
10225                                 tq = link->base_address;
10226 
10227                                 /* Acquire device queue lock. */
10228                                 DEVICE_QUEUE_LOCK(tq);
10229 
10230                                 tq->flags &= ~TQF_QUEUE_SUSPENDED;
10231 
10232                                 for (link2 = tq->lun_queues.first;
10233                                     link2 != NULL; link2 = link2->next) {
10234                                         lq = link2->base_address;
10235 
10236                                         if (lq->cmd.first != NULL) {
10237                                                 ql_next(vha, lq);
10238                                                 DEVICE_QUEUE_LOCK(tq);
10239                                         }
10240                                 }
10241 
10242                                 /* Release device queue lock. */
10243                                 DEVICE_QUEUE_UNLOCK(tq);
10244                         }
10245                 }
10246         }
10247 
10248         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10249 }
10250 
10251 /*
10252  * ql_iidma
10253  *      Set up iiDMA parameters in firmware.
10254  *
10255  * Input:
10256  *      ha = adapter state pointer.
10257  *      DEVICE_QUEUE_LOCK must be released.
10258  *
10259  * Context:
10260  *      Interrupt or Kernel context, no mailbox commands allowed.
10261  */
10262 static void
10263 ql_iidma(ql_adapter_state_t *ha)
10264 {
10265         ql_link_t       *link;
10266         ql_tgt_t        *tq;
10267         uint16_t        index;
10268         char            buf[256];
10269         uint32_t        data;
10270 
10271         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10272 
10273         if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
10274                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10275                 return;
10276         }
10277 
10278         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10279                 for (link = ha->dev[index].first; link != NULL;
10280                     link = link->next) {
10281                         tq = link->base_address;
10282 
10283                         /* Acquire device queue lock. */
10284                         DEVICE_QUEUE_LOCK(tq);
10285 
10286                         if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10287                                 DEVICE_QUEUE_UNLOCK(tq);
10288                                 continue;
10289                         }
10290 
10291                         tq->flags &= ~TQF_IIDMA_NEEDED;
10292 
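                              /*
                               * Skip targets without a valid N_Port handle or
                               * with an undefined iiDMA rate.
                               */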
10293                         if ((tq->loop_id > LAST_N_PORT_HDL) ||
10294                             (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10295                                 DEVICE_QUEUE_UNLOCK(tq);
10296                                 continue;
10297                         }
10298 
10299                         /* Get the iiDMA persistent data */
10300                         if (tq->iidma_rate == IIDMA_RATE_INIT) {
10301                                 (void) sprintf(buf,
10302                                     "iidma-rate-%02x%02x%02x%02x%02x"
10303                                     "%02x%02x%02x", tq->port_name[0],
10304                                     tq->port_name[1], tq->port_name[2],
10305                                     tq->port_name[3], tq->port_name[4],
10306                                     tq->port_name[5], tq->port_name[6],
10307                                     tq->port_name[7]);
10308 
10309                                 if ((data = ql_get_prop(ha, buf)) ==
10310                                     0xffffffff) {
10311                                         tq->iidma_rate = IIDMA_RATE_NDEF;
10312                                 } else {
10313                                         switch (data) {
10314                                         case IIDMA_RATE_1GB:
10315                                         case IIDMA_RATE_2GB:
10316                                         case IIDMA_RATE_4GB:
10317                                         case IIDMA_RATE_10GB:
10318                                                 tq->iidma_rate = data;
10319                                                 break;
10320                                         case IIDMA_RATE_8GB:
10321                                                 if (CFG_IST(ha,
10322                                                     CFG_CTRL_25XX)) {
10323                                                         tq->iidma_rate = data;
10324                                                 } else {
10325                                                         tq->iidma_rate =
10326                                                             IIDMA_RATE_4GB;
10327                                                 }
10328                                                 break;
10329                                         default:
10330                                                 EL(ha, "invalid data for "
10331                                                     "parameter: %s: %xh\n",
10332                                                     buf, data);
10333                                                 tq->iidma_rate =
10334                                                     IIDMA_RATE_NDEF;
10335                                                 break;
10336                                         }
10337                                 }
10338                         }
10339 
10340                         /* Set the firmware's iiDMA rate */
10341                         if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10342                             !(CFG_IST(ha, CFG_CTRL_8081))) {
10343                                 data = ql_iidma_rate(ha, tq->loop_id,
10344                                     &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10345                                 if (data != QL_SUCCESS) {
10346                                         EL(ha, "mbx failed: %xh\n", data);
10347                                 }
10348                         }
10349 
10350                         /* Release device queue lock. */
10351                         DEVICE_QUEUE_UNLOCK(tq);
10352                 }
10353         }
10354 
10355         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10356 }
10357 
10358 /*
10359  * ql_abort_queues
10360  *      Abort all commands on device queues.
10361  *
10362  * Input:
10363  *      ha = adapter state pointer.
10364  *
10365  * Context:
10366  *      Interrupt or Kernel context, no mailbox commands allowed.
10367  */
10368 static void
10369 ql_abort_queues(ql_adapter_state_t *ha)
10370 {
10371         ql_link_t               *link;
10372         ql_tgt_t                *tq;
10373         ql_srb_t                *sp;
10374         uint16_t                index;
10375         ql_adapter_state_t      *vha;
10376 
10377         QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10378 
10379         /* Return all commands in outstanding command list. */
10380         INTR_LOCK(ha);
10381 
10382         /* Return outstanding commands for devices not capable of FCP-2 retry. */
10383         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10384                 if (ha->pending_cmds.first != NULL) {
10385                         INTR_UNLOCK(ha);
10386                         ql_start_iocb(ha, NULL);
10387                         /* Delay for system */
10388                         ql_delay(ha, 10000);
10389                         INTR_LOCK(ha);
10390                         index = 1;
10391                 }
10392                 sp = ha->outstanding_cmds[index];
10393 
10394                 /* Skip devices capable of FCP-2 retries. */
10395                 if ((sp != NULL) &&
10396                     ((tq = sp->lun_queue->target_queue) != NULL) &&
10397                     (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
10398                         ha->outstanding_cmds[index] = NULL;
10399                         sp->handle = 0;
10400                         sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10401 
10402                         INTR_UNLOCK(ha);
10403 
10404                         /* Set ending status. */
10405                         sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10406                         sp->flags |= SRB_ISP_COMPLETED;
10407 
10408                         /* Call done routine to handle completions. */
10409                         sp->cmd.next = NULL;
10410                         ql_done(&sp->cmd);
10411 
10412                         INTR_LOCK(ha);
10413                 }
10414         }
10415         INTR_UNLOCK(ha);
10416 
10417         for (vha = ha; vha != NULL; vha = vha->vp_next) {
10418                 QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
10419                     vha->instance, vha->vp_index);
10420                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10421                         for (link = vha->dev[index].first; link != NULL;
10422                             link = link->next) {
10423                                 tq = link->base_address;
10424                                 /* Skip devices capable of FCP-2 retries. */
10425                                 if (!(tq->prli_svc_param_word_3 &
10426                                     PRLI_W3_RETRY)) {
10427                                         /*
10428                                          * Set port unavailable status and
10429                                          * return all commands on a device's
10430                                          * queues.
10431                                          */
10432                                         ql_abort_device_queues(ha, tq);
10433                                 }
10434                         }
10435                 }
10436         }
10437         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10438 }
10439 
10440 /*
10441  * ql_abort_device_queues
10442  *      Abort all commands on device queues.
10443  *
10444  * Input:
10445  *      ha = adapter state pointer.
       *      tq = target queue pointer.
10446  *
10447  * Context:
10448  *      Interrupt or Kernel context, no mailbox commands allowed.
10449  */
10450 static void
10451 ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10452 {
10453         ql_link_t       *lun_link, *cmd_link;
10454         ql_srb_t        *sp;
10455         ql_lun_t        *lq;
10456 
10457         QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10458 
10459         DEVICE_QUEUE_LOCK(tq);
10460 
10461         for (lun_link = tq->lun_queues.first; lun_link != NULL;
10462             lun_link = lun_link->next) {
10463                 lq = lun_link->base_address;
10464 
10465                 cmd_link = lq->cmd.first;
10466                 while (cmd_link != NULL) {
10467                         sp = cmd_link->base_address;
10468 
10469                         if (sp->flags & SRB_ABORT) {
10470                                 cmd_link = cmd_link->next;
10471                                 continue;
10472                         }
10473 
10474                         /* Remove srb from device cmd queue. */
10475                         ql_remove_link(&lq->cmd, &sp->cmd);
10476 
10477                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10478 
10479                         DEVICE_QUEUE_UNLOCK(tq);
10480 
10481                         /* Set ending status. */
10482                         sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10483 
10484                         /* Call done routine to handle completion. */
10485                         ql_done(&sp->cmd);
10486 
10487                         /* Delay for system */
10488                         ql_delay(ha, 10000);
10489 
10490                         DEVICE_QUEUE_LOCK(tq);
10491                         cmd_link = lq->cmd.first;
10492                 }
10493         }
10494         DEVICE_QUEUE_UNLOCK(tq);
10495 
10496         QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10497 }
10498 
10499 /*
10500  * ql_loop_resync
10501  *      Resync with fibre channel devices.
10502  *
10503  * Input:
10504  *      ha = adapter state pointer.
10505  *      DEVICE_QUEUE_LOCK must be released.
10506  *
10507  * Returns:
10508  *      ql local function return status code.
10509  *
10510  * Context:
10511  *      Kernel context.
10512  */
10513 static int
10514 ql_loop_resync(ql_adapter_state_t *ha)
10515 {
10516         int rval;
10517 
10518         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10519 
10520         if (ha->flags & IP_INITIALIZED) {
10521                 (void) ql_shutdown_ip(ha);
10522         }
10523 
10524         rval = ql_fw_ready(ha, 10);
10525 
10526         TASK_DAEMON_LOCK(ha);
10527         ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10528         TASK_DAEMON_UNLOCK(ha);
10529 
10530         /* Set loop online, if it really is. */
10531         if (rval == QL_SUCCESS) {
10532                 ql_loop_online(ha);
10533                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10534         } else {
10535                 EL(ha, "failed, rval = %xh\n", rval);
10536         }
10537 
10538         return (rval);
10539 }
10540 
10541 /*
10542  * ql_loop_online
10543  *      Set loop online status if it really is online.
10544  *
10545  * Input:
10546  *      ha = adapter state pointer.
10547  *      DEVICE_QUEUE_LOCK must be released.
10548  *
10549  * Context:
10550  *      Kernel context.
10551  */
10552 void
10553 ql_loop_online(ql_adapter_state_t *ha)
10554 {
10555         ql_adapter_state_t      *vha;
10556 
10557         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10558 
10559         /* Inform the FC Transport that the hardware is online. */
10560         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10561                 if (!(vha->task_daemon_flags &
10562                     (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10563                         /* Restart IP if it was shutdown. */
10564                         if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10565                             !(vha->flags & IP_INITIALIZED)) {
10566                                 (void) ql_initialize_ip(vha);
10567                                 ql_isp_rcvbuf(vha);
10568                         }
10569 
10570                         if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10571                             FC_PORT_STATE_MASK(vha->state) !=
10572                             FC_STATE_ONLINE) {
10573                                 vha->state = FC_PORT_SPEED_MASK(vha->state);
10574                                 if (vha->topology & QL_LOOP_CONNECTION) {
10575                                         vha->state |= FC_STATE_LOOP;
10576                                 } else {
10577                                         vha->state |= FC_STATE_ONLINE;
10578                                 }
10579                                 TASK_DAEMON_LOCK(ha);
10580                                 vha->task_daemon_flags |= FC_STATE_CHANGE;
10581                                 TASK_DAEMON_UNLOCK(ha);
10582                         }
10583                 }
10584         }
10585 
10586         ql_awaken_task_daemon(ha, NULL, 0, 0);
10587 
10588         /* Restart device queues that may have been stopped. */
10589         ql_restart_queues(ha);
10590 
10591         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10592 }
10593 
10594 /*
10595  * ql_fca_handle_to_state
10596  *      Verifies that the handle refers to a valid adapter state.
10597  *
10598  * Input:
10599  *      fca_handle = pointer to state structure.
10600  *
10601  * Returns:
10602  *      adapter state pointer; validated only under QL_DEBUG_ROUTINES.
10603  *
10604  * Context:
10605  *      Kernel context.
10606  */
10607 static ql_adapter_state_t *
10608 ql_fca_handle_to_state(opaque_t fca_handle)
10609 {
10610 #ifdef  QL_DEBUG_ROUTINES
10611         ql_link_t               *link;
10612         ql_adapter_state_t      *ha = NULL;
10613         ql_adapter_state_t      *vha = NULL;
10614 
10615         for (link = ql_hba.first; link != NULL; link = link->next) {
10616                 ha = link->base_address;
10617                 for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10618                         if ((opaque_t)vha == fca_handle) {
10619                                 ha = vha;
10620                                 break;
10621                         }
10622                 }
10623                 if ((opaque_t)ha == fca_handle) {
10624                         break;
10625                 } else {
10626                         ha = NULL;
10627                 }
10628         }
10629 
10630         if (ha == NULL) {
10631                 /*EMPTY*/
10632                 QL_PRINT_2(CE_CONT, "failed\n");
10633         }
10634 
10635 #endif /* QL_DEBUG_ROUTINES */
10636 
10637         return ((ql_adapter_state_t *)fca_handle);
10638 }
10639 
10640 /*
10641  * ql_d_id_to_queue
10642  *      Locate device queue that matches destination ID.
10643  *
10644  * Input:
10645  *      ha = adapter state pointer.
10646  *      d_id = destination ID
10647  *
10648  * Returns:
10649  *      NULL = failure
10650  *
10651  * Context:
10652  *      Interrupt or Kernel context, no mailbox commands allowed.
 
10709 /*
10710  * ql_kstat_update
10711  *      Updates kernel statistics.
10712  *
10713  * Input:
10714  *      ksp - driver kernel statistics structure pointer.
10715  *      rw - KSTAT_READ or KSTAT_WRITE (writes are rejected).
10716  *
10717  * Returns:
10718  *      0 or EACCES
10719  *
10720  * Context:
10721  *      Kernel context.
10722  */
10723 /* ARGSUSED */
10724 static int
10725 ql_kstat_update(kstat_t *ksp, int rw)
10726 {
10727         int                     rval;
10728 
10729         QL_PRINT_3(CE_CONT, "started\n");
10730 
10731         if (rw == KSTAT_WRITE) {
10732                 rval = EACCES;
10733         } else {
10734                 rval = 0;
10735         }
10736 
10737         if (rval != 0) {
10738                 /*EMPTY*/
10739                 QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10740         } else {
10741                 /*EMPTY*/
10742                 QL_PRINT_3(CE_CONT, "done\n");
10743         }
10744         return (rval);
10745 }
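
/*
 * Illustrative sketch (editorial addition, not part of the driver source):
 * an update routine such as ql_kstat_update() is normally attached through
 * the kstat(9S) framework at attach time.  A hedged example of that wiring,
 * using hypothetical names ("qlc", "statistics", my_update), might look
 * like this:
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kstat.h>

static int
my_update(kstat_t *ksp, int rw)
{
	/* Statistics are read-only; reject stores just as the driver does. */
	return (rw == KSTAT_WRITE ? EACCES : 0);
}

static kstat_t *
my_kstat_init(int instance, void *stats, size_t stats_size)
{
	kstat_t	*ksp;

	ksp = kstat_create("qlc", instance, "statistics", "controller",
	    KSTAT_TYPE_RAW, (uint_t)stats_size, KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_data = stats;		/* caller-owned buffer */
		ksp->ks_update = my_update;
		ksp->ks_private = stats;
		kstat_install(ksp);
	}
	return (ksp);
}
#endif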
10746 
10747 /*
10748  * ql_load_flash
10749  *      Loads flash.
10750  *
10751  * Input:
10752  *      ha:     adapter state pointer.
10753  *      dp:     data pointer.
10754  *      size:   data length.
10755  *
10756  * Returns:
10757  *      ql local function return status code.
10758  *
10759  * Context:
10760  *      Kernel context.
10761  */
10762 int
10763 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10764 {
10765         uint32_t        cnt;
10766         int             rval;
10767         uint32_t        size_to_offset;
10768         uint32_t        size_to_compare;
10769         int             erase_all;
10770 
10771         if (CFG_IST(ha, CFG_CTRL_24258081)) {
10772                 return (ql_24xx_load_flash(ha, dp, size, 0));
10773         }
10774 
10775         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10776 
10777         size_to_compare = 0x20000;
10778         size_to_offset = 0;
10779         erase_all = 0;
10780         if (CFG_IST(ha, CFG_SBUS_CARD)) {
10781                 if (size == 0x80000) {
10782                         /* Request to flash the entire chip. */
10783                         size_to_compare = 0x80000;
10784                         erase_all = 1;
10785                 } else {
10786                         size_to_compare = 0x40000;
10787                         if (ql_flash_sbus_fpga) {
10788                                 size_to_offset = 0x40000;
10789                         }
10790                 }
10791         }
10792         if (size > size_to_compare) {
10793                 rval = QL_FUNCTION_PARAMETER_ERROR;
10794                 EL(ha, "failed=%xh\n", rval);
10795                 return (rval);
10796         }
10797 
10798         GLOBAL_HW_LOCK();
10799 
10800         /* Enable Flash Read/Write. */
10801         ql_flash_enable(ha);
10802 
10803         /* Erase flash prior to write. */
10804         rval = ql_erase_flash(ha, erase_all);
10805 
10806         if (rval == QL_SUCCESS) {
10807                 /* Write data to flash. */
10808                 for (cnt = 0; cnt < size; cnt++) {
10809                         /* Allow other system activity. */
10810                         if (cnt % 0x1000 == 0) {
10811                                 ql_delay(ha, 10000);
10812                         }
10813                         rval = ql_program_flash_address(ha,
10814                             cnt + size_to_offset, *dp++);
10815                         if (rval != QL_SUCCESS) {
10816                                 break;
10817                         }
10818                 }
10819         }
10820 
10821         ql_flash_disable(ha);
10822 
10823         GLOBAL_HW_UNLOCK();
10824 
10825         if (rval != QL_SUCCESS) {
10826                 EL(ha, "failed=%xh\n", rval);
10827         } else {
10828                 /*EMPTY*/
10829                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10830         }
10831         return (rval);
10832 }
10833 
10834 /*
10835  * ql_program_flash_address
10836  *      Program flash address.
10837  *
10838  * Input:
10839  *      ha = adapter state pointer.
10840  *      addr = flash byte address.
10841  *      data = data to be written to flash.
10842  *
10843  * Returns:
10844  *      ql local function return status code.
10845  *
10846  * Context:
10847  *      Kernel context.
10848  */
10849 static int
10850 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10851 {
10852         int rval;
10853 
10854         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10855 
10856         if (CFG_IST(ha, CFG_SBUS_CARD)) {
10857                 ql_write_flash_byte(ha, 0x5555, 0xa0);
10858                 ql_write_flash_byte(ha, addr, data);
10859         } else {
10860                 /* Write Program Command Sequence */
10861                 ql_write_flash_byte(ha, 0x5555, 0xaa);
10862                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10863                 ql_write_flash_byte(ha, 0x5555, 0xa0);
10864                 ql_write_flash_byte(ha, addr, data);
10865         }
10866 
10867         /* Wait for write to complete. */
10868         rval = ql_poll_flash(ha, addr, data);
10869 
10870         if (rval != QL_SUCCESS) {
10871                 EL(ha, "failed=%xh\n", rval);
10872         } else {
10873                 /*EMPTY*/
10874                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10875         }
10876         return (rval);
10877 }
10878 
10879 /*
10880  * ql_erase_flash
10881  *      Erases flash, preserving one SBUS region unless erase_all is set.
10882  *
10883  * Input:
10884  *      ha = adapter state pointer, erase_all = erase entire flash flag.
10885  *
10886  * Returns:
10887  *      ql local function return status code.
10888  *
10889  * Context:
10890  *      Kernel context.
10891  */
10892 int
10893 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10894 {
10895         int             rval;
10896         uint32_t        erase_delay = 2000000;
10897         uint32_t        sStartAddr;
10898         uint32_t        ssize;
10899         uint32_t        cnt;
10900         uint8_t         *bfp;
10901         uint8_t         *tmp;
10902 
10903         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10904 
10905         if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10906 
10907                 if (ql_flash_sbus_fpga == 1) {
10908                         ssize = QL_SBUS_FCODE_SIZE;
10909                         sStartAddr = QL_FCODE_OFFSET;
10910                 } else {
10911                         ssize = QL_FPGA_SIZE;
10912                         sStartAddr = QL_FPGA_OFFSET;
10913                 }
10914 
10915                 erase_delay = 20000000;
10916 
10917                 bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10918 
10919                 /* Save the section of flash we're not updating to buffer */
10920                 tmp = bfp;
10921                 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10922                         /* Allow other system activity. */
10923                         if (cnt % 0x1000 == 0) {
10924                                 ql_delay(ha, 10000);
10925                         }
10926                         *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10927                 }
10928         }
10929 
10930         /* Chip Erase Command Sequence */
10931         ql_write_flash_byte(ha, 0x5555, 0xaa);
10932         ql_write_flash_byte(ha, 0x2aaa, 0x55);
10933         ql_write_flash_byte(ha, 0x5555, 0x80);
10934         ql_write_flash_byte(ha, 0x5555, 0xaa);
10935         ql_write_flash_byte(ha, 0x2aaa, 0x55);
10936         ql_write_flash_byte(ha, 0x5555, 0x10);
10937 
10938         ql_delay(ha, erase_delay);
10939 
10940         /* Wait for erase to complete. */
10941         rval = ql_poll_flash(ha, 0, 0x80);
10942 
10943         if (rval != QL_SUCCESS) {
10944                 EL(ha, "failed=%xh\n", rval);
10945                 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10946                         kmem_free(bfp, ssize);
10947                 }
10948                 return (rval);
10949         }
10950 
10951         /* restore the section we saved in the buffer */
10952         if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10953                 /* Restore the section we saved off */
10954                 tmp = bfp;
10955                 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10956                         /* Allow other system activity. */
10957                         if (cnt % 0x1000 == 0) {
10958                                 ql_delay(ha, 10000);
10959                         }
10960                         rval = ql_program_flash_address(ha, cnt, *tmp++);
10961                         if (rval != QL_SUCCESS) {
10962                                 break;
10963                         }
10964                 }
10965 
10966                 kmem_free(bfp, ssize);
10967         }
10968 
10969         if (rval != QL_SUCCESS) {
10970                 EL(ha, "failed=%xh\n", rval);
10971         } else {
10972                 /*EMPTY*/
10973                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10974         }
10975         return (rval);
10976 }
10977 
10978 /*
10979  * ql_poll_flash
10980  *      Polls flash for completion.
10981  *
10982  * Input:
10983  *      ha = adapter state pointer.
10984  *      addr = flash byte address.
10985  *      data = data to be polled.
10986  *
10987  * Returns:
10988  *      ql local function return status code.
10989  *
10990  * Context:
10991  *      Kernel context.
10992  */
10993 int
10994 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10995 {
10996         uint8_t         flash_data;
10997         uint32_t        cnt;
10998         int             rval = QL_FUNCTION_FAILED;
10999 
11000         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11001 
11002         poll_data = (uint8_t)(poll_data & BIT_7);
11003 
11004         /* Wait for 30 seconds for command to finish. */
11005         for (cnt = 30000000; cnt; cnt--) {
11006                 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11007 
11008                 if ((flash_data & BIT_7) == poll_data) {
11009                         rval = QL_SUCCESS;
11010                         break;
11011                 }
11012                 if (flash_data & BIT_5 && cnt > 2) {
11013                         cnt = 2;
11014                 }
11015                 drv_usecwait(1);
11016         }
11017 
11018         if (rval != QL_SUCCESS) {
11019                 EL(ha, "failed=%xh\n", rval);
11020         } else {
11021                 /*EMPTY*/
11022                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11023         }
11024         return (rval);
11025 }
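
/*
 * Illustrative sketch (editorial addition, not part of the driver source):
 * ql_poll_flash() is the standard AMD/SST "data polling" algorithm.  While
 * a program or erase cycle is in progress the part drives DQ7 (bit 7) as
 * the complement of the true data and raises DQ5 (bit 5) once its internal
 * timeout is exceeded, so the host compares bit 7 and allows only a couple
 * more reads after DQ5 asserts.  A standalone sketch, assuming a
 * hypothetical read_byte() callback:
 */
#if 0	/* example only */
#include <stdint.h>

#define	DQ5	0x20
#define	DQ7	0x80

/* Returns 0 on completion, -1 on device timeout. */
static int
poll_flash(uint8_t (*read_byte)(uint32_t), uint32_t addr, uint8_t expected,
    unsigned long max_polls)
{
	uint8_t	status;

	expected &= DQ7;
	while (max_polls--) {
		status = read_byte(addr);
		if ((status & DQ7) == expected) {
			return (0);	/* true data is out: cycle done */
		}
		if ((status & DQ5) && max_polls > 2) {
			max_polls = 2;	/* device flagged its own timeout */
		}
	}
	return (-1);
}
#endif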
11026 
11027 /*
11028  * ql_flash_enable
11029  *      Setup flash for reading/writing.
11030  *
11031  * Input:
11032  *      ha = adapter state pointer.
11033  *
11034  * Context:
11035  *      Kernel context.
11036  */
11037 void
11038 ql_flash_enable(ql_adapter_state_t *ha)
11039 {
11040         uint16_t        data;
11041 
11042         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11043 
11044         /* Enable Flash Read/Write. */
11045         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11046                 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11047                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11048                 data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
11049                 ddi_put16(ha->sbus_fpga_dev_handle,
11050                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11051                 /* Read reset command sequence */
11052                 ql_write_flash_byte(ha, 0xaaa, 0xaa);
11053                 ql_write_flash_byte(ha, 0x555, 0x55);
11054                 ql_write_flash_byte(ha, 0xaaa, 0x20);
11055                 ql_write_flash_byte(ha, 0x555, 0xf0);
11056         } else {
11057                 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
11058                     ISP_FLASH_ENABLE);
11059                 WRT16_IO_REG(ha, ctrl_status, data);
11060 
11061                 /* Read/Reset Command Sequence */
11062                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11063                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11064                 ql_write_flash_byte(ha, 0x5555, 0xf0);
11065         }
11066         (void) ql_read_flash_byte(ha, 0);
11067 
11068         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11069 }
11070 
11071 /*
11072  * ql_flash_disable
11073  *      Disable flash and allow RISC to run.
11074  *
11075  * Input:
11076  *      ha = adapter state pointer.
11077  *
11078  * Context:
11079  *      Kernel context.
11080  */
11081 void
11082 ql_flash_disable(ql_adapter_state_t *ha)
11083 {
11084         uint16_t        data;
11085 
11086         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11087 
11088         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11089                 /*
11090                  * Lock the flash back up.
11091                  */
11092                 ql_write_flash_byte(ha, 0x555, 0x90);
11093                 ql_write_flash_byte(ha, 0x555, 0x0);
11094 
11095                 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11096                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11097                 data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
11098                 ddi_put16(ha->sbus_fpga_dev_handle,
11099                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11100         } else {
11101                 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
11102                     ~ISP_FLASH_ENABLE);
11103                 WRT16_IO_REG(ha, ctrl_status, data);
11104         }
11105 
11106         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11107 }
11108 
11109 /*
11110  * ql_write_flash_byte
11111  *      Write byte to flash.
11112  *
11113  * Input:
11114  *      ha = adapter state pointer.
11115  *      addr = flash byte address.
11116  *      data = data to be written.
11117  *
11118  * Context:
11119  *      Kernel context.
11120  */
11121 void
11122 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11123 {
11124         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11125                 ddi_put16(ha->sbus_fpga_dev_handle,
11126                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11127                     LSW(addr));
11128                 ddi_put16(ha->sbus_fpga_dev_handle,
11129                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11130                     MSW(addr));
11131                 ddi_put16(ha->sbus_fpga_dev_handle,
11132                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11133                     (uint16_t)data);
11134         } else {
11135                 uint16_t bank_select;
11136 
11137                 /* Setup bit 16 of flash address. */
11138                 bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11139 
11140                 if (CFG_IST(ha, CFG_CTRL_6322)) {
11141                         bank_select = (uint16_t)(bank_select & ~0xf0);
11142                         bank_select = (uint16_t)(bank_select |
11143                             ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11144                         WRT16_IO_REG(ha, ctrl_status, bank_select);
11145                 } else {
11146                         if (addr & BIT_16 && !(bank_select &
11147                             ISP_FLASH_64K_BANK)) {
11148                                 bank_select = (uint16_t)(bank_select |
11149                                     ISP_FLASH_64K_BANK);
11150                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11151                         } else if (!(addr & BIT_16) && bank_select &
11152                             ISP_FLASH_64K_BANK) {
11153                                 bank_select = (uint16_t)(bank_select &
11154                                     ~ISP_FLASH_64K_BANK);
11155                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11156                         }
11157                 }
11158 
11159                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11160                         WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
 
11182  */
11183 uint8_t
11184 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11185 {
11186         uint8_t data;
11187 
11188         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11189                 ddi_put16(ha->sbus_fpga_dev_handle,
11190                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11191                     LSW(addr));
11192                 ddi_put16(ha->sbus_fpga_dev_handle,
11193                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11194                     MSW(addr));
11195                 data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11196                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11197         } else {
11198                 uint16_t        bank_select;
11199 
11200                 /* Setup bit 16 of flash address. */
11201                 bank_select = RD16_IO_REG(ha, ctrl_status);
11202                 if (CFG_IST(ha, CFG_CTRL_6322)) {
11203                         bank_select = (uint16_t)(bank_select & ~0xf0);
11204                         bank_select = (uint16_t)(bank_select |
11205                             ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11206                         WRT16_IO_REG(ha, ctrl_status, bank_select);
11207                 } else {
11208                         if (addr & BIT_16 &&
11209                             !(bank_select & ISP_FLASH_64K_BANK)) {
11210                                 bank_select = (uint16_t)(bank_select |
11211                                     ISP_FLASH_64K_BANK);
11212                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11213                         } else if (!(addr & BIT_16) &&
11214                             bank_select & ISP_FLASH_64K_BANK) {
11215                                 bank_select = (uint16_t)(bank_select &
11216                                     ~ISP_FLASH_64K_BANK);
11217                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11218                         }
11219                 }
11220 
11221                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11222                         WRT16_IO_REG(ha, flash_address, addr);
 
11234  * ql_24xx_flash_id
11235  *      Get flash IDs.
11236  *
11237  * Input:
11238  *      ha:             adapter state pointer.
11239  *
11240  * Returns:
11241  *      ql local function return status code.
11242  *
11243  * Context:
11244  *      Kernel context.
11245  */
11246 int
11247 ql_24xx_flash_id(ql_adapter_state_t *vha)
11248 {
11249         int                     rval;
11250         uint32_t                fdata = 0;
11251         ql_adapter_state_t      *ha = vha->pha;
11252         ql_xioctl_t             *xp = ha->xioctl;
11253 
11254         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11255 
11256         rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11257 
11258         if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11259                 fdata = 0;
11260                 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11261                     (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11262         }
11263 
11264         if (rval != QL_SUCCESS) {
11265                 EL(ha, "24xx read_flash failed=%xh\n", rval);
11266         } else if (fdata != 0) {
11267                 xp->fdesc.flash_manuf = LSB(LSW(fdata));
11268                 xp->fdesc.flash_id = MSB(LSW(fdata));
11269                 xp->fdesc.flash_len = LSB(MSW(fdata));
11270         } else {
11271                 xp->fdesc.flash_manuf = ATMEL_FLASH;
11272                 xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11273                 xp->fdesc.flash_len = 0;
11274         }
11275 
11276         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11277 
11278         return (rval);
11279 }
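
/*
 * Illustrative sketch (editorial addition, not part of the driver source):
 * the 32-bit word returned by the flash ID query above packs the JEDEC
 * fields into its low three bytes, which ql_24xx_flash_id() peels apart
 * with the LSB/MSB/LSW/MSW macros.  The equivalent plain-C extraction:
 */
#if 0	/* example only */
#include <stdint.h>

struct flash_id {
	uint8_t	manuf;	/* byte 0: manufacturer code */
	uint8_t	id;	/* byte 1: device/memory type */
	uint8_t	len;	/* byte 2: capacity code (typically log2 of size) */
};

static struct flash_id
decode_flash_id(uint32_t fdata)
{
	struct flash_id	f;

	f.manuf = (uint8_t)(fdata & 0xff);		/* LSB(LSW(fdata)) */
	f.id = (uint8_t)((fdata >> 8) & 0xff);		/* MSB(LSW(fdata)) */
	f.len = (uint8_t)((fdata >> 16) & 0xff);	/* LSB(MSW(fdata)) */
	return (f);
}
#endif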
11280 
11281 /*
11282  * ql_24xx_load_flash
11283  *      Loads flash.
11284  *
11285  * Input:
11286  *      ha = adapter state pointer.
11287  *      dp = data pointer.
11288  *      size = data length in bytes.
11289  *      faddr = flash byte address (must be 32-bit word aligned).
11290  *
11291  * Returns:
11292  *      ql local function return status code.
11293  *
11294  * Context:
11295  *      Kernel context.
11296  */
11297 int
11298 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11299     uint32_t faddr)
11300 {
11301         int                     rval;
11302         uint32_t                cnt, rest_addr, fdata, wc;
11303         dma_mem_t               dmabuf = {0};
11304         ql_adapter_state_t      *ha = vha->pha;
11305         ql_xioctl_t             *xp = ha->xioctl;
11306 
11307         QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11308             ha->instance, faddr, size);
11309 
11310         /* start address must be 32 bit word aligned */
11311         if ((faddr & 0x3) != 0) {
11312                 EL(ha, "incorrect start address alignment\n");
11313                 return (QL_FUNCTION_PARAMETER_ERROR);
11314         }
11315 
11316         /* Allocate DMA buffer */
11317         if (CFG_IST(ha, CFG_CTRL_2581)) {
11318                 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11319                     LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11320                     QL_SUCCESS) {
11321                         EL(ha, "dma alloc failed, rval=%xh\n", rval);
11322                         return (rval);
11323                 }
11324         }
11325 
11326         GLOBAL_HW_LOCK();
11327 
11328         /* Enable flash write */
11329         if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11330                 GLOBAL_HW_UNLOCK();
11331                 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11332                 ql_free_phys(ha, &dmabuf);
11333                 return (rval);
11334         }
11335 
11336         /* setup mask of address range within a sector */
11337         rest_addr = (xp->fdesc.block_size - 1) >> 2;
11338 
11339         faddr = faddr >> 2;       /* flash gets 32 bit words */
11340 
11341         /*
11342          * Write data to flash.
11343          */
11344         cnt = 0;
11345         size = (size + 3) >> 2;   /* Round up & convert to dwords */
11346 
11347         while (cnt < size) {
11348                 /* Beginning of a sector? */
11349                 if ((faddr & rest_addr) == 0) {
11350                         if (CFG_IST(ha, CFG_CTRL_8021)) {
11351                                 fdata = ha->flash_data_addr | faddr;
11352                                 rval = ql_8021_rom_erase(ha, fdata);
11353                                 if (rval != QL_SUCCESS) {
11354                                         EL(ha, "8021 erase sector status="
11355                                             "%xh, start=%xh, end=%xh"
11356                                             "\n", rval, fdata,
11357                                             fdata + rest_addr);
11358                                         break;
11359                                 }
11360                         } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11361                                 fdata = ha->flash_data_addr | faddr;
11362                                 rval = ql_flash_access(ha,
11363                                     FAC_ERASE_SECTOR, fdata, fdata +
11364                                     rest_addr, 0);
11365                                 if (rval != QL_SUCCESS) {
11366                                         EL(ha, "erase sector status="
11367                                             "%xh, start=%xh, end=%xh"
11368                                             "\n", rval, fdata,
11369                                             fdata + rest_addr);
11370                                         break;
11371                                 }
11372                         } else {
11373                                 fdata = (faddr & ~rest_addr) << 2;
11374                                 fdata = (fdata & 0xff00) |
11375                                     (fdata << 16 & 0xff0000) |
11376                                     (fdata >> 16 & 0xff);
11377 
11378                                 if (rest_addr == 0x1fff) {
11379                                         /* 32kb sector block erase */
11380                                         rval = ql_24xx_write_flash(ha,
11381                                             FLASH_CONF_ADDR | 0x0352,
11382                                             fdata);
11383                                 } else {
11384                                         /* 64kb sector block erase */
11385                                         rval = ql_24xx_write_flash(ha,
11386                                             FLASH_CONF_ADDR | 0x03d8,
11387                                             fdata);
11388                                 }
11389                                 if (rval != QL_SUCCESS) {
11390                                         EL(ha, "Unable to flash sector"
11391                                             ": address=%xh\n", faddr);
11392                                         break;
11393                                 }
11394                         }
11395                 }
11396 
11397                 /* Write data */
11398                 if (CFG_IST(ha, CFG_CTRL_2581) &&
11399                     ((faddr & 0x3f) == 0)) {
11400                         /*
11401                          * Limit write up to sector boundary.
11402                          */
11403                         wc = ((~faddr & (rest_addr>>1)) + 1);
11404 
11405                         if (size - cnt < wc) {
11406                                 wc = size - cnt;
11407                         }
11408 
11409                         ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11410                             (uint8_t *)dmabuf.bp, wc<<2,
11411                             DDI_DEV_AUTOINCR);
11412 
11413                         rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11414                             faddr, dmabuf.cookie.dmac_laddress, wc);
11415                         if (rval != QL_SUCCESS) {
11416                                 EL(ha, "unable to dma to flash "
11417                                     "address=%xh\n", faddr << 2);
11418                                 break;
 
11429                         rval = ql_24xx_write_flash(ha,
11430                             ha->flash_data_addr | faddr, fdata);
11431                         if (rval != QL_SUCCESS) {
11432                                 EL(ha, "Unable to program flash "
11433                                     "address=%xh data=%xh\n", faddr,
11434                                     *dp);
11435                                 break;
11436                         }
11437                         cnt++;
11438                         faddr++;
11439 
11440                         /* Allow other system activity. */
11441                         if (cnt % 0x1000 == 0) {
11442                                 ql_delay(ha, 10000);
11443                         }
11444                 }
11445         }
11446 
11447         ql_24xx_protect_flash(ha);
11448 
11449         ql_free_phys(ha, &dmabuf);
11450 
11451         GLOBAL_HW_UNLOCK();
11452 
11453         if (rval != QL_SUCCESS) {
11454                 EL(ha, "failed=%xh\n", rval);
11455         } else {
11456                 /*EMPTY*/
11457                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11458         }
11459         return (rval);
11460 }
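
/*
 * Illustrative sketch (editorial addition, not part of the driver source):
 * the address arithmetic in ql_24xx_load_flash() works in 32-bit words.
 * The byte address and byte count are converted to word units, and a
 * sector erase is issued whenever the word address crosses a sector
 * boundary.  Stated as standalone C, assuming a 64 KB sector for the
 * example:
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t	block_size = 0x10000;		   /* bytes per sector */
	uint32_t	rest_addr = (block_size - 1) >> 2; /* word-offset mask */
	uint32_t	faddr = 0x20000 >> 2;		   /* byte -> word addr */
	uint32_t	size = (0x1234 + 3) >> 2;	   /* bytes -> dwords */
	uint32_t	cnt;

	for (cnt = 0; cnt < size; cnt++, faddr++) {
		if ((faddr & rest_addr) == 0) {
			(void) printf("erase sector at word %#x\n", faddr);
		}
		/* ... program one 32-bit word at faddr ... */
	}
	return (0);
}
#endif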
11461 
11462 /*
11463  * ql_24xx_read_flash
11464  *      Reads a 32bit word from ISP24xx NVRAM/FLASH.
11465  *
11466  * Input:
11467  *      ha:     adapter state pointer.
11468  *      faddr:  NVRAM/FLASH address.
11469  *      bp:     data pointer.
11470  *
11471  * Returns:
11472  *      ql local function return status code.
11473  *
11474  * Context:
11475  *      Kernel context.
11476  */
11477 int
11478 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11479 {
11480         uint32_t                timer;
11481         int                     rval = QL_SUCCESS;
11482         ql_adapter_state_t      *ha = vha->pha;
11483 
11484         if (CFG_IST(ha, CFG_CTRL_8021)) {
11485                 if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11486                         EL(ha, "8021 access error\n");
11487                 }
11488                 return (rval);
11489         }
11490 
11491         /* Clear access error flag */
11492         WRT32_IO_REG(ha, ctrl_status,
11493             RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11494 
11495         WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11496 
11497         /* Wait for READ cycle to complete. */
11498         for (timer = 300000; timer; timer--) {
11499                 if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11500                         break;
11501                 }
11502                 drv_usecwait(10);
11503         }
11504 
 
11520  *      Writes a 32bit word to ISP24xx NVRAM/FLASH.
11521  *
11522  * Input:
11523  *      ha:     adapter state pointer.
11524  *      addr:   NVRAM/FLASH address.
11525  *      value:  data.
11526  *
11527  * Returns:
11528  *      ql local function return status code.
11529  *
11530  * Context:
11531  *      Kernel context.
11532  */
11533 int
11534 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11535 {
11536         uint32_t                timer, fdata;
11537         int                     rval = QL_SUCCESS;
11538         ql_adapter_state_t      *ha = vha->pha;
11539 
11540         if (CFG_IST(ha, CFG_CTRL_8021)) {
11541                 if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
11542                         EL(ha, "8021 access error\n");
11543                 }
11544                 return (rval);
11545         }
11546         /* Clear access error flag */
11547         WRT32_IO_REG(ha, ctrl_status,
11548             RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11549 
11550         WRT32_IO_REG(ha, flash_data, data);
11551         RD32_IO_REG(ha, flash_data);            /* PCI Posting. */
11552         WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11553 
11554         /* Wait for Write cycle to complete. */
11555         for (timer = 3000000; timer; timer--) {
11556                 if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11557                         /* Check flash write in progress. */
11558                         if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11559                                 (void) ql_24xx_read_flash(ha,
11560                                     FLASH_CONF_ADDR | 0x005, &fdata);
11561                                 if (!(fdata & BIT_0)) {
11562                                         break;
11563                                 }
11564                         } else {
11565                                 break;
11566                         }
11567                 }
11568                 drv_usecwait(10);
11569         }
11570         if (timer == 0) {
11571                 EL(ha, "failed, timeout\n");
11572                 rval = QL_FUNCTION_TIMEOUT;
11573         } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11574                 EL(ha, "access error\n");
11575                 rval = QL_FUNCTION_FAILED;
11576         }
11577 
11578         return (rval);
11579 }
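
/*
 * Illustrative sketch (editorial addition, not part of the driver source):
 * for writes into the flash configuration space the routine above also
 * polls the serial flash status register (command offset 0x05) until the
 * write-in-progress bit (bit 0) clears.  The generic shape of that poll,
 * assuming a hypothetical read_status() callback:
 */
#if 0	/* example only */
#include <stdint.h>

#define	SR_WIP	0x01	/* status register bit 0: write in progress */

/* Returns 0 once the write cycle finishes, -1 on timeout. */
static int
wait_write_done(uint8_t (*read_status)(void), unsigned long max_polls)
{
	while (max_polls--) {
		if ((read_status() & SR_WIP) == 0) {
			return (0);
		}
	}
	return (-1);
}
#endif
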
11580 /*
11581  * ql_24xx_unprotect_flash
11582  *      Enable writes
11583  *
11584  * Input:
11585  *      ha:     adapter state pointer.
11586  *
11587  * Returns:
11588  *      ql local function return status code.
11589  *
11590  * Context:
11591  *      Kernel context.
11592  */
11593 int
11594 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11595 {
11596         int                     rval;
11597         uint32_t                fdata;
11598         ql_adapter_state_t      *ha = vha->pha;
11599         ql_xioctl_t             *xp = ha->xioctl;
11600 
11601         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11602 
11603         if (CFG_IST(ha, CFG_CTRL_8021)) {
11604                 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11605                 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11606                 if (rval != QL_SUCCESS) {
11607                         EL(ha, "8021 access error\n");
11608                 }
11609                 return (rval);
11610         }
11611         if (CFG_IST(ha, CFG_CTRL_81XX)) {
11612                 if (ha->task_daemon_flags & FIRMWARE_UP) {
11613                         if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11614                             0)) != QL_SUCCESS) {
11615                                 EL(ha, "status=%xh\n", rval);
11616                         }
11617                         QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11618                             ha->instance);
11619                         return (rval);
11620                 }
11621         } else {
11622                 /* Enable flash write. */
11623                 WRT32_IO_REG(ha, ctrl_status,
11624                     RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11625                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
11626         }
11627 
11628         /*
11629          * Remove block write protection (SST and ST) and
11630          * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11631          * Unprotect sectors.
11632          */
11633         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11634             xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11635 
11636         if (xp->fdesc.unprotect_sector_cmd != 0) {
11637                 for (fdata = 0; fdata < 0x10; fdata++) {
11638                         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11639                             0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11640                 }
11641 
11642                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11643                     xp->fdesc.unprotect_sector_cmd, 0x00400f);
11644                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11645                     xp->fdesc.unprotect_sector_cmd, 0x00600f);
11646                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11647                     xp->fdesc.unprotect_sector_cmd, 0x00800f);
11648         }
11649 
11650         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11651 
11652         return (QL_SUCCESS);
11653 }
11654 
11655 /*
11656  * ql_24xx_protect_flash
11657  *      Disable writes
11658  *
11659  * Input:
11660  *      ha:     adapter state pointer.
11661  *
11662  * Context:
11663  *      Kernel context.
11664  */
11665 void
11666 ql_24xx_protect_flash(ql_adapter_state_t *vha)
11667 {
11668         int                     rval;
11669         uint32_t                fdata;
11670         ql_adapter_state_t      *ha = vha->pha;
11671         ql_xioctl_t             *xp = ha->xioctl;
11672 
11673         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11674 
11675         if (CFG_IST(ha, CFG_CTRL_8021)) {
11676                 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11677                 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
11678                 if (rval != QL_SUCCESS) {
11679                         EL(ha, "8021 access error\n");
11680                 }
11681                 return;
11682         }
11683         if (CFG_IST(ha, CFG_CTRL_81XX)) {
11684                 if (ha->task_daemon_flags & FIRMWARE_UP) {
11685                         if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11686                             0)) != QL_SUCCESS) {
11687                                 EL(ha, "status=%xh\n", rval);
11688                         }
11689                         QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11690                             ha->instance);
11691                         return;
11692                 }
11693         } else {
11694                 /* Enable flash write. */
11695                 WRT32_IO_REG(ha, ctrl_status,
11696                     RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11697                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
11698         }
11699 
11700         /*
11701          * Protect sectors.
11702          * Set block write protection (SST and ST) and
11703          * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11704          */
11705         if (xp->fdesc.protect_sector_cmd != 0) {
11706                 for (fdata = 0; fdata < 0x10; fdata++) {
11707                         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11708                             0x330 | xp->fdesc.protect_sector_cmd, fdata);
11709                 }
11710                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11711                     xp->fdesc.protect_sector_cmd, 0x00400f);
11712                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11713                     xp->fdesc.protect_sector_cmd, 0x00600f);
11714                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11715                     xp->fdesc.protect_sector_cmd, 0x00800f);
11716 
11717                 /* TODO: ??? */
11718                 (void) ql_24xx_write_flash(ha,
11719                     FLASH_CONF_ADDR | 0x101, 0x80);
11720         } else {
11721                 (void) ql_24xx_write_flash(ha,
11722                     FLASH_CONF_ADDR | 0x101, 0x9c);
11723         }
11724 
11725         /* Disable flash write. */
11726         if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
11727                 WRT32_IO_REG(ha, ctrl_status,
11728                     RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11729                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
11730         }
11731 
11732         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11733 }
11734 
11735 /*
11736  * ql_dump_firmware
11737  *      Save RISC code state information.
11738  *
11739  * Input:
11740  *      ha = adapter state pointer.
11741  *
11742  * Returns:
11743  *      QL local function return status code.
11744  *
11745  * Context:
11746  *      Kernel context.
11747  */
11748 static int
11749 ql_dump_firmware(ql_adapter_state_t *vha)
11750 {
11751         int                     rval;
11752         clock_t                 timer = drv_usectohz(30000000);
11753         ql_adapter_state_t      *ha = vha->pha;
11754 
11755         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11756 
11757         QL_DUMP_LOCK(ha);
11758 
11759         if (ha->ql_dump_state & QL_DUMPING ||
11760             (ha->ql_dump_state & QL_DUMP_VALID &&
11761             !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11762                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11763                 QL_DUMP_UNLOCK(ha);
11764                 return (QL_SUCCESS);
11765         }
11766 
11767         QL_DUMP_UNLOCK(ha);
11768 
11769         ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11770 
11771         /*
11772          * Wait for all outstanding commands to complete
11773          */
11774         (void) ql_wait_outstanding(ha);
11775 
11776         /* Dump firmware. */
11777         rval = ql_binary_fw_dump(ha, TRUE);
11778 
11779         /* Do abort to force restart. */
11780         ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11781         EL(ha, "restarting, isp_abort_needed\n");
11782 
11783         /* Acquire task daemon lock. */
11784         TASK_DAEMON_LOCK(ha);
11785 
11786         /* Wait for suspension to end. */
11787         while (ha->task_daemon_flags & QL_SUSPENDED) {
11788                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11789 
11790                 /* 30 seconds from now */
11791                 if (cv_reltimedwait(&ha->cv_dr_suspended,
11792                     &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11793                         /*
11794                          * The timeout time 'timer' was
11795                          * reached without the condition
11796                          * being signaled.
11797                          */
11798                         break;
11799                 }
11800         }
11801 
11802         /* Release task daemon lock. */
11803         TASK_DAEMON_UNLOCK(ha);
11804 
11805         if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11806                 /*EMPTY*/
11807                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11808         } else {
11809                 EL(ha, "failed, rval = %xh\n", rval);
11810         }
11811         return (rval);
11812 }
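
/*
 * Illustrative sketch (editorial addition, not part of the driver source):
 * ql_dump_firmware() above, like ql_binary_fw_dump() below, uses the same
 * bounded-wait idiom: set a "wake me" flag, sleep on a condition variable
 * with a relative timeout, and treat a -1 return from cv_reltimedwait() as
 * timer expiry.  A hedged, generic form of the idiom (flag, cv and mutex
 * names are hypothetical):
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>

static int
wait_for_clear(volatile uint32_t *flags, uint32_t busy, uint32_t wakeup,
    kcondvar_t *cv, kmutex_t *mtx, clock_t usecs)
{
	clock_t	delta = drv_usectohz(usecs);

	mutex_enter(mtx);
	while (*flags & busy) {
		*flags |= wakeup;	/* ask the signaller to cv_broadcast */
		if (cv_reltimedwait(cv, mtx, delta, TR_CLOCK_TICK) == -1) {
			mutex_exit(mtx);
			return (-1);	/* timed out waiting */
		}
	}
	mutex_exit(mtx);
	return (0);
}
#endif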
11813 
11814 /*
11815  * ql_binary_fw_dump
11816  *      Dumps binary data from firmware.
11817  *
11818  * Input:
11819  *      ha = adapter state pointer.
11820  *      lock_needed = mailbox lock needed.
11821  *
11822  * Returns:
11823  *      ql local function return status code.
11824  *
11825  * Context:
11826  *      Interrupt or Kernel context, no mailbox commands allowed.
11827  */
11828 int
11829 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11830 {
11831         clock_t                 timer;
11832         mbx_cmd_t               mc;
11833         mbx_cmd_t               *mcp = &mc;
11834         int                     rval = QL_SUCCESS;
11835         ql_adapter_state_t      *ha = vha->pha;
11836 
11837         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11838 
11839         if (CFG_IST(ha, CFG_CTRL_8021)) {
11840                 EL(ha, "8021 not supported\n");
11841                 return (QL_NOT_SUPPORTED);
11842         }
11843 
11844         QL_DUMP_LOCK(ha);
11845 
11846         if (ha->ql_dump_state & QL_DUMPING ||
11847             (ha->ql_dump_state & QL_DUMP_VALID &&
11848             !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11849                 EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11850                 QL_DUMP_UNLOCK(ha);
11851                 return (QL_DATA_EXISTS);
11852         }
11853 
11854         ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11855         ha->ql_dump_state |= QL_DUMPING;
11856 
11857         QL_DUMP_UNLOCK(ha);
11858 
11859         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11860 
11861                 /* Insert Time Stamp */
11862                 rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11863                     FTO_INSERT_TIME_STAMP);
11864                 if (rval != QL_SUCCESS) {
11865                         EL(ha, "f/w extended trace insert"
11866                             "time stamp failed: %xh\n", rval);
11867                 }
11868         }
11869 
11870         if (lock_needed == TRUE) {
11871                 /* Acquire mailbox register lock. */
11872                 MBX_REGISTER_LOCK(ha);
11873                 timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11874 
11875                 /* Check for mailbox available, if not wait for signal. */
11876                 while (ha->mailbox_flags & MBX_BUSY_FLG) {
11877                         ha->mailbox_flags = (uint8_t)
11878                             (ha->mailbox_flags | MBX_WANT_FLG);
11879 
11880                         /* 30 seconds from now */
11881                         if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11882                             timer, TR_CLOCK_TICK) == -1) {
11883                                 /*
11884                                  * The timeout time 'timer' was
11885                                  * reached without the condition
11886                                  * being signaled.
11887                                  */
11888 
11889                                 /* Release mailbox register lock. */
11890                                 MBX_REGISTER_UNLOCK(ha);
11891 
11892                                 EL(ha, "failed, rval = %xh\n",
11893                                     QL_FUNCTION_TIMEOUT);
11894                                 return (QL_FUNCTION_TIMEOUT);
11895                         }
11896                 }
11897 
11898                 /* Set busy flag. */
11899                 ha->mailbox_flags = (uint8_t)
11900                     (ha->mailbox_flags | MBX_BUSY_FLG);
11901                 mcp->timeout = 120;
11902                 ha->mcp = mcp;
11903 
11904                 /* Release mailbox register lock. */
11905                 MBX_REGISTER_UNLOCK(ha);
11906         }
11907 
11908         /* Free previous dump buffer. */
11909         if (ha->ql_dump_ptr != NULL) {
11910                 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11911                 ha->ql_dump_ptr = NULL;
11912         }
11913 
11914         if (CFG_IST(ha, CFG_CTRL_2422)) {
11915                 ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11916                     ha->fw_ext_memory_size);
11917         } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11918                 ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11919                     ha->fw_ext_memory_size);
11920         } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11921                 ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11922                     ha->fw_ext_memory_size);
11923         } else {
11924                 ha->ql_dump_size = sizeof (ql_fw_dump_t);
11925         }
11926 
11927         if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11928             NULL) {
11929                 rval = QL_MEMORY_ALLOC_FAILED;
11930         } else {
11931                 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11932                         rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11933                 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11934                         rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11935                 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11936                         rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11937                 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
11938                         rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11939                 } else {
11940                         rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11941                 }
11942         }
11943 
11944         /* Reset ISP chip. */
11945         ql_reset_chip(ha);
11946 
11947         QL_DUMP_LOCK(ha);
11948 
11949         if (rval != QL_SUCCESS) {
11950                 if (ha->ql_dump_ptr != NULL) {
11951                         kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11952                         ha->ql_dump_ptr = NULL;
11953                 }
11954                 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11955                     QL_DUMP_UPLOADED);
11956                 EL(ha, "failed, rval = %xh\n", rval);
11957         } else {
11958                 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11959                 ha->ql_dump_state |= QL_DUMP_VALID;
11960                 EL(ha, "done\n");
11961         }
11962 
 
11971  *
11972  * Input:
11973  *      ha = adapter state pointer.
11974  *      bptr = buffer pointer.
11975  *
11976  * Returns:
11977  *      Amount of data buffer used.
11978  *
11979  * Context:
11980  *      Kernel context.
11981  */
11982 size_t
11983 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11984 {
11985         uint32_t                cnt;
11986         caddr_t                 bp;
11987         int                     mbox_cnt;
11988         ql_adapter_state_t      *ha = vha->pha;
11989         ql_fw_dump_t            *fw = ha->ql_dump_ptr;
11990 
11991         if (CFG_IST(ha, CFG_CTRL_2422)) {
11992                 return (ql_24xx_ascii_fw_dump(ha, bufp));
11993         } else if (CFG_IST(ha, CFG_CTRL_2581)) {
11994                 return (ql_2581_ascii_fw_dump(ha, bufp));
11995         }
11996 
11997         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11998 
11999         if (CFG_IST(ha, CFG_CTRL_2300)) {
12000                 (void) sprintf(bufp, "\nISP 2300IP ");
12001         } else if (CFG_IST(ha, CFG_CTRL_6322)) {
12002                 (void) sprintf(bufp, "\nISP 6322FLX ");
12003         } else {
12004                 (void) sprintf(bufp, "\nISP 2200IP ");
12005         }
12006 
12007         bp = bufp + strlen(bufp);
12008         (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12009             ha->fw_major_version, ha->fw_minor_version,
12010             ha->fw_subminor_version);
12011 
12012         (void) strcat(bufp, "\nPBIU Registers:");
12013         bp = bufp + strlen(bufp);
12014         for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12015                 if (cnt % 8 == 0) {
12016                         *bp++ = '\n';
12017                 }
12018                 (void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
12019                 bp = bp + 6;
12020         }
12021 
12022         if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12023                 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12024                     "registers:");
12025                 bp = bufp + strlen(bufp);
12026                 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12027                         if (cnt % 8 == 0) {
12028                                 *bp++ = '\n';
12029                         }
12030                         (void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
12031                         bp = bp + 6;
12032                 }
12033         }
12034 
12035         (void) strcat(bp, "\n\nMailbox Registers:");
12036         bp = bufp + strlen(bufp);
12037         mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
12038         for (cnt = 0; cnt < mbox_cnt; cnt++) {
12039                 if (cnt % 8 == 0) {
12040                         *bp++ = '\n';
12041                 }
12042                 (void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
12043                 bp = bp + 6;
12044         }
12045 
12046         if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12047                 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12048                 bp = bufp + strlen(bufp);
12049                 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12050                         if (cnt % 8 == 0) {
12051                                 *bp++ = '\n';
12052                         }
12053                         (void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
12054                         bp = bp + 6;
12055                 }
12056         }
12057 
12058         (void) strcat(bp, "\n\nDMA Registers:");
12059         bp = bufp + strlen(bufp);
12060         for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12061                 if (cnt % 8 == 0) {
12062                         *bp++ = '\n';
12063                 }
12064                 (void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
12065                 bp = bp + 6;
12066         }
 
12141                 if (cnt % 8 == 0) {
12142                         *bp++ = '\n';
12143                 }
12144                 (void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
12145                 bp = bp + 6;
12146         }
12147 
12148         (void) strcat(bp, "\n\nRISC GP7 Registers:");
12149         bp = bufp + strlen(bufp);
12150         for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12151                 if (cnt % 8 == 0) {
12152                         *bp++ = '\n';
12153                 }
12154                 (void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
12155                 bp = bp + 6;
12156         }
12157 
12158         (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12159         bp = bufp + strlen(bufp);
12160         for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12161                 if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12162                     CFG_CTRL_6322)) == 0))) {
12163                         break;
12164                 }
12165                 if (cnt % 8 == 0) {
12166                         *bp++ = '\n';
12167                 }
12168                 (void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
12169                 bp = bp + 6;
12170         }
12171 
12172         (void) strcat(bp, "\n\nFPM B0 Registers:");
12173         bp = bufp + strlen(bufp);
12174         for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12175                 if (cnt % 8 == 0) {
12176                         *bp++ = '\n';
12177                 }
12178                 (void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
12179                 bp = bp + 6;
12180         }
12181 
12182         (void) strcat(bp, "\n\nFPM B1 Registers:");
12183         bp = bufp + strlen(bufp);
12184         for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12185                 if (cnt % 8 == 0) {
12186                         *bp++ = '\n';
12187                 }
12188                 (void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
12189                 bp = bp + 6;
12190         }
12191 
12192         if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12193                 (void) strcat(bp, "\n\nCode RAM Dump:");
12194                 bp = bufp + strlen(bufp);
12195                 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12196                         if (cnt % 8 == 0) {
12197                                 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12198                                 bp = bp + 8;
12199                         }
12200                         (void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12201                         bp = bp + 6;
12202                 }
12203 
12204                 (void) strcat(bp, "\n\nStack RAM Dump:");
12205                 bp = bufp + strlen(bufp);
12206                 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12207                         if (cnt % 8 == 0) {
12208                                 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12209                                 bp = bp + 8;
12210                         }
12211                         (void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
12212                         bp = bp + 6;
 
12245                         (void) sprintf(bp, "\n%08x: ", cnt);
12246                         bp += strlen(bp);
12247                 }
12248                 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12249                 bp += strlen(bp);
12250         }
12251 
12252         (void) sprintf(bp, "\n\nResponse Queue");
12253         bp += strlen(bp);
12254         for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12255                 if (cnt % 8 == 0) {
12256                         (void) sprintf(bp, "\n%08x: ", cnt);
12257                         bp += strlen(bp);
12258                 }
12259                 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12260                 bp += strlen(bp);
12261         }
12262 
12263         (void) sprintf(bp, "\n");
12264 
12265         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12266 
12267         return (strlen(bufp));
12268 }
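/*
 * A note on the formatting idiom shared by the ASCII dump routines in this
 * file: each register block is printed as fixed-width hex fields with a
 * newline every eight values, and the output pointer is advanced by the
 * known field width rather than by strlen().  16-bit registers use
 * "%04x  " (6 bytes per field), 32-bit registers use "%08x " (9 bytes).
 * Minimal sketch of the 16-bit case; "nregs" and "reg16" are placeholder
 * names, not driver symbols:
 *
 *      for (cnt = 0; cnt < nregs; cnt++) {
 *              if (cnt % 8 == 0) {
 *                      *bp++ = '\n';
 *              }
 *              (void) sprintf(bp, "%04x  ", reg16[cnt]);
 *              bp += 6;
 *      }
 */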
12269 
12270 /*
12271  * ql_24xx_ascii_fw_dump
12272  *      Converts ISP24xx firmware binary dump to ASCII.
12273  *
12274  * Input:
12275  *      ha = adapter state pointer.
12276  *      bufp = buffer pointer.
12277  *
12278  * Returns:
12279  *      Amount of data buffer used.
12280  *
12281  * Context:
12282  *      Kernel context.
12283  */
12284 static size_t
12285 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12286 {
12287         uint32_t                cnt;
12288         caddr_t                 bp = bufp;
12289         ql_24xx_fw_dump_t       *fw = ha->ql_dump_ptr;
12290 
12291         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12292 
12293         (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12294             ha->fw_major_version, ha->fw_minor_version,
12295             ha->fw_subminor_version, ha->fw_attributes);
12296         bp += strlen(bp);
12297 
12298         (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12299 
12300         (void) strcat(bp, "\nHost Interface Registers");
12301         bp += strlen(bp);
12302         for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12303                 if (cnt % 8 == 0) {
12304                         (void) sprintf(bp++, "\n");
12305                 }
12306 
12307                 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12308                 bp += 9;
12309         }
12310 
12311         (void) sprintf(bp, "\n\nMailbox Registers");
 
12659                 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12660                 bp += strlen(bp);
12661                 /* show data address as a byte address, data as long words */
12662                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12663                         cnt_b = cnt * 4;
12664                         if (cnt_b % 32 == 0) {
12665                                 (void) sprintf(bp, "\n%08x: ",
12666                                     (int)(w64 + cnt_b));
12667                                 bp += 11;
12668                         }
12669                         (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12670                         bp += 9;
12671                 }
12672         }
12673 
12674         (void) sprintf(bp, "\n\n");
12675         bp += strlen(bp);
12676 
12677         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12678 
12679         QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12680 
12681         return (cnt);
12682 }
12683 
12684 /*
12685  * ql_2581_ascii_fw_dump
12686  *      Converts ISP25xx or ISP81xx firmware binary dump to ASCII.
12687  *
12688  * Input:
12689  *      ha = adapter state pointer.
12690  *      bufp = buffer pointer.
12691  *
12692  * Returns:
12693  *      Amount of data buffer used.
12694  *
12695  * Context:
12696  *      Kernel context.
12697  */
12698 static size_t
12699 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12700 {
12701         uint32_t                cnt;
12702         uint32_t                cnt1;
12703         caddr_t                 bp = bufp;
12704         ql_25xx_fw_dump_t       *fw = ha->ql_dump_ptr;
12705 
12706         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12707 
12708         (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12709             ha->fw_major_version, ha->fw_minor_version,
12710             ha->fw_subminor_version, ha->fw_attributes);
12711         bp += strlen(bp);
12712 
12713         (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12714         bp += strlen(bp);
12715 
12716         (void) sprintf(bp, "\nHostRisc Registers");
12717         bp += strlen(bp);
12718         for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12719                 if (cnt % 8 == 0) {
12720                         (void) sprintf(bp++, "\n");
12721                 }
12722                 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12723                 bp += 9;
12724         }
12725 
12726         (void) sprintf(bp, "\n\nPCIe Registers");
12727         bp += strlen(bp);
12728         for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12729                 if (cnt % 8 == 0) {
12730                         (void) sprintf(bp++, "\n");
12731                 }
12732                 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12733                 bp += 9;
12734         }
12735 
 
13002         for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13003                 if (cnt % 8 == 0) {
13004                         (void) sprintf(bp++, "\n");
13005                 }
13006                 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13007                 bp += 9;
13008         }
13009 
13010         (void) sprintf(bp, "\n\nLMC Registers");
13011         bp += strlen(bp);
13012         for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13013                 if (cnt % 8 == 0) {
13014                         (void) sprintf(bp++, "\n");
13015                 }
13016                 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13017                 bp += 9;
13018         }
13019 
13020         (void) sprintf(bp, "\n\nFPM Hardware Registers");
13021         bp += strlen(bp);
13022         cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13023             (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13024             (uint32_t)(sizeof (fw->fpm_hdw_reg));
13025         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13026                 if (cnt % 8 == 0) {
13027                         (void) sprintf(bp++, "\n");
13028                 }
13029                 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13030                 bp += 9;
13031         }
13032 
13033         (void) sprintf(bp, "\n\nFB Hardware Registers");
13034         bp += strlen(bp);
13035         cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13036             (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13037             (uint32_t)(sizeof (fw->fb_hdw_reg));
13038         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13039                 if (cnt % 8 == 0) {
13040                         (void) sprintf(bp++, "\n");
13041                 }
13042                 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13043                 bp += 9;
13044         }
13045 
13046         (void) sprintf(bp, "\n\nCode RAM");
13047         bp += strlen(bp);
13048         for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13049                 if (cnt % 8 == 0) {
13050                         (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13051                         bp += 11;
13052                 }
13053                 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13054                 bp += 9;
13055         }
13056 
13057         (void) sprintf(bp, "\n\nExternal Memory");
13058         bp += strlen(bp);
13059         for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13060                 if (cnt % 8 == 0) {
13061                         (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13062                         bp += 11;
13063                 }
13064                 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13065                 bp += 9;
13066         }
13067 
13068         (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13069         bp += strlen(bp);
13070 
13071         (void) sprintf(bp, "\n\nRequest Queue");
13072         bp += strlen(bp);
13073         for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13074                 if (cnt % 8 == 0) {
13075                         (void) sprintf(bp, "\n%08x: ", cnt);
13076                         bp += strlen(bp);
13077                 }
13078                 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13079                 bp += strlen(bp);
13080         }
13081 
13082         (void) sprintf(bp, "\n\nResponse Queue");
13083         bp += strlen(bp);
13084         for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13085                 if (cnt % 8 == 0) {
13086                         (void) sprintf(bp, "\n%08x: ", cnt);
13087                         bp += strlen(bp);
13088                 }
13089                 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13090                 bp += strlen(bp);
13091         }
13092 
13093         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13094             (ha->fwexttracebuf.bp != NULL)) {
13095                 uint32_t cnt_b = 0;
13096                 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13097 
13098                 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13099                 bp += strlen(bp);
13100                 /* show data address as a byte address, data as long words */
13101                 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13102                         cnt_b = cnt * 4;
13103                         if (cnt_b % 32 == 0) {
13104                                 (void) sprintf(bp, "\n%08x: ",
13105                                     (int)(w64 + cnt_b));
13106                                 bp += 11;
13107                         }
13108                         (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13109                         bp += 9;
13110                 }
13111         }
13112 
 
13118                 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13119                 bp += strlen(bp);
13120                 /* show data address as a byte address, data as long words */
13121                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13122                         cnt_b = cnt * 4;
13123                         if (cnt_b % 32 == 0) {
13124                                 (void) sprintf(bp, "\n%08x: ",
13125                                     (int)(w64 + cnt_b));
13126                                 bp += 11;
13127                         }
13128                         (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13129                         bp += 9;
13130                 }
13131         }
13132 
13133         (void) sprintf(bp, "\n\n");
13134         bp += strlen(bp);
13135 
13136         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13137 
13138         QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13139 
13140         return (cnt);
13141 }
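/*
 * Hypothetical caller sketch (not taken from this file): the ASCII
 * conversion routines return the number of bytes written, so a caller
 * holding a binary dump can allocate a text buffer, convert, and consume
 * "len" bytes of the result.  ASCII_DUMP_BUF_SIZE is an assumed constant;
 * the driver computes its own dump buffer sizes elsewhere.
 *
 *      size_t  len;
 *      caddr_t txt = kmem_zalloc(ASCII_DUMP_BUF_SIZE, KM_SLEEP);
 *
 *      len = ql_2581_ascii_fw_dump(ha, txt);
 *      ...     use/copy out "len" bytes from txt ...
 *      kmem_free(txt, ASCII_DUMP_BUF_SIZE);
 */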
13142 
13143 /*
13144  * ql_2200_binary_fw_dump
13145  *
13146  * Input:
13147  *      ha:     adapter state pointer.
13148  *      fw:     firmware dump context pointer.
13149  *
13150  * Returns:
13151  *      ql local function return status code.
13152  *
13153  * Context:
13154  *      Interrupt or Kernel context, no mailbox commands allowed.
13155  */
13156 static int
13157 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13158 {
13159         uint32_t        cnt;
13160         uint16_t        risc_address;
13161         clock_t         timer;
13162         mbx_cmd_t       mc;
13163         mbx_cmd_t       *mcp = &mc;
13164         int             rval = QL_SUCCESS;
13165 
13166         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13167 
13168         /* Disable ISP interrupts. */
13169         WRT16_IO_REG(ha, ictrl, 0);
13170         ADAPTER_STATE_LOCK(ha);
13171         ha->flags &= ~INTERRUPTS_ENABLED;
13172         ADAPTER_STATE_UNLOCK(ha);
13173 
13174         /* Release mailbox registers. */
13175         WRT16_IO_REG(ha, semaphore, 0);
13176 
13177         /* Pause RISC. */
13178         WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13179         timer = 30000;
13180         while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13181                 if (timer-- != 0) {
13182                         drv_usecwait(MILLISEC);
13183                 } else {
13184                         rval = QL_FUNCTION_TIMEOUT;
13185                         break;
13186                 }
13187         }
13188 
13189         if (rval == QL_SUCCESS) {
13190                 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13191                     sizeof (fw->pbiu_reg) / 2, 16);
13192 
 
13255                 /* Select frame buffer registers. */
13256                 WRT16_IO_REG(ha, ctrl_status, 0x10);
13257 
13258                 /* Reset frame buffer FIFOs. */
13259                 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13260 
13261                 /* Select RISC module registers. */
13262                 WRT16_IO_REG(ha, ctrl_status, 0);
13263 
13264                 /* Reset RISC module. */
13265                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13266 
13267                 /* Reset ISP semaphore. */
13268                 WRT16_IO_REG(ha, semaphore, 0);
13269 
13270                 /* Release RISC module. */
13271                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13272 
13273                 /* Wait for RISC to recover from reset. */
13274                 timer = 30000;
13275                 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13276                         if (timer-- != 0) {
13277                                 drv_usecwait(MILLISEC);
13278                         } else {
13279                                 rval = QL_FUNCTION_TIMEOUT;
13280                                 break;
13281                         }
13282                 }
13283 
13284                 /* Disable RISC pause on FPM parity error. */
13285                 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13286         }
13287 
13288         if (rval == QL_SUCCESS) {
13289                 /* Pause RISC. */
13290                 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13291                 timer = 30000;
13292                 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13293                         if (timer-- != 0) {
13294                                 drv_usecwait(MILLISEC);
13295                         } else {
 
13329                                                 break;
13330                                         }
13331                                         WRT16_IO_REG(ha, hccr,
13332                                             HC_CLR_RISC_INT);
13333                                 }
13334                                 drv_usecwait(5);
13335                         }
13336 
13337                         if (timer == 0) {
13338                                 rval = QL_FUNCTION_TIMEOUT;
13339                         } else {
13340                                 rval = mcp->mb[0];
13341                         }
13342 
13343                         if (rval != QL_SUCCESS) {
13344                                 break;
13345                         }
13346                 }
13347         }
13348 
13349         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13350 
13351         return (rval);
13352 }
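/*
 * Timing note on the pause/poll idiom used above and in the other binary
 * dump routines: 30000 iterations of drv_usecwait(MILLISEC) bounds each
 * wait at roughly 30 seconds.  The idiom, in minimal form:
 *
 *      timer = 30000;
 *      while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
 *              if (timer-- != 0) {
 *                      drv_usecwait(MILLISEC);
 *              } else {
 *                      rval = QL_FUNCTION_TIMEOUT;
 *                      break;
 *              }
 *      }
 */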
13353 
13354 /*
13355  * ql_2300_binary_fw_dump
13356  *
13357  * Input:
13358  *      ha:     adapter state pointer.
13359  *      fw:     firmware dump context pointer.
13360  *
13361  * Returns:
13362  *      ql local function return status code.
13363  *
13364  * Context:
13365  *      Interrupt or Kernel context, no mailbox commands allowed.
13366  */
13367 static int
13368 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13369 {
13370         clock_t timer;
13371         int     rval = QL_SUCCESS;
13372 
13373         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13374 
13375         /* Disable ISP interrupts. */
13376         WRT16_IO_REG(ha, ictrl, 0);
13377         ADAPTER_STATE_LOCK(ha);
13378         ha->flags &= ~INTERRUPTS_ENABLED;
13379         ADAPTER_STATE_UNLOCK(ha);
13380 
13381         /* Release mailbox registers. */
13382         WRT16_IO_REG(ha, semaphore, 0);
13383 
13384         /* Pause RISC. */
13385         WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13386         timer = 30000;
13387         while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13388                 if (timer-- != 0) {
13389                         drv_usecwait(MILLISEC);
13390                 } else {
13391                         rval = QL_FUNCTION_TIMEOUT;
13392                         break;
13393                 }
13394         }
13395 
13396         if (rval == QL_SUCCESS) {
13397                 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13398                     sizeof (fw->pbiu_reg) / 2, 16);
13399 
 
13468                 /* Select frame buffer registers. */
13469                 WRT16_IO_REG(ha, ctrl_status, 0x10);
13470 
13471                 /* Reset frame buffer FIFOs. */
13472                 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13473 
13474                 /* Select RISC module registers. */
13475                 WRT16_IO_REG(ha, ctrl_status, 0);
13476 
13477                 /* Reset RISC module. */
13478                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13479 
13480                 /* Reset ISP semaphore. */
13481                 WRT16_IO_REG(ha, semaphore, 0);
13482 
13483                 /* Release RISC module. */
13484                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13485 
13486                 /* Wait for RISC to recover from reset. */
13487                 timer = 30000;
13488                 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13489                         if (timer-- != 0) {
13490                                 drv_usecwait(MILLISEC);
13491                         } else {
13492                                 rval = QL_FUNCTION_TIMEOUT;
13493                                 break;
13494                         }
13495                 }
13496 
13497                 /* Disable RISC pause on FPM parity error. */
13498                 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13499         }
13500 
13501         /* Get RISC SRAM. */
13502         if (rval == QL_SUCCESS) {
13503                 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13504         }
13505         /* Get STACK SRAM. */
13506         if (rval == QL_SUCCESS) {
13507                 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13508         }
13509         /* Get DATA SRAM. */
13510         if (rval == QL_SUCCESS) {
13511                 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13512         }
13513 
13514         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13515 
13516         return (rval);
13517 }
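/*
 * Map of the ISP2300 RISC SRAM regions captured above, derived from the
 * ql_read_risc_ram() calls (addresses and lengths are 16-bit word units):
 *
 *      region          start           words           destination
 *      code RAM        0x00800         0xf800          fw->risc_ram
 *      stack RAM       0x10000         0x00800         fw->stack_ram
 *      data RAM        0x10800         0xf800          fw->data_ram
 */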
13518 
13519 /*
13520  * ql_24xx_binary_fw_dump
13521  *
13522  * Input:
13523  *      ha:     adapter state pointer.
13524  *      fw:     firmware dump context pointer.
13525  *
13526  * Returns:
13527  *      ql local function return status code.
13528  *
13529  * Context:
13530  *      Interrupt or Kernel context, no mailbox commands allowed.
13531  */
13532 static int
13533 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13534 {
13535         uint32_t        *reg32;
13536         void            *bp;
13537         clock_t         timer;
13538         int             rval = QL_SUCCESS;
13539 
13540         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13541 
13542         fw->hccr = RD32_IO_REG(ha, hccr);
13543 
13544         /* Pause RISC. */
13545         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13546                 /* Disable ISP interrupts. */
13547                 WRT16_IO_REG(ha, ictrl, 0);
13548 
13549                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13550                 for (timer = 30000;
13551                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13552                     rval == QL_SUCCESS; timer--) {
13553                         if (timer) {
13554                                 drv_usecwait(100);
13555                         } else {
13556                                 rval = QL_FUNCTION_TIMEOUT;
13557                         }
13558                 }
13559         }
13560 
13561         if (rval == QL_SUCCESS) {
13562                 /* Host interface registers. */
13563                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13564                     sizeof (fw->host_reg) / 4, 32);
13565 
13566                 /* Disable ISP interrupts. */
13567                 WRT32_IO_REG(ha, ictrl, 0);
13568                 RD32_IO_REG(ha, ictrl);
13569                 ADAPTER_STATE_LOCK(ha);
13570                 ha->flags &= ~INTERRUPTS_ENABLED;
13571                 ADAPTER_STATE_UNLOCK(ha);
13572 
13573                 /* Shadow registers. */
13574 
13575                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13576                 RD32_IO_REG(ha, io_base_addr);
13577 
13578                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13579                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13580                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13581                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13582 
13583                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13584                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13585                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13586                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13587 
13588                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13589                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
13590                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13591                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
 
13848                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13849                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
13850                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13851                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
13852                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13853                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
13854                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13855                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
13856                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13857                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
13858                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13859                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
13860                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13861                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13862                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13863         }
13864 
13865         /* Get the request queue */
13866         if (rval == QL_SUCCESS) {
13867                 uint32_t        cnt;
13868                 uint32_t        *w32 = (uint32_t *)ha->request_ring_bp;
13869 
13870                 /* Sync DMA buffer. */
13871                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13872                     REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13873                     DDI_DMA_SYNC_FORKERNEL);
13874 
13875                 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13876                         fw->req_q[cnt] = *w32++;
13877                         LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13878                 }
13879         }
13880 
13881         /* Get the response queue */
13882         if (rval == QL_SUCCESS) {
13883                 uint32_t        cnt;
13884                 uint32_t        *w32 = (uint32_t *)ha->response_ring_bp;
13885 
13886                 /* Sync DMA buffer. */
13887                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13888                     RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13889                     DDI_DMA_SYNC_FORKERNEL);
13890 
13891                 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13892                         fw->rsp_q[cnt] = *w32++;
13893                         LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13894                 }
13895         }
13896 
13897         /* Reset RISC. */
13898         ql_reset_chip(ha);
13899 
13900         /* Memory. */
13901         if (rval == QL_SUCCESS) {
13902                 /* Code RAM. */
13903                 rval = ql_read_risc_ram(ha, 0x20000,
13904                     sizeof (fw->code_ram) / 4, fw->code_ram);
13905         }
13906         if (rval == QL_SUCCESS) {
13907                 /* External Memory. */
13908                 rval = ql_read_risc_ram(ha, 0x100000,
13909                     ha->fw_ext_memory_size / 4, fw->ext_mem);
 
13930         if (rval == QL_SUCCESS) {
13931                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13932                     (ha->fwfcetracebuf.bp != NULL)) {
13933                         uint32_t        cnt;
13934                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
13935 
13936                         /* Sync DMA buffer. */
13937                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13938                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13939 
13940                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13941                                 fw->fce_trace_buf[cnt] = *w32++;
13942                         }
13943                 }
13944         }
13945 
13946         if (rval != QL_SUCCESS) {
13947                 EL(ha, "failed=%xh\n", rval);
13948         } else {
13949                 /*EMPTY*/
13950                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13951         }
13952 
13953         return (rval);
13954 }
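/*
 * The shadow-register capture in the 24xx/25xx/81xx dump routines is one
 * three-step sequence repeated with an incrementing selector: select
 * window 0x0F70 via io_base_addr, write 0xB0000000 | (n << 20) to offset
 * 0xF0, then read the shadowed value back from offset 0xFC.  Loop form
 * shown for illustration only (the driver unrolls it); the count here
 * covers only the reads shown above:
 *
 *      uint32_t        n;
 *      uint32_t        *sel = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
 *      uint32_t        *val = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
 *
 *      WRT32_IO_REG(ha, io_base_addr, 0x0F70);
 *      for (n = 0; n < 3; n++) {
 *              WRT_REG_DWORD(ha, sel, 0xB0000000 | (n << 20));
 *              fw->shadow_reg[n] = RD_REG_DWORD(ha, val);
 *      }
 */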
13955 
13956 /*
13957  * ql_25xx_binary_fw_dump
13958  *
13959  * Input:
13960  *      ha:     adapter state pointer.
13961  *      fw:     firmware dump context pointer.
13962  *
13963  * Returns:
13964  *      ql local function return status code.
13965  *
13966  * Context:
13967  *      Interrupt or Kernel context, no mailbox commands allowed.
13968  */
13969 static int
13970 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13971 {
13972         uint32_t        *reg32;
13973         void            *bp;
13974         clock_t         timer;
13975         int             rval = QL_SUCCESS;
13976 
13977         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13978 
13979         fw->r2h_status = RD32_IO_REG(ha, risc2host);
13980 
13981         /* Pause RISC. */
13982         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13983                 /* Disable ISP interrupts. */
13984                 WRT16_IO_REG(ha, ictrl, 0);
13985 
13986                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13987                 for (timer = 30000;
13988                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13989                     rval == QL_SUCCESS; timer--) {
13990                         if (timer) {
13991                                 drv_usecwait(100);
13992                                 if (timer % 10000 == 0) {
13993                                         EL(ha, "risc pause %d\n", timer);
13994                                 }
13995                         } else {
13996                                 EL(ha, "risc pause timeout\n");
13997                                 rval = QL_FUNCTION_TIMEOUT;
13998                         }
13999                 }
14000         }
14001 
14002         if (rval == QL_SUCCESS) {
14003 
14004                 /* Host Interface registers */
14005 
14006                 /* HostRisc registers. */
14007                 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14008                 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14009                     16, 32);
14010                 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14011                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14012 
14013                 /* PCIe registers. */
14014                 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14015                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14016                 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14017                     3, 32);
14018                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14019                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14020 
14021                 /* Host interface registers. */
14022                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14023                     sizeof (fw->host_reg) / 4, 32);
14024 
14025                 /* Disable ISP interrupts. */
14026 
14027                 WRT32_IO_REG(ha, ictrl, 0);
14028                 RD32_IO_REG(ha, ictrl);
14029                 ADAPTER_STATE_LOCK(ha);
14030                 ha->flags &= ~INTERRUPTS_ENABLED;
14031                 ADAPTER_STATE_UNLOCK(ha);
14032 
14033                 /* Shadow registers. */
14034 
14035                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14036                 RD32_IO_REG(ha, io_base_addr);
14037 
14038                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14039                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14040                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14041                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14042 
14043                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14044                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14045                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14046                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14047 
14048                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14049                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14050                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14051                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14052 
 
14380                 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14381                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14382                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14383                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14384                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14385                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14386                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14387                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14388                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14389                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14390                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14391                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14392                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14393                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14394                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14395                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14396                 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14397                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14398         }
14399 
14400         /* Get the request queue */
14401         if (rval == QL_SUCCESS) {
14402                 uint32_t        cnt;
14403                 uint32_t        *w32 = (uint32_t *)ha->request_ring_bp;
14404 
14405                 /* Sync DMA buffer. */
14406                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14407                     REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14408                     DDI_DMA_SYNC_FORKERNEL);
14409 
14410                 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14411                         fw->req_q[cnt] = *w32++;
14412                         LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14413                 }
14414         }
14415 
14416                 /* Get the response queue */
14417         if (rval == QL_SUCCESS) {
14418                 uint32_t        cnt;
14419                 uint32_t        *w32 = (uint32_t *)ha->response_ring_bp;
14420 
14421                 /* Sync DMA buffer. */
14422                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14423                     RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14424                     DDI_DMA_SYNC_FORKERNEL);
14425 
14426                 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14427                         fw->rsp_q[cnt] = *w32++;
14428                         LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14429                 }
14430         }
14431 
14432         /* Reset RISC. */
14433 
14434         ql_reset_chip(ha);
14435 
14436         /* Memory. */
14437 
14438         if (rval == QL_SUCCESS) {
14439                 /* Code RAM. */
14440                 rval = ql_read_risc_ram(ha, 0x20000,
14441                     sizeof (fw->code_ram) / 4, fw->code_ram);
14442         }
14443         if (rval == QL_SUCCESS) {
14444                 /* External Memory. */
14445                 rval = ql_read_risc_ram(ha, 0x100000,
14446                     ha->fw_ext_memory_size / 4, fw->ext_mem);
14447         }
14448 
14449         /* Get the FC event trace buffer */
14450         if (rval == QL_SUCCESS) {
14451                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14452                     (ha->fwfcetracebuf.bp != NULL)) {
14453                         uint32_t        cnt;
14454                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
14455 
14456                         /* Sync DMA buffer. */
14457                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14458                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14459 
14460                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14461                                 fw->fce_trace_buf[cnt] = *w32++;
14462                         }
14463                 }
14464         }
14465 
14466         /* Get the extended trace buffer */
14467         if (rval == QL_SUCCESS) {
14468                 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14469                     (ha->fwexttracebuf.bp != NULL)) {
14470                         uint32_t        cnt;
14471                         uint32_t        *w32 = ha->fwexttracebuf.bp;
14472 
14473                         /* Sync DMA buffer. */
14474                         (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14475                             FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14476 
14477                         for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14478                                 fw->ext_trace_buf[cnt] = *w32++;
14479                         }
14480                 }
14481         }
14482 
14483         if (rval != QL_SUCCESS) {
14484                 EL(ha, "failed=%xh\n", rval);
14485         } else {
14486                 /*EMPTY*/
14487                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14488         }
14489 
14490         return (rval);
14491 }
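/*
 * The request/response queue and trace-buffer captures above share one
 * pattern: sync the DMA buffer for kernel access, then copy it out one
 * 32-bit word at a time, byte-swapping to little-endian where the
 * destination requires it.  Sketch of the idiom; dma_handle, offset, len,
 * src_bp and dst are placeholders:
 *
 *      uint32_t        cnt;
 *      uint32_t        *w32 = (uint32_t *)src_bp;
 *
 *      (void) ddi_dma_sync(dma_handle, offset, len,
 *          DDI_DMA_SYNC_FORKERNEL);
 *      for (cnt = 0; cnt < len / 4; cnt++) {
 *              dst[cnt] = *w32++;
 *              LITTLE_ENDIAN_32(&dst[cnt]);
 *      }
 */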
14492 
14493 /*
14494  * ql_81xx_binary_fw_dump
14495  *
14496  * Input:
14497  *      ha:     adapter state pointer.
14498  *      fw:     firmware dump context pointer.
14499  *
14500  * Returns:
14501  *      ql local function return status code.
14502  *
14503  * Context:
14504  *      Interrupt or Kernel context, no mailbox commands allowed.
14505  */
14506 static int
14507 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14508 {
14509         uint32_t        *reg32;
14510         void            *bp;
14511         clock_t         timer;
14512         int             rval = QL_SUCCESS;
14513 
14514         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14515 
14516         fw->r2h_status = RD32_IO_REG(ha, risc2host);
14517 
14518         /* Pause RISC. */
14519         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14520                 /* Disable ISP interrupts. */
14521                 WRT16_IO_REG(ha, ictrl, 0);
14522 
14523                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14524                 for (timer = 30000;
14525                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14526                     rval == QL_SUCCESS; timer--) {
14527                         if (timer) {
14528                                 drv_usecwait(100);
14529                                 if (timer % 10000 == 0) {
14530                                         EL(ha, "risc pause %d\n", timer);
14531                                 }
14532                         } else {
14533                                 EL(ha, "risc pause timeout\n");
14534                                 rval = QL_FUNCTION_TIMEOUT;
14535                         }
14536                 }
14537         }
14538 
14539         if (rval == QL_SUCCESS) {
14540 
14541                 /* Host Interface registers */
14542 
14543                 /* HostRisc registers. */
14544                 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14545                 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14546                     16, 32);
14547                 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14548                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14549 
14550                 /* PCIe registers. */
14551                 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14552                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14553                 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14554                     3, 32);
14555                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14556                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14557 
14558                 /* Host interface registers. */
14559                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14560                     sizeof (fw->host_reg) / 4, 32);
14561 
14562                 /* Disable ISP interrupts. */
14563 
14564                 WRT32_IO_REG(ha, ictrl, 0);
14565                 RD32_IO_REG(ha, ictrl);
14566                 ADAPTER_STATE_LOCK(ha);
14567                 ha->flags &= ~INTERRUPTS_ENABLED;
14568                 ADAPTER_STATE_UNLOCK(ha);
14569 
14570                 /* Shadow registers. */
14571 
14572                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14573                 RD32_IO_REG(ha, io_base_addr);
14574 
14575                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14576                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14577                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14578                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14579 
14580                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14581                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14582                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14583                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14584 
14585                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14586                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14587                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14588                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14589 
 
14923                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14924                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14925                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14926                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14927                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14928                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14929                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14930                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14931                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14932                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14933                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14934                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14935                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14936                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14937                 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14938                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14939                 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14940                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14941         }
14942 
14943         /* Get the request queue */
14944         if (rval == QL_SUCCESS) {
14945                 uint32_t        cnt;
14946                 uint32_t        *w32 = (uint32_t *)ha->request_ring_bp;
14947 
14948                 /* Sync DMA buffer. */
14949                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14950                     REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14951                     DDI_DMA_SYNC_FORKERNEL);
14952 
14953                 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14954                         fw->req_q[cnt] = *w32++;
14955                         LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14956                 }
14957         }
14958 
14959         /* Get the response queue */
14960         if (rval == QL_SUCCESS) {
14961                 uint32_t        cnt;
14962                 uint32_t        *w32 = (uint32_t *)ha->response_ring_bp;
14963 
14964                 /* Sync DMA buffer. */
14965                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14966                     RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14967                     DDI_DMA_SYNC_FORKERNEL);
14968 
14969                 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14970                         fw->rsp_q[cnt] = *w32++;
14971                         LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14972                 }
14973         }
14974 
14975         /* Reset RISC. */
14976 
14977         ql_reset_chip(ha);
14978 
14979         /* Memory. */
14980 
14981         if (rval == QL_SUCCESS) {
14982                 /* Code RAM. */
14983                 rval = ql_read_risc_ram(ha, 0x20000,
14984                     sizeof (fw->code_ram) / 4, fw->code_ram);
14985         }
14986         if (rval == QL_SUCCESS) {
14987                 /* External Memory. */
14988                 rval = ql_read_risc_ram(ha, 0x100000,
14989                     ha->fw_ext_memory_size / 4, fw->ext_mem);
14990         }
14991 
14992         /* Get the FC event trace buffer */
14993         if (rval == QL_SUCCESS) {
14994                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14995                     (ha->fwfcetracebuf.bp != NULL)) {
14996                         uint32_t        cnt;
14997                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
14998 
14999                         /* Sync DMA buffer. */
15000                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
15001                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
15002 
15003                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
15004                                 fw->fce_trace_buf[cnt] = *w32++;
15005                         }
15006                 }
15007         }
15008 
15009         /* Get the extended trace buffer */
15010         if (rval == QL_SUCCESS) {
15011                 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
15012                     (ha->fwexttracebuf.bp != NULL)) {
15013                         uint32_t        cnt;
15014                         uint32_t        *w32 = ha->fwexttracebuf.bp;
15015 
15016                         /* Sync DMA buffer. */
15017                         (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15018                             FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15019 
15020                         for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15021                                 fw->ext_trace_buf[cnt] = *w32++;
15022                         }
15023                 }
15024         }
15025 
15026         if (rval != QL_SUCCESS) {
15027                 EL(ha, "failed=%xh\n", rval);
15028         } else {
15029                 /*EMPTY*/
15030                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15031         }
15032 
15033         return (rval);
15034 }
15035 
15036 /*
15037  * ql_read_risc_ram
15038  *      Reads RISC RAM one word at a time.
15039  *      RISC interrupts must be disabled when this routine is called.
15040  *
15041  * Input:
15042  *      ha:     adapter state pointer.
15043  *      risc_address:   RISC code start address.
15044  *      len:            Number of words.
15045  *      buf:            buffer pointer.
15046  *
15047  * Returns:
15048  *      ql local function return status code.
15049  *
15050  * Context:
15051  *      Interrupt or Kernel context, no mailbox commands allowed.
15052  */
15053 static int
15054 ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
15055     void *buf)
15056 {
15057         uint32_t        cnt;
15058         uint16_t        stat;
15059         clock_t         timer;
15060         uint16_t        *buf16 = (uint16_t *)buf;
15061         uint32_t        *buf32 = (uint32_t *)buf;
15062         int             rval = QL_SUCCESS;
15063 
15064         for (cnt = 0; cnt < len; cnt++, risc_address++) {
15065                 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
15066                 WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
15067                 WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
15068                 if (CFG_IST(ha, CFG_CTRL_8021)) {
15069                         WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
15070                 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15071                         WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
15072                 } else {
15073                         WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
15074                 }
15075                 for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
15076                         if (INTERRUPT_PENDING(ha)) {
15077                                 stat = (uint16_t)
15078                                     (RD16_IO_REG(ha, risc2host) & 0xff);
15079                                 if ((stat == 1) || (stat == 0x10)) {
15080                                         if (CFG_IST(ha, CFG_CTRL_24258081)) {
15081                                                 buf32[cnt] = SHORT_TO_LONG(
15082                                                     RD16_IO_REG(ha,
15083                                                     mailbox_out[2]),
15084                                                     RD16_IO_REG(ha,
15085                                                     mailbox_out[3]));
15086                                         } else {
15087                                                 buf16[cnt] =
15088                                                     RD16_IO_REG(ha,
15089                                                     mailbox_out[2]);
15090                                         }
15091 
15092                                         break;
15093                                 } else if ((stat == 2) || (stat == 0x11)) {
15094                                         rval = RD16_IO_REG(ha, mailbox_out[0]);
15095                                         break;
15096                                 }
15097                                 if (CFG_IST(ha, CFG_CTRL_8021)) {
15098                                         ql_8021_clr_hw_intr(ha);
15099                                         ql_8021_clr_fw_intr(ha);
15100                                 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15101                                         WRT32_IO_REG(ha, hccr,
15102                                             HC24_CLR_RISC_INT);
15103                                         RD32_IO_REG(ha, hccr);
15104                                 } else {
15105                                         WRT16_IO_REG(ha, hccr,
15106                                             HC_CLR_RISC_INT);
15107                                 }
15108                         }
15109                         drv_usecwait(5);
15110                 }
15111                 if (CFG_IST(ha, CFG_CTRL_8021)) {
15112                         ql_8021_clr_hw_intr(ha);
15113                         ql_8021_clr_fw_intr(ha);
15114                 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15115                         WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
15116                         RD32_IO_REG(ha, hccr);
15117                 } else {
15118                         WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
15119                         WRT16_IO_REG(ha, semaphore, 0);
15120                 }
15121 
15122                 if (timer == 0) {
15123                         rval = QL_FUNCTION_TIMEOUT;
15124                 }
15125         }
15126 
15127         return (rval);
15128 }
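/*
 * Usage sketch, mirroring the calls made by the binary dump routines
 * above.  The width of the words stored in "buf" depends on the ISP
 * generation (16-bit for the older chips, 32-bit for 24xx/25xx/81xx), so
 * "len" is given in words of the matching width.  For example, capturing
 * 24xx code RAM starting at word address 0x20000:
 *
 *      rval = ql_read_risc_ram(ha, 0x20000,
 *          sizeof (fw->code_ram) / 4, fw->code_ram);
 */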
15129 
15130 /*
15131  * ql_read_regs
15132  *      Reads adapter registers to buffer.
15133  *
15134  * Input:
15135  *      ha:     adapter state pointer.
15136  *      buf:    buffer pointer.
15137  *      reg:    start address.
15138  *      count:  number of registers.
15139  *      wds:    register size.
 
15170                 while (count--) {
15171                         *bp8++ = RD_REG_BYTE(ha, reg8++);
15172                 }
15173                 return (bp8);
15174         default:
15175                 EL(ha, "Unknown word size=%d\n", wds);
15176                 return (buf);
15177         }
15178 }
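/*
 * ql_read_regs() returns the advanced destination pointer, which is what
 * lets the dump routines walk register windows by chaining calls: select
 * a window through io_base_addr, read 16 dwords at iobase + 0xC0, then
 * move to the next window.  Minimal sketch; "some_reg_block" is a
 * placeholder destination and the window values are examples taken from
 * the dump code above:
 *
 *      void    *bp = fw->some_reg_block;
 *
 *      WRT32_IO_REG(ha, io_base_addr, 0x6100);
 *      bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
 *      WRT32_IO_REG(ha, io_base_addr, 0x6130);
 *      bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
 */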
15179 
15180 static int
15181 ql_save_config_regs(dev_info_t *dip)
15182 {
15183         ql_adapter_state_t      *ha;
15184         int                     ret;
15185         ql_config_space_t       chs;
15186         caddr_t                 prop = "ql-config-space";
15187 
15188         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15189         if (ha == NULL) {
15190                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15191                     ddi_get_instance(dip));
15192                 return (DDI_FAILURE);
15193         }
15194 
15195         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15196 
15197         /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15198         if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15199             1) {
15200                 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15201                 return (DDI_SUCCESS);
15202         }
15203 
15204         chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15205         chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15206             PCI_CONF_HEADER);
15207         if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15208                 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15209                     PCI_BCNF_BCNTRL);
15210         }
15211 
15212         chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15213             PCI_CONF_CACHE_LINESZ);
15214 
15215         chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15216             PCI_CONF_LATENCY_TIMER);
15217 
15218         if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15219                 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15220                     PCI_BCNF_LATENCY_TIMER);
15221         }
15222 
15223         chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15224         chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15225         chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15226         chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15227         chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15228         chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15229 
15230         /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15231         ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15232             (uchar_t *)&chs, sizeof (ql_config_space_t));
15233 
15234         if (ret != DDI_PROP_SUCCESS) {
15235                 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15236                     QL_NAME, ddi_get_instance(dip), prop);
15237                 return (DDI_FAILURE);
15238         }
15239 
15240         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15241 
15242         return (DDI_SUCCESS);
15243 }
15244 
15245 static int
15246 ql_restore_config_regs(dev_info_t *dip)
15247 {
15248         ql_adapter_state_t      *ha;
15249         uint_t                  elements;
15250         ql_config_space_t       *chs_p;
15251         caddr_t                 prop = "ql-config-space";
15252 
15253         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15254         if (ha == NULL) {
15255                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15256                     ddi_get_instance(dip));
15257                 return (DDI_FAILURE);
15258         }
15259 
15260         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15261 
15262         /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15263         if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
15264             DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
15265             (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
15266                 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15267                 return (DDI_FAILURE);
15268         }
15269 
15270         ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
15271 
15272         if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15273                 ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
15274                     chs_p->chs_bridge_control);
15275         }
15276 
15277         ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
15278             chs_p->chs_cache_line_size);
15279 
15280         ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
15281             chs_p->chs_latency_timer);
15282 
15283         if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15284                 ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
15285                     chs_p->chs_sec_latency_timer);
15286         }
15287 
15288         ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
15289         ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
15290         ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
15291         ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
15292         ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
15293         ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
15294 
15295         ddi_prop_free(chs_p);
15296 
15297         /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15298         if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
15299                 cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
15300                     QL_NAME, ddi_get_instance(dip), prop);
15301         }
15302 
15303         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15304 
15305         return (DDI_SUCCESS);
15306 }
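
/*
 * Illustrative sketch (not driver code): the config-space contents saved
 * above under the "ql-config-space" property are read back by
 * ql_restore_config_regs(), so the pair is typically used to bracket a
 * power-state transition in the DDI_SUSPEND/DDI_RESUME paths.  The name
 * of the save routine below is an assumption for the example.
 *
 *	(void) ql_save_config_regs(dip);	(before powering down)
 *	...
 *	(void) ql_restore_config_regs(dip);	(after powering back up)
 */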
15307 
15308 uint8_t
15309 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15310 {
15311         if (CFG_IST(ha, CFG_SBUS_CARD)) {
15312                 return (ddi_get8(ha->sbus_config_handle,
15313                     (uint8_t *)(ha->sbus_config_base + off)));
15314         }
15315 
15316 #ifdef KERNEL_32
15317         return (pci_config_getb(ha->pci_handle, off));
15318 #else
15319         return (pci_config_get8(ha->pci_handle, off));
15320 #endif
15321 }
15322 
15323 uint16_t
 
15394 #endif
15395         }
15396 }
15397 
15398 /*
15399  * ql_halt
15400  *      Waits for outstanding commands to finish and aborts any that
15401  *      do not complete.  If powering down to D3, the ISP chip is then
15402  *      reset.
15403  *
15404  * Input:
15405  *      ha:     adapter state pointer.
15406  *      pwr:    power state.
15407  *
15408  * Context:
15409  *      Kernel context.
15410  */
15411 static void
15412 ql_halt(ql_adapter_state_t *ha, int pwr)
15413 {
15414         uint32_t        cnt;
15415         ql_tgt_t        *tq;
15416         ql_srb_t        *sp;
15417         uint16_t        index;
15418         ql_link_t       *link;
15419 
15420         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15421 
15422         /* Wait for all commands running to finish. */
15423         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
15424                 for (link = ha->dev[index].first; link != NULL;
15425                     link = link->next) {
15426                         tq = link->base_address;
15427                         (void) ql_abort_device(ha, tq, 0);
15428 
15429                         /* Wait up to 30 seconds for commands to finish. */
15430                         for (cnt = 3000; cnt != 0; cnt--) {
15431                                 /* Acquire device queue lock. */
15432                                 DEVICE_QUEUE_LOCK(tq);
15433                                 if (tq->outcnt == 0) {
15434                                         /* Release device queue lock. */
15435                                         DEVICE_QUEUE_UNLOCK(tq);
15436                                         break;
15437                                 } else {
15438                                         /* Release device queue lock. */
15439                                         DEVICE_QUEUE_UNLOCK(tq);
15440                                         ql_delay(ha, 10000);
15441                                 }
15442                         }
15443 
15444                         /* Finish any commands waiting for more status. */
15445                         if (ha->status_srb != NULL) {
15446                                 sp = ha->status_srb;
15447                                 ha->status_srb = NULL;
15448                                 sp->cmd.next = NULL;
15449                                 ql_done(&sp->cmd);
15450                         }
15451 
15452                         /* Abort commands that did not finish. */
15453                         if (cnt == 0) {
15454                                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
15455                                     cnt++) {
15456                                         if (ha->pending_cmds.first != NULL) {
15457                                                 ql_start_iocb(ha, NULL);
15458                                                 cnt = 1;
15459                                         }
15460                                         sp = ha->outstanding_cmds[cnt];
15461                                         if (sp != NULL &&
15462                                             sp->lun_queue->target_queue ==
15463                                             tq) {
15464                                                 (void) ql_abort((opaque_t)ha,
15465                                                     sp->pkt, 0);
15466                                         }
15467                                 }
15468                         }
15469                 }
15470         }
15471 
15472         /* Shutdown IP. */
15473         if (ha->flags & IP_INITIALIZED) {
15474                 (void) ql_shutdown_ip(ha);
15475         }
15476 
15477         /* Stop all timers. */
15478         ADAPTER_STATE_LOCK(ha);
15479         ha->port_retry_timer = 0;
15480         ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
15481         ha->watchdog_timer = 0;
15482         ADAPTER_STATE_UNLOCK(ha);
15483 
15484         if (pwr == PM_LEVEL_D3) {
15485                 ADAPTER_STATE_LOCK(ha);
15486                 ha->flags &= ~ONLINE;
15487                 ADAPTER_STATE_UNLOCK(ha);
15488 
15489                 /* Reset ISP chip. */
15490                 ql_reset_chip(ha);
15491         }
15492 
15493         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15494 }
15495 
15496 /*
15497  * ql_get_dma_mem
15498  *      Function used to allocate dma memory.
15499  *
15500  * Input:
15501  *      ha:                     adapter state pointer.
15502  *      mem:                    pointer to dma memory object.
15503  *      size:                   size of the request in bytes
15504  *
15505  * Returns:
15506  *      ql local function return status code.
15507  *
15508  * Context:
15509  *      Kernel context.
15510  */
15511 int
15512 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15513     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15514 {
15515         int     rval;
15516 
15517         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15518 
15519         mem->size = size;
15520         mem->type = allocation_type;
15521         mem->cookie_count = 1;
15522 
15523         switch (alignment) {
15524         case QL_DMA_DATA_ALIGN:
15525                 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15526                 break;
15527         case QL_DMA_RING_ALIGN:
15528                 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15529                 break;
15530         default:
15531                 EL(ha, "failed, unknown alignment type %x\n", alignment);
15532                 break;
15533         }
15534 
15535         if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15536                 ql_free_phys(ha, mem);
15537                 EL(ha, "failed, alloc_phys=%xh\n", rval);
15538         }
15539 
15540         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15541 
15542         return (rval);
15543 }
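
/*
 * Usage sketch for ql_get_dma_mem()/ql_free_phys() (illustrative only,
 * not driver code).  The 4096 byte size and the LITTLE_ENDIAN_DMA /
 * QL_DMA_RING_ALIGN choices are assumptions made for this example.
 *
 *	dma_mem_t	mem = {0};
 *
 *	if (ql_get_dma_mem(ha, &mem, 4096, LITTLE_ENDIAN_DMA,
 *	    QL_DMA_RING_ALIGN) == QL_SUCCESS) {
 *		(mem.bp is the kernel virtual address and mem.cookie
 *		holds the DMA address to program into the ISP)
 *		ql_free_phys(ha, &mem);
 *	}
 */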
15544 
15545 /*
15546  * ql_alloc_phys
15547  *      Function used to allocate memory and zero it.
15548  *      Memory is below 4 GB.
15549  *
15550  * Input:
15551  *      ha:                     adapter state pointer.
15552  *      mem:                    pointer to dma memory object.
15553  *      sleep:                  KM_SLEEP/KM_NOSLEEP flag.
15554  *      mem->cookie_count    number of segments allowed.
15555  *      mem->type            memory allocation type.
15556  *      mem->size            memory size.
15557  *      mem->alignment               memory alignment.
15558  *
15559  * Returns:
15560  *      ql local function return status code.
15561  *
15562  * Context:
15563  *      Kernel context.
15564  */
15565 int
15566 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15567 {
15568         size_t                  rlen;
15569         ddi_dma_attr_t          dma_attr;
15570         ddi_device_acc_attr_t   acc_attr = ql_dev_acc_attr;
15571 
15572         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15573 
15574         dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15575             ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15576 
15577         dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15578         dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15579 
15580         /*
15581          * Workaround for SUN XMITS: the buffer must start and end on an
15582          * 8 byte boundary or the hardware will overrun the buffer.  The
15583          * simple fix is to size the buffer with enough room for the overrun.
15584          */
15585         if (mem->size & 7) {
15586                 mem->size += 8 - (mem->size & 7);
15587         }
15588 
15589         mem->flags = DDI_DMA_CONSISTENT;
15590 
15591         /*
15592          * Allocate DMA memory for command.
15593          */
15594         if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15595             DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15596             DDI_SUCCESS) {
15597                 EL(ha, "failed, ddi_dma_alloc_handle\n");
15598                 mem->dma_handle = NULL;
 
15630                         mem->acc_handle = NULL;
15631                         mem->bp = NULL;
15632                 }
15633                 break;
15634         default:
15635                 EL(ha, "failed, unknown type=%xh\n", mem->type);
15636                 mem->acc_handle = NULL;
15637                 mem->bp = NULL;
15638                 break;
15639         }
15640 
15641         if (mem->bp == NULL) {
15642                 EL(ha, "failed, ddi_dma_mem_alloc\n");
15643                 ddi_dma_free_handle(&mem->dma_handle);
15644                 mem->dma_handle = NULL;
15645                 return (QL_MEMORY_ALLOC_FAILED);
15646         }
15647 
15648         mem->flags |= DDI_DMA_RDWR;
15649 
15650         if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15651                 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15652                 ql_free_phys(ha, mem);
15653                 return (QL_MEMORY_ALLOC_FAILED);
15654         }
15655 
15656         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15657 
15658         return (QL_SUCCESS);
15659 }
15660 
15661 /*
15662  * ql_free_phys
15663  *      Function used to free physical memory.
15664  *
15665  * Input:
15666  *      ha:     adapter state pointer.
15667  *      mem:    pointer to dma memory object.
15668  *
15669  * Context:
15670  *      Kernel context.
15671  */
15672 void
15673 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15674 {
15675         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15676 
15677         if (mem != NULL && mem->dma_handle != NULL) {
15678                 ql_unbind_dma_buffer(ha, mem);
15679                 switch (mem->type) {
15680                 case KERNEL_MEM:
15681                         if (mem->bp != NULL) {
15682                                 kmem_free(mem->bp, mem->size);
15683                         }
15684                         break;
15685                 case LITTLE_ENDIAN_DMA:
15686                 case BIG_ENDIAN_DMA:
15687                 case NO_SWAP_DMA:
15688                         if (mem->acc_handle != NULL) {
15689                                 ddi_dma_mem_free(&mem->acc_handle);
15690                                 mem->acc_handle = NULL;
15691                         }
15692                         break;
15693                 default:
15694                         break;
15695                 }
15696                 mem->bp = NULL;
15697                 ddi_dma_free_handle(&mem->dma_handle);
15698                 mem->dma_handle = NULL;
15699         }
15700 
15701         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15702 }
15703 
15704 /*
15705  * ql_alloc_dma_resouce.
15706  *      Allocates DMA resource for buffer.
15707  *
15708  * Input:
15709  *      ha:                     adapter state pointer.
15710  *      mem:                    pointer to dma memory object.
15711  *      sleep:                  KM_SLEEP/KM_NOSLEEP flag.
15712  *      mem->cookie_count    number of segments allowed.
15713  *      mem->type            memory allocation type.
15714  *      mem->size            memory size.
15715  *      mem->bp                      pointer to memory or struct buf
15716  *
15717  * Returns:
15718  *      ql local function return status code.
15719  *
15720  * Context:
15721  *      Kernel context.
15722  */
15723 int
15724 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15725 {
15726         ddi_dma_attr_t  dma_attr;
15727 
15728         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15729 
15730         dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15731             ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15732         dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15733 
15734         /*
15735          * Allocate DMA handle for command.
15736          */
15737         if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15738             DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15739             DDI_SUCCESS) {
15740                 EL(ha, "failed, ddi_dma_alloc_handle\n");
15741                 mem->dma_handle = NULL;
15742                 return (QL_MEMORY_ALLOC_FAILED);
15743         }
15744 
15745         mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15746 
15747         if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15748                 EL(ha, "failed, bind_dma_buffer\n");
15749                 ddi_dma_free_handle(&mem->dma_handle);
15750                 mem->dma_handle = NULL;
15751                 return (QL_MEMORY_ALLOC_FAILED);
15752         }
15753 
15754         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15755 
15756         return (QL_SUCCESS);
15757 }
15758 
15759 /*
15760  * ql_free_dma_resource
15761  *      Frees DMA resources.
15762  *
15763  * Input:
15764  *      ha:             adapter state pointer.
15765  *      mem:            pointer to dma memory object.
15766  *      mem->dma_handle      DMA memory handle.
15767  *
15768  * Context:
15769  *      Kernel context.
15770  */
15771 void
15772 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15773 {
15774         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15775 
15776         ql_free_phys(ha, mem);
15777 
15778         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15779 }
15780 
15781 /*
15782  * ql_bind_dma_buffer
15783  *      Binds DMA buffer.
15784  *
15785  * Input:
15786  *      ha:                     adapter state pointer.
15787  *      mem:                    pointer to dma memory object.
15788  *      sleep:                  KM_SLEEP or KM_NOSLEEP.
15789  *      mem->dma_handle              DMA memory handle.
15790  *      mem->cookie_count    number of segments allowed.
15791  *      mem->type            memory allocation type.
15792  *      mem->size            memory size.
15793  *      mem->bp                      pointer to memory or struct buf
15794  *
15795  * Returns:
15796  *      mem->cookies         pointer to list of cookies.
15797  *      mem->cookie_count    number of cookies.
15798  *      status                  success = DDI_DMA_MAPPED
15799  *                              DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15800  *                              DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15801  *                              DDI_DMA_TOOBIG
15802  *
15803  * Context:
15804  *      Kernel context.
15805  */
15806 static int
15807 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15808 {
15809         int                     rval;
15810         ddi_dma_cookie_t        *cookiep;
15811         uint32_t                cnt = mem->cookie_count;
15812 
15813         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15814 
15815         if (mem->type == STRUCT_BUF_MEMORY) {
15816                 rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15817                     mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15818                     DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15819         } else {
15820                 rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15821                     mem->size, mem->flags, (sleep == KM_SLEEP) ?
15822                     DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15823                     &mem->cookie_count);
15824         }
15825 
15826         if (rval == DDI_DMA_MAPPED) {
15827                 if (mem->cookie_count > cnt) {
15828                         (void) ddi_dma_unbind_handle(mem->dma_handle);
15829                         EL(ha, "failed, cookie_count %d > %d\n",
15830                             mem->cookie_count, cnt);
15831                         rval = DDI_DMA_TOOBIG;
15832                 } else {
15833                         if (mem->cookie_count > 1) {
15834                                 if (mem->cookies = kmem_zalloc(
15835                                     sizeof (ddi_dma_cookie_t) *
15836                                     mem->cookie_count, sleep)) {
15837                                         *mem->cookies = mem->cookie;
15838                                         cookiep = mem->cookies;
15839                                         for (cnt = 1; cnt < mem->cookie_count;
15840                                             cnt++) {
15841                                                 ddi_dma_nextcookie(
15842                                                     mem->dma_handle,
15843                                                     ++cookiep);
15844                                         }
15845                                 } else {
15846                                         (void) ddi_dma_unbind_handle(
15847                                             mem->dma_handle);
15848                                         EL(ha, "failed, kmem_zalloc\n");
15849                                         rval = DDI_DMA_NORESOURCES;
15850                                 }
15851                         } else {
15852                                 /*
15853                                  * It has been reported that dmac_size may
15854                                  * at times be incorrect on sparc machines,
15855                                  * so when there is only one segment, use
15856                                  * the buffer size instead.
15857                                  */
15858                                 mem->cookies = &mem->cookie;
15859                                 mem->cookies->dmac_size = mem->size;
15860                         }
15861                 }
15862         }
15863 
15864         if (rval != DDI_DMA_MAPPED) {
15865                 EL(ha, "failed=%xh\n", rval);
15866         } else {
15867                 /*EMPTY*/
15868                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15869         }
15870 
15871         return (rval);
15872 }
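
/*
 * Illustrative sketch (not driver code): after ql_bind_dma_buffer()
 * returns DDI_DMA_MAPPED, mem->cookies points at the cookie list in both
 * the single and multi-segment cases, so a caller can walk it to build a
 * scatter/gather list.  The loop below is an assumed consumer.
 *
 *	if (ql_bind_dma_buffer(ha, mem, KM_SLEEP) == DDI_DMA_MAPPED) {
 *		ddi_dma_cookie_t	*cp = mem->cookies;
 *		uint32_t		seg;
 *
 *		for (seg = 0; seg < mem->cookie_count; seg++, cp++) {
 *			(use cp->dmac_address and cp->dmac_size)
 *		}
 *		ql_unbind_dma_buffer(ha, mem);
 *	}
 */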
15873 
15874 /*
15875  * ql_unbind_dma_buffer
15876  *      Unbinds DMA buffer.
15877  *
15878  * Input:
15879  *      ha:                     adapter state pointer.
15880  *      mem:                    pointer to dma memory object.
15881  *      mem->dma_handle              DMA memory handle.
15882  *      mem->cookies         pointer to cookie list.
15883  *      mem->cookie_count    number of cookies.
15884  *
15885  * Context:
15886  *      Kernel context.
15887  */
15888 /* ARGSUSED */
15889 static void
15890 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15891 {
15892         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15893 
15894         (void) ddi_dma_unbind_handle(mem->dma_handle);
15895         if (mem->cookie_count > 1) {
15896                 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15897                     mem->cookie_count);
15898                 mem->cookies = NULL;
15899         }
15900         mem->cookie_count = 0;
15901 
15902         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15903 }
15904 
15905 static int
15906 ql_suspend_adapter(ql_adapter_state_t *ha)
15907 {
15908         clock_t timer = 32 * drv_usectohz(1000000);
15909 
15910         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15911 
15912         /*
15913          * First claim mailbox ownership so that no thread
15914          * using the mailbox hangs when we disable interrupts
15915          * in the middle of a mailbox command.
15916          */
15917         MBX_REGISTER_LOCK(ha);
15918 
15919         /* Check for mailbox available, if not wait for signal. */
15920         while (ha->mailbox_flags & MBX_BUSY_FLG) {
15921                 ha->mailbox_flags = (uint8_t)
15922                     (ha->mailbox_flags | MBX_WANT_FLG);
15923 
15924                 /* 30 seconds from now */
15925                 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
15926                     timer, TR_CLOCK_TICK) == -1) {
15927 
15928                         /* Release mailbox register lock. */
15929                         MBX_REGISTER_UNLOCK(ha);
15930                         EL(ha, "failed, Suspend mbox");
15931                         return (QL_FUNCTION_TIMEOUT);
15932                 }
15933         }
15934 
15935         /* Set busy flag. */
15936         ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
15937         MBX_REGISTER_UNLOCK(ha);
15938 
15939         (void) ql_wait_outstanding(ha);
15940 
15941         /*
15942          * At this point we are sure there will not be any mailbox
15943          * interrupt, so return all of the outstanding commands as
15944          * well as the internally queued commands.
15945          */
15946         ql_halt(ha, PM_LEVEL_D0);
15947 
15948         if (ha->power_level != PM_LEVEL_D3) {
15949                 /* Disable ISP interrupts. */
15950                 WRT16_IO_REG(ha, ictrl, 0);
15951         }
15952 
15953         ADAPTER_STATE_LOCK(ha);
15954         ha->flags &= ~INTERRUPTS_ENABLED;
15955         ADAPTER_STATE_UNLOCK(ha);
15956 
15957         MBX_REGISTER_LOCK(ha);
15958         /* Reset busy status. */
15959         ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);
15960 
15961         /* If thread is waiting for mailbox go signal it to start. */
15962         if (ha->mailbox_flags & MBX_WANT_FLG) {
15963                 ha->mailbox_flags = (uint8_t)
15964                     (ha->mailbox_flags & ~MBX_WANT_FLG);
15965                 cv_broadcast(&ha->cv_mbx_wait);
15966         }
15967         /* Release mailbox register lock. */
15968         MBX_REGISTER_UNLOCK(ha);
15969 
15970         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15971 
15972         return (QL_SUCCESS);
15973 }
15974 
15975 /*
15976  * ql_add_link_b
15977  *      Add link to the end of the chain.
15978  *
15979  * Input:
15980  *      head = Head of link list.
15981  *      link = link to be added.
15982  *      LOCK must be already obtained.
15983  *
15984  * Context:
15985  *      Interrupt or Kernel context, no mailbox commands allowed.
15986  */
15987 void
15988 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15989 {
15990         /* at the end there isn't a next */
15991         link->next = NULL;
15992 
15993         if ((link->prev = head->last) == NULL) {
15994                 head->first = link;
15995         } else {
15996                 head->last->next = link;
15997         }
15998 
15999         head->last = link;
16000         link->head = head;   /* the queue we're on */
16001 }
16002 
16003 /*
16004  * ql_add_link_t
16005  *      Add link to the beginning of the chain.
16006  *
16007  * Input:
16008  *      head = Head of link list.
16009  *      link = link to be added.
16010  *      LOCK must be already obtained.
16011  *
16012  * Context:
16013  *      Interrupt or Kernel context, no mailbox commands allowed.
16014  */
16015 void
16016 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16017 {
16018         link->prev = NULL;
16019 
16020         if ((link->next = head->first) == NULL)   {
16021                 head->last = link;
16022         } else {
16023                 head->first->prev = link;
16024         }
16025 
16026         head->first = link;
16027         link->head = head;   /* the queue we're on */
16028 }
16029 
16030 /*
16031  * ql_remove_link
16032  *      Remove a link from the chain.
16033  *
16034  * Input:
16035  *      head = Head of link list.
16036  *      link = link to be removed.
16037  *      LOCK must be already obtained.
16038  *
16039  * Context:
16040  *      Interrupt or Kernel context, no mailbox commands allowed.
16041  */
16042 void
16043 ql_remove_link(ql_head_t *head, ql_link_t *link)
16044 {
16045         if (link->prev != NULL) {
16046                 if ((link->prev->next = link->next) == NULL) {
16047                         head->last = link->prev;
16048                 } else {
16049                         link->next->prev = link->prev;
16050                 }
16051         } else if ((head->first = link->next) == NULL) {
16052                 head->last = NULL;
16053         } else {
16054                 head->first->prev = NULL;
16055         }
16056 
16057         /* not on a queue any more */
16058         link->prev = link->next = NULL;
16059         link->head = NULL;
16060 }
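
/*
 * Usage sketch for the list helpers above (illustrative only, not driver
 * code): the caller holds the lock protecting the queue, appends with
 * ql_add_link_b() and later removes with ql_remove_link().  The lun
 * queue field names below are assumptions for the example.
 *
 *	DEVICE_QUEUE_LOCK(tq);
 *	ql_add_link_b(&lq->cmd, &sp->cmd);	(queue the srb at the tail)
 *	...
 *	ql_remove_link(&lq->cmd, &sp->cmd);	(take it back off the queue)
 *	DEVICE_QUEUE_UNLOCK(tq);
 */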
16061 
16062 /*
16063  * ql_chg_endian
16064  *      Change the endianness of a byte array.
16065  *
16066  * Input:
16067  *      buf = array pointer.
16068  *      size = size of array in bytes.
16069  *
16070  * Context:
16071  *      Interrupt or Kernel context, no mailbox commands allowed.
16072  */
16073 void
16074 ql_chg_endian(uint8_t buf[], size_t size)
16075 {
16076         uint8_t byte;
16077         size_t  cnt1;
16078         size_t  cnt;
16079 
 
16128                 *ans += num * mul;
16129         }
16130 
16131         return (cnt);
16132 }
16133 
16134 /*
16135  * ql_delay
16136  *      Calls the delay routine if threads are not suspended,
16137  *      otherwise busy waits.  Minimum delay = 1 tick = 10ms.
16138  *
16139  * Input:
16140  *      usecs:  delay time in microseconds.
16141  *
16142  * Context:
16143  *      Kernel or Interrupt context, no mailbox commands allowed.
16144  */
16145 void
16146 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16147 {
16148         if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16149                 drv_usecwait(usecs);
16150         } else {
16151                 delay(drv_usectohz(usecs));
16152         }
16153 }
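
/*
 * For example (illustrative only): the polling loops in this file call
 * ql_delay(ha, 10000) to pause about 10 milliseconds per iteration.
 * When the daemons are running this becomes delay(drv_usectohz(10000)),
 * one clock tick on a 100Hz system clock; when suspended or in panic it
 * busy waits with drv_usecwait(10000).
 */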
16154 
16155 /*
16156  * ql_stall_driver
16157  *      Stalls one or all driver instances and waits up to 30 seconds.
16158  *
16159  * Input:
16160  *      ha:             adapter state pointer or NULL for all.
16161  *      options:        BIT_0 --> leave driver stalled on exit if
16162  *                                failed.
16163  *
16164  * Returns:
16165  *      ql local function return status code.
16166  *
16167  * Context:
16168  *      Kernel context.
16169  */
16170 int
16171 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16172 {
16173         ql_link_t               *link;
16174         ql_adapter_state_t      *ha2;
16175         uint32_t                timer;
16176 
16177         QL_PRINT_3(CE_CONT, "started\n");
16178 
16179         /* Wait up to 30 seconds for the daemons to stall. */
16180         timer = 3000;
16181         link = ha == NULL ? ql_hba.first : &ha->hba;
16182         while (link != NULL && timer) {
16183                 ha2 = link->base_address;
16184 
16185                 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16186 
16187                 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16188                     (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16189                     (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16190                     ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16191                         link = ha == NULL ? link->next : NULL;
16192                         continue;
16193                 }
16194 
16195                 ql_delay(ha2, 10000);
16196                 timer--;
16197                 link = ha == NULL ? ql_hba.first : &ha->hba;
16198         }
16199 
16200         if (ha2 != NULL && timer == 0) {
16201                 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16202                     ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16203                     "unstalled"));
16204                 if (options & BIT_0) {
16205                         ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16206                 }
16207                 return (QL_FUNCTION_TIMEOUT);
16208         }
16209 
16210         QL_PRINT_3(CE_CONT, "done\n");
16211 
16212         return (QL_SUCCESS);
16213 }
16214 
16215 /*
16216  * ql_restart_driver
16217  *      Restarts one or all driver instances.
16218  *
16219  * Input:
16220  *      ha:     adapter state pointer or NULL for all.
16221  *
16222  * Context:
16223  *      Kernel context.
16224  */
16225 void
16226 ql_restart_driver(ql_adapter_state_t *ha)
16227 {
16228         ql_link_t               *link;
16229         ql_adapter_state_t      *ha2;
16230         uint32_t                timer;
16231 
16232         QL_PRINT_3(CE_CONT, "started\n");
16233 
16234         /* Tell all daemons to unstall. */
16235         link = ha == NULL ? ql_hba.first : &ha->hba;
16236         while (link != NULL) {
16237                 ha2 = link->base_address;
16238 
16239                 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16240 
16241                 link = ha == NULL ? link->next : NULL;
16242         }
16243 
16244         /* Wait up to 30 seconds for all daemons to unstall. */
16245         timer = 3000;
16246         link = ha == NULL ? ql_hba.first : &ha->hba;
16247         while (link != NULL && timer) {
16248                 ha2 = link->base_address;
16249 
16250                 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16251                     (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16252                     (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
16253                         QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
16254                             ha2->instance, ha2->vp_index);
16255                         ql_restart_queues(ha2);
16256                         link = ha == NULL ? link->next : NULL;
16257                         continue;
16258                 }
16259 
16260                 QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
16261                     ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
16262 
16263                 ql_delay(ha2, 10000);
16264                 timer--;
16265                 link = ha == NULL ? ql_hba.first : &ha->hba;
16266         }
16267 
16268         QL_PRINT_3(CE_CONT, "done\n");
16269 }
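
/*
 * Illustrative sketch (not driver code): ql_stall_driver() and
 * ql_restart_driver() are intended to bracket work that must not race
 * with normal I/O.  Passing BIT_0 instead of 0 would leave the driver
 * stalled if the stall attempt times out.
 *
 *	if (ql_stall_driver(ha, 0) == QL_SUCCESS) {
 *		(perform the quiesced operation)
 *	}
 *	ql_restart_driver(ha);
 */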
16270 
16271 /*
16272  * ql_setup_interrupts
16273  *      Sets up interrupts based on the HBA's and platform's
16274  *      capabilities (e.g., legacy / MSI / FIXED).
16275  *
16276  * Input:
16277  *      ha = adapter state pointer.
16278  *
16279  * Returns:
16280  *      DDI_SUCCESS or DDI_FAILURE.
16281  *
16282  * Context:
16283  *      Kernel context.
16284  */
16285 static int
16286 ql_setup_interrupts(ql_adapter_state_t *ha)
16287 {
16288         int32_t         rval = DDI_FAILURE;
16289         int32_t         i;
16290         int32_t         itypes = 0;
16291 
16292         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16293 
16294         /*
16295          * The Solaris Advanced Interrupt Functions (aif) are only
16296          * supported on s10U1 or greater.
16297          */
16298         if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16299                 EL(ha, "interrupt framework is not supported or is "
16300                     "disabled, using legacy\n");
16301                 return (ql_legacy_intr(ha));
16302         } else if (ql_os_release_level == 10) {
16303                 /*
16304                  * See if the advanced interrupt functions (aif) are
16305                  * in the kernel
16306                  */
16307                 void    *fptr = (void *)&ddi_intr_get_supported_types;
16308 
16309                 if (fptr == NULL) {
16310                         EL(ha, "aif is not supported, using legacy "
16311                             "interrupts (rev)\n");
16312                         return (ql_legacy_intr(ha));
 
16319                 EL(ha, "get supported types failed, rval=%xh, "
16320                     "assuming FIXED\n", i);
16321                 itypes = DDI_INTR_TYPE_FIXED;
16322         }
16323 
16324         EL(ha, "supported types are: %xh\n", itypes);
16325 
16326         if ((itypes & DDI_INTR_TYPE_MSIX) &&
16327             (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16328                 EL(ha, "successful MSI-X setup\n");
16329         } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16330             (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16331                 EL(ha, "successful MSI setup\n");
16332         } else {
16333                 rval = ql_setup_fixed(ha);
16334         }
16335 
16336         if (rval != DDI_SUCCESS) {
16337                 EL(ha, "failed, aif, rval=%xh\n", rval);
16338         } else {
16339                 /*EMPTY*/
16340                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16341         }
16342 
16343         return (rval);
16344 }
16345 
16346 /*
16347  * ql_setup_msi
16348  *      Set up aif MSI interrupts
16349  *
16350  * Input:
16351  *      ha = adapter state pointer.
16352  *
16353  * Returns:
16354  *      DDI_SUCCESS or DDI_FAILURE.
16355  *
16356  * Context:
16357  *      Kernel context.
16358  */
16359 static int
16360 ql_setup_msi(ql_adapter_state_t *ha)
16361 {
16362         int32_t         count = 0;
16363         int32_t         avail = 0;
16364         int32_t         actual = 0;
16365         int32_t         msitype = DDI_INTR_TYPE_MSI;
16366         int32_t         ret;
16367         ql_ifunc_t      itrfun[10] = {0};
16368 
16369         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16370 
16371         if (ql_disable_msi != 0) {
16372                 EL(ha, "MSI is disabled by user\n");
16373                 return (DDI_FAILURE);
16374         }
16375 
16376         /* MSI is only supported on 24xx HBAs. */
16377         if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
16378                 EL(ha, "HBA does not support MSI\n");
16379                 return (DDI_FAILURE);
16380         }
16381 
16382         /* Get number of MSI interrupts the system supports */
16383         if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16384             DDI_SUCCESS) || count == 0) {
16385                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16386                 return (DDI_FAILURE);
16387         }
16388 
16389         /* Get number of available MSI interrupts */
16390         if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16391             DDI_SUCCESS) || avail == 0) {
16392                 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16393                 return (DDI_FAILURE);
16394         }
16395 
16396         /* MSI requires only 1.  */
16397         count = 1;
16398         itrfun[0].ifunc = &ql_isr_aif;
16399 
16400         /* Allocate space for interrupt handles */
16401         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16402         ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16403 
16404         ha->iflags |= IFLG_INTR_MSI;
16405 
16406         /* Allocate the interrupts */
16407         if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
16408             &actual, 0)) != DDI_SUCCESS || actual < count) {
16409                 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16410                     "actual=%xh\n", ret, count, actual);
16411                 ql_release_intr(ha);
16412                 return (DDI_FAILURE);
16413         }
16414 
16415         ha->intr_cnt = actual;
16416 
16417         /* Get interrupt priority */
16418         if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16419             DDI_SUCCESS) {
16420                 EL(ha, "failed, get_pri ret=%xh\n", ret);
16421                 ql_release_intr(ha);
16422                 return (ret);
16423         }
16424 
16425         /* Add the interrupt handler */
16426         if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
16427             (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
16428                 EL(ha, "failed, intr_add ret=%xh\n", ret);
16429                 ql_release_intr(ha);
16430                 return (ret);
16431         }
16432 
16433         /* Setup mutexes */
16434         if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16435                 EL(ha, "failed, mutex init ret=%xh\n", ret);
16436                 ql_release_intr(ha);
16437                 return (ret);
16438         }
16439 
16440         /* Get the capabilities */
16441         (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16442 
16443         /* Enable interrupts */
16444         if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16445                 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16446                     DDI_SUCCESS) {
16447                         EL(ha, "failed, block enable, ret=%xh\n", ret);
16448                         ql_destroy_mutex(ha);
16449                         ql_release_intr(ha);
16450                         return (ret);
16451                 }
16452         } else {
16453                 if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
16454                         EL(ha, "failed, intr enable, ret=%xh\n", ret);
16455                         ql_destroy_mutex(ha);
16456                         ql_release_intr(ha);
16457                         return (ret);
16458                 }
16459         }
16460 
16461         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16462 
16463         return (DDI_SUCCESS);
16464 }
16465 
16466 /*
16467  * ql_setup_msix
16468  *      Set up aif MSI-X interrupts
16469  *
16470  * Input:
16471  *      ha = adapter state pointer.
16472  *
16473  * Returns:
16474  *      DDI_SUCCESS or DDI_FAILURE.
16475  *
16476  * Context:
16477  *      Kernel context.
16478  */
16479 static int
16480 ql_setup_msix(ql_adapter_state_t *ha)
16481 {
16482         uint16_t        hwvect;
16483         int32_t         count = 0;
16484         int32_t         avail = 0;
16485         int32_t         actual = 0;
16486         int32_t         msitype = DDI_INTR_TYPE_MSIX;
16487         int32_t         ret;
16488         uint32_t        i;
16489         ql_ifunc_t      itrfun[QL_MSIX_MAXAIF] = {0};
16490 
16491         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16492 
16493         if (ql_disable_msix != 0) {
16494                 EL(ha, "MSI-X is disabled by user\n");
16495                 return (DDI_FAILURE);
16496         }
16497 
16498         /*
16499          * MSI-X support is only available on 24xx HBA's that have
16500          * rev A2 parts (revid = 3) or greater.
16501          */
16502         if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
16503             (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
16504             (ha->device_id == 0x8021))) {
16505                 EL(ha, "HBA does not support MSI-X\n");
16506                 return (DDI_FAILURE);
16507         }
16508 
16509         if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
16510                 EL(ha, "HBA does not support MSI-X (revid)\n");
16511                 return (DDI_FAILURE);
16512         }
16513 
16514         /* Per HP, these HP branded HBA's are not supported with MSI-X */
16515         if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
16516             ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
16517                 EL(ha, "HBA does not support MSI-X (subdevid)\n");
16518                 return (DDI_FAILURE);
16519         }
16520 
16521         /* Get the number of 24xx/25xx MSI-X h/w vectors */
16522         hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
16523             ql_pci_config_get16(ha, 0x7e) :
16524             ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);
16525 
16526         EL(ha, "pcie config space hwvect = %d\n", hwvect);
16527 
16528         if (hwvect < QL_MSIX_MAXAIF) {
16529                 EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
16530                     QL_MSIX_MAXAIF, hwvect);
16531                 return (DDI_FAILURE);
16532         }
16533 
16534         /* Get number of MSI-X interrupts the platform h/w supports */
16535         if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16536             DDI_SUCCESS) || count == 0) {
16537                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16538                 return (DDI_FAILURE);
16539         }
16540 
16541         /* Get number of available system interrupts */
16542         if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16543             DDI_SUCCESS) || avail == 0) {
16544                 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16545                 return (DDI_FAILURE);
16546         }
16547 
16548         /* Fill out the intr table */
16549         count = QL_MSIX_MAXAIF;
16550         itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
16551         itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;
16552 
16553         /* Allocate space for interrupt handles */
16554         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
16555         if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
16556                 ha->hsize = 0;
16557                 EL(ha, "failed, unable to allocate htable space\n");
16558                 return (DDI_FAILURE);
16559         }
16560 
16561         ha->iflags |= IFLG_INTR_MSIX;
16562 
16563         /* Allocate the interrupts */
16564         if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
16565             DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
16566             actual < QL_MSIX_MAXAIF) {
16567                 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16568                     "actual=%xh\n", ret, count, actual);
16569                 ql_release_intr(ha);
16570                 return (DDI_FAILURE);
16571         }
16572 
16573         ha->intr_cnt = actual;
16574 
16575         /* Get interrupt priority */
16576         if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16577             DDI_SUCCESS) {
16578                 EL(ha, "failed, get_pri ret=%xh\n", ret);
16579                 ql_release_intr(ha);
16580                 return (ret);
16581         }
16582 
16583         /* Add the interrupt handlers */
16584         for (i = 0; i < actual; i++) {
16585                 if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
16586                     (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
16587                         EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
16588                             actual, ret);
16589                         ql_release_intr(ha);
16590                         return (ret);
16591                 }
16592         }
16593 
16594         /*
16595          * duplicate the rest of the intr's
16596          * ddi_intr_dup_handler() isn't working on x86 just yet...
16597          */
16598 #ifdef __sparc
16599         for (i = actual; i < hwvect; i++) {
16600                 if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
16601                     &ha->htable[i])) != DDI_SUCCESS) {
16602                         EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
16603                             i, actual, ret);
16604                         ql_release_intr(ha);
16605                         return (ret);
16606                 }
16607         }
16608 #endif
16609 
16610         /* Setup mutexes */
16611         if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16612                 EL(ha, "failed, mutex init ret=%xh\n", ret);
16613                 ql_release_intr(ha);
16614                 return (ret);
16615         }
16616 
16617         /* Get the capabilities */
16618         (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16619 
16620         /* Enable interrupts */
16621         if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16622                 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16623                     DDI_SUCCESS) {
16624                         EL(ha, "failed, block enable, ret=%xh\n", ret);
16625                         ql_destroy_mutex(ha);
16626                         ql_release_intr(ha);
16627                         return (ret);
16628                 }
16629         } else {
16630                 for (i = 0; i < ha->intr_cnt; i++) {
16631                         if ((ret = ddi_intr_enable(ha->htable[i])) !=
16632                             DDI_SUCCESS) {
16633                                 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16634                                 ql_destroy_mutex(ha);
16635                                 ql_release_intr(ha);
16636                                 return (ret);
16637                         }
16638                 }
16639         }
16640 
16641         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16642 
16643         return (DDI_SUCCESS);
16644 }
16645 
16646 /*
16647  * ql_setup_fixed
16648  *      Sets up aif FIXED interrupts
16649  *
16650  * Input:
16651  *      ha = adapter state pointer.
16652  *
16653  * Returns:
16654  *      DDI_SUCCESS or DDI_FAILURE.
16655  *
16656  * Context:
16657  *      Kernel context.
16658  */
16659 static int
16660 ql_setup_fixed(ql_adapter_state_t *ha)
16661 {
16662         int32_t         count = 0;
16663         int32_t         actual = 0;
16664         int32_t         ret;
16665         uint32_t        i;
16666 
16667         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16668 
16669         /* Get number of fixed interrupts the system supports */
16670         if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
16671             &count)) != DDI_SUCCESS) || count == 0) {
16672                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16673                 return (DDI_FAILURE);
16674         }
16675 
16676         ha->iflags |= IFLG_INTR_FIXED;
16677 
16678         /* Allocate space for interrupt handles */
16679         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16680         ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16681 
16682         /* Allocate the interrupts */
16683         if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
16684             0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
16685             actual < count) {
16686                 EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
16687                     "actual=%xh\n", ret, count, actual);
16688                 ql_release_intr(ha);
16689                 return (DDI_FAILURE);
16690         }
16691 
16692         ha->intr_cnt = actual;
16693 
16694         /* Get interrupt priority */
16695         if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16696             DDI_SUCCESS) {
16697                 EL(ha, "failed, get_pri ret=%xh\n", ret);
16698                 ql_release_intr(ha);
16699                 return (ret);
16700         }
16701 
16702         /* Add the interrupt handlers */
16703         for (i = 0; i < ha->intr_cnt; i++) {
16704                 if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
16705                     (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
16706                         EL(ha, "failed, intr_add ret=%xh\n", ret);
16707                         ql_release_intr(ha);
16708                         return (ret);
16709                 }
16710         }
16711 
16712         /* Setup mutexes */
16713         if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16714                 EL(ha, "failed, mutex init ret=%xh\n", ret);
16715                 ql_release_intr(ha);
16716                 return (ret);
16717         }
16718 
16719         /* Enable interrupts */
16720         for (i = 0; i < ha->intr_cnt; i++) {
16721                 if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
16722                         EL(ha, "failed, intr enable, ret=%xh\n", ret);
16723                         ql_destroy_mutex(ha);
16724                         ql_release_intr(ha);
16725                         return (ret);
16726                 }
16727         }
16728 
16729         EL(ha, "using FIXED interupts\n");
16730 
16731         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16732 
16733         return (DDI_SUCCESS);
16734 }
16735 
16736 /*
16737  * ql_disable_intr
16738  *      Disables interrupts
16739  *
16740  * Input:
16741  *      ha = adapter state pointer.
16742  *
16743  * Returns:
16744  *
16745  * Context:
16746  *      Kernel context.
16747  */
16748 static void
16749 ql_disable_intr(ql_adapter_state_t *ha)
16750 {
16751         uint32_t        i, rval;
16752 
16753         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16754 
16755         if (!(ha->iflags & IFLG_INTR_AIF)) {
16756 
16757                 /* Disable legacy interrupts */
16758                 (void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16759 
16760         } else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16761             (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16762 
16763                 /* Remove AIF block interrupts (MSI) */
16764                 if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16765                     != DDI_SUCCESS) {
16766                         EL(ha, "failed intr block disable, rval=%x\n", rval);
16767                 }
16768 
16769         } else {
16770 
16771                 /* Remove AIF non-block interrupts (fixed).  */
16772                 for (i = 0; i < ha->intr_cnt; i++) {
16773                         if ((rval = ddi_intr_disable(ha->htable[i])) !=
16774                             DDI_SUCCESS) {
16775                                 EL(ha, "failed intr disable, intr#=%xh, "
16776                                     "rval=%xh\n", i, rval);
16777                         }
16778                 }
16779         }
16780 
16781         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16782 }
16783 
16784 /*
16785  * ql_release_intr
16786  *      Releases aif interrupt resources
16787  *
16788  * Input:
16789  *      ha = adapter state pointer.
16790  *
16791  * Returns:
16792  *
16793  * Context:
16794  *      Kernel context.
16795  */
16796 static void
16797 ql_release_intr(ql_adapter_state_t *ha)
16798 {
16799         int32_t         i;
16800 
16801         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16802 
16803         if (!(ha->iflags & IFLG_INTR_AIF)) {
16804                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16805                 return;
16806         }
16807 
16808         ha->iflags &= ~(IFLG_INTR_AIF);
16809         if (ha->htable != NULL && ha->hsize > 0) {
16810                 i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
16811                 while (i-- > 0) {
16812                         if (ha->htable[i] == 0) {
16813                                 EL(ha, "htable[%x]=0h\n", i);
16814                                 continue;
16815                         }
16816 
16817                         (void) ddi_intr_disable(ha->htable[i]);
16818 
16819                         if (i < ha->intr_cnt) {
16820                                 (void) ddi_intr_remove_handler(ha->htable[i]);
16821                         }
16822 
16823                         (void) ddi_intr_free(ha->htable[i]);
16824                 }
16825 
16826                 kmem_free(ha->htable, ha->hsize);
16827                 ha->htable = NULL;
16828         }
16829 
16830         ha->hsize = 0;
16831         ha->intr_cnt = 0;
16832         ha->intr_pri = 0;
16833         ha->intr_cap = 0;
16834 
16835         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16836 }
16837 
16838 /*
16839  * ql_legacy_intr
16840  *      Sets up legacy interrupts.
16841  *
16842  *      NB: Only to be used if AIF (Advanced Interrupt Framework)
16843  *          is NOT in the kernel.
16844  *
16845  * Input:
16846  *      ha = adapter state pointer.
16847  *
16848  * Returns:
16849  *      DDI_SUCCESS or DDI_FAILURE.
16850  *
16851  * Context:
16852  *      Kernel context.
16853  */
16854 static int
16855 ql_legacy_intr(ql_adapter_state_t *ha)
16856 {
16857         int     rval = DDI_SUCCESS;
16858 
16859         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16860 
16861         /* Setup mutexes */
16862         if (ql_init_mutex(ha) != DDI_SUCCESS) {
16863                 EL(ha, "failed, mutex init\n");
16864                 return (DDI_FAILURE);
16865         }
16866 
16867         /* Setup standard/legacy interrupt handler */
16868         if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16869             (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16870                 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16871                     QL_NAME, ha->instance);
16872                 ql_destroy_mutex(ha);
16873                 rval = DDI_FAILURE;
16874         }
16875 
16876         if (rval == DDI_SUCCESS) {
16877                 ha->iflags |= IFLG_INTR_LEGACY;
16878                 EL(ha, "using legacy interrupts\n");
16879         }
16880 
16881         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16882 
16883         return (rval);
16884 }
16885 
16886 /*
16887  * ql_init_mutex
16888  *      Initializes mutexes and condition variables.
16889  *
16890  * Input:
16891  *      ha = adapter state pointer.
16892  *
16893  * Returns:
16894  *      DDI_SUCCESS or DDI_FAILURE.
16895  *
16896  * Context:
16897  *      Kernel context.
16898  */
16899 static int
16900 ql_init_mutex(ql_adapter_state_t *ha)
16901 {
16902         int     ret;
16903         void    *intr;
16904 
16905         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16906 
16907         if (ha->iflags & IFLG_INTR_AIF) {
16908                 intr = (void *)(uintptr_t)ha->intr_pri;
16909         } else {
16910                 /* Get iblock cookies to initialize mutexes */
16911                 if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16912                     &ha->iblock_cookie)) != DDI_SUCCESS) {
16913                         EL(ha, "failed, get_iblock: %xh\n", ret);
16914                         return (DDI_FAILURE);
16915                 }
16916                 intr = (void *)ha->iblock_cookie;
16917         }
16918 
16919         /* mutexes to protect the adapter state structure. */
16920         mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16921 
16922         /* mutex to protect the ISP response ring. */
16923         mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16924 
16925         /* mutex to protect the mailbox registers. */
16926         mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16927 
16928         /* power management protection */
16929         mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16930 
16931         /* Mailbox wait and interrupt conditional variable. */
16932         cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16933         cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16934 
16935         /* mutex to protect the ISP request ring. */
16936         mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16937 
16938         /* Unsolicited buffer conditional variable. */
16939         cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16940 
16941         mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16942         mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16943 
16944         /* Suspended conditional variable. */
16945         cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16946 
16947         /* mutex to protect task daemon context. */
16948         mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16949 
16950         /* Task_daemon thread conditional variable. */
16951         cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16952 
16953         /* mutex to protect diag port manage interface */
16954         mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16955 
16956         /* mutex to protect per instance f/w dump flags and buffer */
16957         mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16958 
16959         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16960 
16961         return (DDI_SUCCESS);
16962 }
16963 
16964 /*
16965  * ql_destroy_mutex
16966  *      Destroys mutexes and condition variables.
16967  *
16968  * Input:
16969  *      ha = adapter state pointer.
16970  *
16971  * Returns:
16972  *
16973  * Context:
16974  *      Kernel context.
16975  */
16976 static void
16977 ql_destroy_mutex(ql_adapter_state_t *ha)
16978 {
16979         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16980 
16981         mutex_destroy(&ha->dump_mutex);
16982         mutex_destroy(&ha->portmutex);
16983         cv_destroy(&ha->cv_task_daemon);
16984         mutex_destroy(&ha->task_daemon_mutex);
16985         cv_destroy(&ha->cv_dr_suspended);
16986         mutex_destroy(&ha->cache_mutex);
16987         mutex_destroy(&ha->ub_mutex);
16988         cv_destroy(&ha->cv_ub);
16989         mutex_destroy(&ha->req_ring_mutex);
16990         cv_destroy(&ha->cv_mbx_intr);
16991         cv_destroy(&ha->cv_mbx_wait);
16992         mutex_destroy(&ha->pm_mutex);
16993         mutex_destroy(&ha->mbx_mutex);
16994         mutex_destroy(&ha->intr_mutex);
16995         mutex_destroy(&ha->mutex);
16996 
16997         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16998 }
16999 
17000 /*
17001  * ql_fwmodule_resolve
17002  *      Loads and resolves external firmware module and symbols
17003  *
17004  * Input:
17005  *      ha:             adapter state pointer.
17006  *
17007  * Returns:
17008  *      ql local function return status code:
17009  *              QL_SUCCESS - external f/w module and symbols resolved
17010  *              QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17011  *              QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17012  *              QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17013  * Context:
17014  *      Kernel context.
17015  *
17016  * NOTE: We currently call ddi_modopen/ddi_modclose at attach/detach time.
17017  * We could switch to a tighter scope around the actual download (and add an
17018  * extra ddi_modopen for module opens that occur before root is mounted).
17019  *
17020  */
17021 uint32_t
17022 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17023 {
17024         int8_t                  module[128];
17025         int8_t                  fw_version[128];
17026         uint32_t                rval = QL_SUCCESS;
17027         caddr_t                 code, code02;
17028         uint8_t                 *p_ucfw;
17029         uint16_t                *p_usaddr, *p_uslen;
17030         uint32_t                *p_uiaddr, *p_uilen, *p_uifw;
17031         uint32_t                *p_uiaddr02, *p_uilen02;
17032         struct fw_table         *fwt;
17033         extern struct fw_table  fw_table[];
17034 
17035         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17036 
17037         if (ha->fw_module != NULL) {
17038                 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17039                     ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17040                     ha->fw_subminor_version);
17041                 return (rval);
17042         }
17043 
17044         /* make sure the fw_class is in the fw_table of supported classes */
17045         for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17046                 if (fwt->fw_class == ha->fw_class)
17047                         break;                  /* match */
17048         }
17049         if (fwt->fw_version == NULL) {
17050                 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17051                     "in driver's fw_table", QL_NAME, ha->instance,
17052                     ha->fw_class);
17053                 return (QL_FW_NOT_SUPPORTED);
17054         }
17055 
 
17088                         rval = QL_FWSYM_NOT_FOUND;
17089                         EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
17090                 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17091                     "firmware_version", NULL)) == NULL) {
17092                         rval = QL_FWSYM_NOT_FOUND;
17093                         EL(ha, "failed, f/w module %s fwver symbol\n", module);
17094                 }
17095 
17096                 if (rval == QL_SUCCESS) {
17097                         ha->risc_fw[0].code = code;
17098                         ha->risc_fw[0].addr = *p_usaddr;
17099                         ha->risc_fw[0].length = *p_uslen;
17100 
17101                         (void) snprintf(fw_version, sizeof (fw_version),
17102                             "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17103                 }
17104                 break;
17105 
17106         case 0x2400:
17107         case 0x2500:
17108         case 0x8100:
17109 
17110                 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17111                     NULL)) == NULL) {
17112                         rval = QL_FWSYM_NOT_FOUND;
17113                         EL(ha, "failed, f/w module %s rc01 symbol\n", module);
17114                 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17115                     "risc_code_addr01", NULL)) == NULL) {
17116                         rval = QL_FWSYM_NOT_FOUND;
17117                         EL(ha, "failed, f/w module %s rca01 symbol\n", module);
17118                 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17119                     "risc_code_length01", NULL)) == NULL) {
17120                         rval = QL_FWSYM_NOT_FOUND;
17121                         EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
17122                 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17123                     "firmware_version", NULL)) == NULL) {
17124                         rval = QL_FWSYM_NOT_FOUND;
17125                         EL(ha, "failed, f/w module %s fwver symbol\n", module);
17126                 }
17127 
17128                 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17129                     NULL)) == NULL) {
17130                         rval = QL_FWSYM_NOT_FOUND;
17131                         EL(ha, "failed, f/w module %s rc02 symbol\n", module);
17132                 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17133                     "risc_code_addr02", NULL)) == NULL) {
17134                         rval = QL_FWSYM_NOT_FOUND;
17135                         EL(ha, "failed, f/w module %s rca02 symbol\n", module);
17136                 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17137                     "risc_code_length02", NULL)) == NULL) {
17138                         rval = QL_FWSYM_NOT_FOUND;
17139                         EL(ha, "failed, f/w module %s rcl02 symbol\n", module);
17140                 }
17141 
17142                 if (rval == QL_SUCCESS) {
17143                         ha->risc_fw[0].code = code;
17144                         ha->risc_fw[0].addr = *p_uiaddr;
17145                         ha->risc_fw[0].length = *p_uilen;
17146                         ha->risc_fw[1].code = code02;
17147                         ha->risc_fw[1].addr = *p_uiaddr02;
17148                         ha->risc_fw[1].length = *p_uilen02;
17149 
17150                         (void) snprintf(fw_version, sizeof (fw_version),
17151                             "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17152                 }
17153                 break;
17154 
17155         default:
17156                 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17157                 rval = QL_FW_NOT_SUPPORTED;
17158         }
17159 
17160         if (rval != QL_SUCCESS) {
17161                 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17162                     "module %s (%x)", QL_NAME, ha->instance, module, rval);
 
17165                         ha->fw_module = NULL;
17166                 }
17167         } else {
17168                 /*
17169                  * check for firmware version mismatch between module and
17170                  * compiled in fw_table version.
17171                  */
17172 
17173                 if (strcmp(fwt->fw_version, fw_version) != 0) {
17174 
17175                         /*
17176                          * If f/w / driver version mismatches then
17177                          * return a successful status -- however warn
17178                          * the user that this is NOT recommended.
17179                          */
17180 
17181                         cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17182                             "mismatch for %x: driver-%s module-%s", QL_NAME,
17183                             ha->instance, ha->fw_class, fwt->fw_version,
17184                             fw_version);
17185 
17186                         ha->cfg_flags |= CFG_FW_MISMATCH;
17187                 } else {
17188                         ha->cfg_flags &= ~CFG_FW_MISMATCH;
17189                 }
17190         }
17191 
17192         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17193 
17194         return (rval);
17195 }
17196 
17197 /*
17198  * ql_port_state
17199  *      Set the state on all adapter ports.
17200  *
17201  * Input:
17202  *      ha:     parent adapter state pointer.
17203  *      state:  port state.
17204  *      flags:  task daemon flags to set.
17205  *
17206  * Context:
17207  *      Interrupt or Kernel context, no mailbox commands allowed.
17208  */
17209 void
17210 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17211 {
17212         ql_adapter_state_t      *vha;
17213 
17214         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17215 
17216         TASK_DAEMON_LOCK(ha);
17217         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17218                 if (FC_PORT_STATE_MASK(vha->state) != state) {
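                        /*
                         * Keep the port-speed bits of the current state
                         * unless the port is being taken offline.
                         */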
17219                         vha->state = state != FC_STATE_OFFLINE ?
17220                             (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17221                         vha->task_daemon_flags |= flags;
17222                 }
17223         }
17224         ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17225         TASK_DAEMON_UNLOCK(ha);
17226 
17227         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17228 }
17229 
17230 /*
17231  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17232  *
17233  * Input:       Pointer to the adapter state structure.
17234  * Returns:     Success or Failure.
17235  * Context:     Kernel context.
17236  */
17237 int
17238 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17239 {
17240         int     rval = DDI_SUCCESS;
17241 
17242         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17243 
17244         ha->el_trace_desc =
17245             (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
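        /*
         * kmem_zalloc(KM_SLEEP) never returns NULL; the check below is
         * purely defensive.
         */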
17246 
17247         if (ha->el_trace_desc == NULL) {
17248                 cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17249                     QL_NAME, ha->instance);
17250                 rval = DDI_FAILURE;
17251         } else {
17252                 ha->el_trace_desc->next           = 0;
17253                 ha->el_trace_desc->trace_buffer =
17254                     (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17255 
17256                 if (ha->el_trace_desc->trace_buffer == NULL) {
17257                         cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17258                             QL_NAME, ha->instance);
17259                         kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17260                         rval = DDI_FAILURE;
17261                 } else {
17262                         ha->el_trace_desc->trace_buffer_size =
17263                             EL_TRACE_BUF_SIZE;
17264                         mutex_init(&ha->el_trace_desc->mutex, NULL,
17265                             MUTEX_DRIVER, NULL);
17266                 }
17267         }
17268 
17269         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17270 
17271         return (rval);
17272 }
17273 
17274 /*
17275  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17276  *
17277  * Input:       Pointer to the adapter state structure.
17278  * Returns:     Success or Failure.
17279  * Context:     Kernel context.
17280  */
17281 int
17282 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17283 {
17284         int     rval = DDI_SUCCESS;
17285 
17286         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17287 
17288         if (ha->el_trace_desc == NULL) {
17289                 cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17290                     QL_NAME, ha->instance);
17291                 rval = DDI_FAILURE;
17292         } else {
17293                 if (ha->el_trace_desc->trace_buffer != NULL) {
17294                         kmem_free(ha->el_trace_desc->trace_buffer,
17295                             ha->el_trace_desc->trace_buffer_size);
17296                 }
17297                 mutex_destroy(&ha->el_trace_desc->mutex);
17298                 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17299         }
17300 
17301         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17302 
17303         return (rval);
17304 }
17305 
17306 /*
17307  * els_cmd_text - Return a pointer to a string describing the command
17308  *
17309  * Input:       els_cmd = the els command opcode.
17310  * Returns:     pointer to a string.
17311  * Context:     Kernel context.
17312  */
17313 char *
17314 els_cmd_text(int els_cmd)
17315 {
17316         cmd_table_t *entry = &els_cmd_tbl[0];
17317 
17318         return (cmd_text(entry, els_cmd));
17319 }
17320 
17321 /*
17322  * mbx_cmd_text - Return a pointer to a string describing the command
17323  *
 
17336 /*
17337  * cmd_text - Return a pointer to a string describing the command
17338  *
17339  * Input:       entry = the command table
17340  *              cmd = the command.
17341  * Returns:     pointer to a string.
17342  * Context:     Kernel context.
17343  */
17344 char *
17345 cmd_text(cmd_table_t *entry, int cmd)
17346 {
17347         for (; entry->cmd != 0; entry++) {
17348                 if (entry->cmd == cmd) {
17349                         break;
17350                 }
17351         }
17352         return (entry->string);
17353 }
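
/*
 * If no entry matches, the scan stops at the table's terminating entry
 * (cmd == 0) and that entry's string is returned, so callers always get a
 * printable result.  For example, els_cmd_text(0x3) should return the
 * table's PLOGI string, 0x3 being the standard ELS PLOGI command code
 * (assuming the usual ELS_CMD_TABLE contents).
 */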
17354 
17355 /*
17356  * ql_els_24xx_iocb - els request indication.
17357  *
17358  * Input:       ha = adapter state pointer.
17359  *              srb = scsi request block pointer.
17360  *              arg = els passthru entry iocb pointer.
17361  * Returns:
17362  * Context:     Kernel context.
17363  */
17364 void
17365 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17366 {
17367         els_descriptor_t        els_desc;
17368 
17369         /* Extract the ELS information */
17370         ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17371 
17372         /* Construct the passthru entry */
17373         ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17374 
17375         /* Ensure correct endianness */
17376         ql_isp_els_handle_cmd_endian(ha, srb);
17377 }
17378 
17379 /*
17380  * ql_fca_isp_els_request - Extract into an els descriptor the info required
17381  *                          to build an els_passthru iocb from an fc packet.
17382  *
17383  * Input:       ha = adapter state pointer.
17384  *              pkt = fc packet pointer
17385  *              els_desc = els descriptor pointer
17386  * Returns:
17387  * Context:     Kernel context.
17388  */
17389 static void
17390 ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
17391     els_descriptor_t *els_desc)
17392 {
17393         ls_code_t       els;
17394 
17395         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17396             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17397 
17398         els_desc->els = els.ls_code;
17399 
17400         els_desc->els_handle = ha->hba_buf.acc_handle;
17401         els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
17402         els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
17403         /* If n_port_handle is not a local loop ID (< 0x7d), use 0. */
17404         if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17405                 els_desc->n_port_handle = ha->n_port->n_port_handle;
17406         } else {
17407                 els_desc->n_port_handle = 0;
17408         }
17409         els_desc->control_flags = 0;
17410         els_desc->cmd_byte_count = pkt->pkt_cmdlen;
17411         /*
17412          * Transmit DSD. This field defines the Fibre Channel Frame payload
17413          * (without the frame header) in system memory.
17414          */
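        /* LSD()/MSD() yield the low and high 32 bits of the 64-bit address. */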
17415         els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
17416         els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
17417         els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
17418 
17419         els_desc->rsp_byte_count = pkt->pkt_rsplen;
17420         /*
 
17457             els_desc->els);
17458         ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
17459             els_desc->d_id.b.al_pa);
17460         ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
17461             els_desc->d_id.b.area);
17462         ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
17463             els_desc->d_id.b.domain);
17464         ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
17465             els_desc->s_id.b.al_pa);
17466         ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
17467             els_desc->s_id.b.area);
17468         ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
17469             els_desc->s_id.b.domain);
17470         ddi_put16(els_desc->els_handle, &els_entry->control_flags,
17471             els_desc->control_flags);
17472         ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
17473             els_desc->rsp_byte_count);
17474         ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
17475             els_desc->cmd_byte_count);
17476         /* Load transmit data segments and count. */
17477         ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
17478         ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
17479         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
17480         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
17481         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
17482         ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
17483         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
17484         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
17485         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
17486 }
17487 
17488 /*
17489  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17490  *                                in host memory.
17491  *
17492  * Input:       ha = adapter state pointer.
17493  *              srb = scsi request block
17494  * Returns:
17495  * Context:     Kernel context.
17496  */
17497 void
 
 
17604         }
17605 }
17606 
17607 /*
17608  * ql_n_port_plogi
17609  *      In an N_Port-to-N_Port (point-to-point) topology where an N_Port
17610  *      has logged in with the firmware because it has the N_Port login
17611  *      initiative, send up a PLOGI by proxy so the login procedure continues.
17612  *
17613  * Input:
17614  *      ha = adapter state pointer.
17615  * Returns:
17616  *
17617  * Context:
17618  *      Kernel context.
17619  */
17620 static int
17621 ql_n_port_plogi(ql_adapter_state_t *ha)
17622 {
17623         int             rval;
17624         ql_tgt_t        *tq = NULL;
17625         ql_head_t       done_q = { NULL, NULL };
17626 
17627         rval = QL_SUCCESS;
17628 
17629         if (ha->topology & QL_N_PORT) {
17630                 /* if we're doing this the n_port_handle must be good */
17631                 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17632                         tq = ql_loop_id_to_queue(ha,
17633                             ha->n_port->n_port_handle);
17634                         if (tq != NULL) {
17635                                 (void) ql_send_plogi(ha, tq, &done_q);
17636                         } else {
17637                                 EL(ha, "n_port_handle = %x, tq = %x\n",
17638                                     ha->n_port->n_port_handle, tq);
17639                         }
17640                 } else {
17641                         EL(ha, "n_port_handle = %x, tq = %x\n",
17642                             ha->n_port->n_port_handle, tq);
17643                 }
17644                 if (done_q.first != NULL) {
17645                         ql_done(done_q.first);
17646                 }
17647         }
17648         return (rval);
17649 }
17650 
17651 /*
17652  * Compare two WWNs as big-endian values.
17653  *
17654  * Note particularly that the indentation used in this
17655  * function isn't according to Sun recommendations.  It
17656  * is indented to make reading a bit easier.
17657  *
17658  * Return Values:
17659  *   if first == second return  0
17660  *   if first > second  return  1
17661  *   if first < second  return -1
17662  */
17663 int
17664 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17665 {
17666         la_wwn_t t1, t2;
17667         int rval;
17668 
17669         EL(ha, "WWPN=%08x%08x\n",
17670             BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17671         EL(ha, "WWPN=%08x%08x\n",
17672             BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17673         /*
17674          * Fibre Channel protocol is big endian, so compare
17675          * as big endian values
17676          */
17677         t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17678         t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17679 
17680         t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17681         t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17682 
17683         if (t1.i_wwn[0] == t2.i_wwn[0]) {
17684                 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17685                         rval = 0;
17686                 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17687                         rval = 1;
17688                 } else {
17689                         rval = -1;
17690                 }
17691         } else {
17692                 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17693                         rval = 1;
17694                 } else {
17695                         rval = -1;
17696                 }
17697         }
17698         return (rval);
17699 }
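
/*
 * Illustrative usage only (not part of the driver): since the return value
 * follows the usual cmp() convention, callers can order WWNs directly.
 * port_a_wwn and port_b_wwn below are hypothetical la_wwn_t values named
 * only for this example:
 *
 *      if (ql_wwn_cmp(ha, &port_a_wwn, &port_b_wwn) < 0) {
 *              ... port_a_wwn sorts before port_b_wwn ...
 *      }
 */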
17700 
17701 /*
17702  * ql_wait_for_td_stop
17703  *      Wait for the task daemon to stop running.  The internal command
17704  *      timeout is approximately 30 seconds, so it may help in some
17705  *      corner cases to wait that long.
17706  *
17707  * Input:
17708  *      ha = adapter state pointer.
17709  *
17710  * Returns:
17711  *      DDI_SUCCESS or DDI_FAILURE.
17712  *
17713  * Context:
17714  *      Kernel context.
17715  */
17716 
17717 static int
17718 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17719 {
17720         int     rval = DDI_FAILURE;
17721         UINT16  wait_cnt;
17722 
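        /* 3000 polls x 10ms each = roughly the 30 second internal command timeout. */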
17723         for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17724                 /* The task daemon clears the stop flag on exit. */
17725                 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17726                         if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17727                             ddi_in_panic()) {
17728                                 drv_usecwait(10000);
17729                         } else {
17730                                 delay(drv_usectohz(10000));
17731                         }
17732                 } else {
17733                         rval = DDI_SUCCESS;
17734                         break;
17735                 }
17736         }
17737         return (rval);
17738 }
17739 
17740 /*
17741  * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17742  *
17743  * Input:       Pointer to the adapter state structure.
17744  * Returns:     Success or Failure.
17745  * Context:     Kernel context.
17746  */
17747 int
17748 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17749 {
17750         int     rval = DDI_SUCCESS;
17751 
17752         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17753 
17754         ha->nvram_cache =
17755             (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17756             KM_SLEEP);
17757 
17758         if (ha->nvram_cache == NULL) {
17759                 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17760                     " descriptor", QL_NAME, ha->instance);
17761                 rval = DDI_FAILURE;
17762         } else {
17763                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
17764                         ha->nvram_cache->size = sizeof (nvram_24xx_t);
17765                 } else {
17766                         ha->nvram_cache->size = sizeof (nvram_t);
17767                 }
17768                 ha->nvram_cache->cache =
17769                     (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17770                 if (ha->nvram_cache->cache == NULL) {
17771                         cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17772                             QL_NAME, ha->instance);
17773                         kmem_free(ha->nvram_cache,
17774                             sizeof (nvram_cache_desc_t));
17775                         ha->nvram_cache = 0;
17776                         rval = DDI_FAILURE;
17777                 } else {
17778                         mutex_init(&ha->nvram_cache->mutex, NULL,
17779                             MUTEX_DRIVER, NULL);
17780                         ha->nvram_cache->valid = 0;
17781                 }
17782         }
17783 
17784         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17785 
17786         return (rval);
17787 }
17788 
17789 /*
17790  * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17791  *
17792  * Input:       Pointer to the adapter state structure.
17793  * Returns:     Success or Failure.
17794  * Context:     Kernel context.
17795  */
17796 int
17797 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17798 {
17799         int     rval = DDI_SUCCESS;
17800 
17801         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17802 
17803         if (ha->nvram_cache == NULL) {
17804                 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17805                     QL_NAME, ha->instance);
17806                 rval = DDI_FAILURE;
17807         } else {
17808                 if (ha->nvram_cache->cache != NULL) {
17809                         kmem_free(ha->nvram_cache->cache,
17810                             ha->nvram_cache->size);
17811                 }
17812                 mutex_destroy(&ha->nvram_cache->mutex);
17813                 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17814         }
17815 
17816         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17817 
17818         return (rval);
17819 }
17820 
17821 /*
17822  * ql_process_idc_event - Handle an Inter-Driver Communication async event.
17823  *
17824  * Input:       Pointer to the adapter state structure.
17825  * Returns:     void
17826  * Context:     Kernel context.
17827  */
17828 static void
17829 ql_process_idc_event(ql_adapter_state_t *ha)
17830 {
17831         int     rval;
17832 
17833         switch (ha->idc_mb[0]) {
17834         case MBA_IDC_NOTIFICATION:
17835                 /*
17836                  * The informational opcode (idc_mb[2]) can be a
17837                  * defined value or the mailbox command being executed
17838                  * on another function which stimulated this IDC message.
17839                  */
17840                 ADAPTER_STATE_LOCK(ha);
17841                 switch (ha->idc_mb[2]) {
17842                 case IDC_OPC_DRV_START:
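                        /*
                         * A DRV_START notification closes a prior flash
                         * access or restart window: drop the reference
                         * counts and, when they reach zero, release the
                         * global hardware lock and un-stall the queues.
                         */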
17843                         if (ha->idc_flash_acc != 0) {
17844                                 ha->idc_flash_acc--;
17845                                 if (ha->idc_flash_acc == 0) {
17846                                         ha->idc_flash_acc_timer = 0;
17847                                         GLOBAL_HW_UNLOCK();
17848                                 }
17849                         }
17850                         if (ha->idc_restart_cnt != 0) {
17851                                 ha->idc_restart_cnt--;
17852                                 if (ha->idc_restart_cnt == 0) {
17853                                         ha->idc_restart_timer = 0;
17854                                         ADAPTER_STATE_UNLOCK(ha);
17855                                         TASK_DAEMON_LOCK(ha);
17856                                         ha->task_daemon_flags &= ~DRIVER_STALL;
17857                                         TASK_DAEMON_UNLOCK(ha);
17858                                         ql_restart_queues(ha);
17859                                 } else {
17860                                         ADAPTER_STATE_UNLOCK(ha);
17861                                 }
17862                         } else {
17863                                 ADAPTER_STATE_UNLOCK(ha);
17864                         }
17865                         break;
17866                 case IDC_OPC_FLASH_ACC:
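                        /*
                         * Another function wants flash access: take the
                         * global hardware lock on the first reference and
                         * (re)arm the 30-tick access window timer.
                         */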
17867                         ha->idc_flash_acc_timer = 30;
17868                         if (ha->idc_flash_acc == 0) {
17869                                 GLOBAL_HW_LOCK();
17870                         }
17871                         ha->idc_flash_acc++;
17872                         ADAPTER_STATE_UNLOCK(ha);
17873                         break;
17874                 case IDC_OPC_RESTART_MPI:
17875                         ha->idc_restart_timer = 30;
17876                         ha->idc_restart_cnt++;
17877                         ADAPTER_STATE_UNLOCK(ha);
17878                         TASK_DAEMON_LOCK(ha);
17879                         ha->task_daemon_flags |= DRIVER_STALL;
17880                         TASK_DAEMON_UNLOCK(ha);
17881                         break;
17882                 case IDC_OPC_PORT_RESET_MBC:
17883                 case IDC_OPC_SET_PORT_CONFIG_MBC:
17884                         ha->idc_restart_timer = 30;
17885                         ha->idc_restart_cnt++;
17886                         ADAPTER_STATE_UNLOCK(ha);
17887                         TASK_DAEMON_LOCK(ha);
17888                         ha->task_daemon_flags |= DRIVER_STALL;
17889                         TASK_DAEMON_UNLOCK(ha);
17890                         (void) ql_wait_outstanding(ha);
17891                         break;
17892                 default:
17893                         ADAPTER_STATE_UNLOCK(ha);
17894                         EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
17895                             ha->idc_mb[2]);
17896                         break;
17897                 }
17898                 /*
17899                  * If there is a timeout value associated with this IDC
17900                  * notification then there is an implied requirement
17901                  * that we return an ACK.
17902                  */
17903                 if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
17904                         rval = ql_idc_ack(ha);
17905                         if (rval != QL_SUCCESS) {
17906                                 EL(ha, "idc_ack status=%xh %xh\n", rval,
17907                                     ha->idc_mb[2]);
17908                         }
17909                 }
17910                 break;
17911         case MBA_IDC_COMPLETE:
17912                 /*
17913                  * We don't ACK completions; only these opcodes require action.
17914                  */
17915                 switch (ha->idc_mb[2]) {
17916                 case IDC_OPC_PORT_RESET_MBC:
17917                 case IDC_OPC_SET_PORT_CONFIG_MBC:
17918                         ADAPTER_STATE_LOCK(ha);
17919                         if (ha->idc_restart_cnt != 0) {
17920                                 ha->idc_restart_cnt--;
17921                                 if (ha->idc_restart_cnt == 0) {
17922                                         ha->idc_restart_timer = 0;
17923                                         ADAPTER_STATE_UNLOCK(ha);
17924                                         TASK_DAEMON_LOCK(ha);
17925                                         ha->task_daemon_flags &= ~DRIVER_STALL;
17926                                         TASK_DAEMON_UNLOCK(ha);
17927                                         ql_restart_queues(ha);
17928                                 } else {
17929                                         ADAPTER_STATE_UNLOCK(ha);
17930                                 }
17931                         } else {
17932                                 ADAPTER_STATE_UNLOCK(ha);
17933                         }
17934                         break;
17935                 default:
17936                         break; /* Don't care... */
17937                 }
17938                 break;
17939         case MBA_IDC_TIME_EXTENDED:
17940                 QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
17941                     "%xh\n", ha->instance, ha->idc_mb[2]);
17942                 break;
17943         default:
17944                 EL(ha, "Inconsistent IDC event =%xh %xh\n", ha->idc_mb[0],
17945                     ha->idc_mb[2]);
17946                 /* No adapter state lock is held on this path. */
17947                 break;
17948         }
17949 }
 
 
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /* Copyright 2015 QLogic Corporation */
  23 
  24 /*
  25  * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  26  */
  27 /*
  28  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  29  * Copyright (c) 2016 by Delphix. All rights reserved.
  30  */
  31 
  32 #pragma ident   "Copyright 2015 QLogic Corporation; ql_api.c"
  33 
  34 /*
  35  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
  36  *
  37  * ***********************************************************************
  38  * *                                                                    **
  39  * *                            NOTICE                                  **
  40  * *            COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION              **
  41  * *                    ALL RIGHTS RESERVED                             **
  42  * *                                                                    **
  43  * ***********************************************************************
  44  *
  45  */
  46 
  47 #include <ql_apps.h>
  48 #include <ql_api.h>
  49 #include <ql_debug.h>
  50 #include <ql_init.h>
  51 #include <ql_iocb.h>
  52 #include <ql_ioctl.h>
  53 #include <ql_isr.h>
  54 #include <ql_mbx.h>
  55 #include <ql_nx.h>
  56 #include <ql_xioctl.h>
  57 #include <ql_fm.h>
  58 
  59 /*
  60  * Solaris external defines.
  61  */
  62 extern pri_t minclsyspri;
  63 extern pri_t maxclsyspri;
  64 
  65 /*
  66  * dev_ops functions prototypes
  67  */
  68 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
  69 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
  70 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
  71 static int ql_power(dev_info_t *, int, int);
  72 static int ql_quiesce(dev_info_t *);
  73 
  74 /*
  75  * FCA functions prototypes exported by means of the transport table
  76  */
  77 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
  78     fc_fca_bind_info_t *);
  79 static void ql_unbind_port(opaque_t);
  80 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
  81 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
  82 static int ql_els_send(opaque_t, fc_packet_t *);
  83 static int ql_get_cap(opaque_t, char *, void *);
  84 static int ql_set_cap(opaque_t, char *, void *);
  85 static int ql_getmap(opaque_t, fc_lilpmap_t *);
  86 static int ql_transport(opaque_t, fc_packet_t *);
  87 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
  88 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
  89 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
  90 static int ql_abort(opaque_t, fc_packet_t *, int);
  91 static int ql_reset(opaque_t, uint32_t);
  92 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
  93 static opaque_t ql_get_device(opaque_t, fc_portid_t);
  94 
  95 /*
  96  * FCA Driver Support Function Prototypes.
  97  */
  98 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
  99 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
 100 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
 101 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
 102 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
 103 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
 104 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
 105 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
 106 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
 107 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
 108 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
 109 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
 110 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
 111 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
 112 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
 113 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
 114 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
 115 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
 116 static int ql_login_port(ql_adapter_state_t *, port_id_t);
 117 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
 118 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
 119 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint64_t);
 120 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
 121 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
 122     ql_srb_t *);
 123 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
 124 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
 125 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
 126     ql_srb_t *);
 127 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
 128 static void ql_task_daemon(void *);
 129 static void ql_task_thread(ql_adapter_state_t *);
 130 static void ql_idle_check(ql_adapter_state_t *);
 131 static void ql_unsol_callback(ql_srb_t *);
 132 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
 133 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
 134 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
 135 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
 136 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
 137 static int ql_handle_rscn_update(ql_adapter_state_t *);
 138 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
 139     fc_unsol_buf_t *);
 140 static void ql_timer(void *);
 141 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
 142 static void ql_watchdog(ql_adapter_state_t *);
 143 static void ql_wdg_tq_list(ql_adapter_state_t *, ql_tgt_t *);
 144 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *);
 145 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
 146 static void ql_iidma(ql_adapter_state_t *);
 147 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
 148 static void ql_loop_resync(ql_adapter_state_t *);
 149 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
 150 static int ql_kstat_update(kstat_t *, int);
 151 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
 152 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 153 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 154 static size_t ql_81xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 155 static size_t ql_8021_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 156 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
 157 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
 158 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
 159 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
 160 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
 161 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
 162     void *);
 163 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
 164     uint8_t);
 165 static int ql_save_config_regs(dev_info_t *);
 166 static int ql_restore_config_regs(dev_info_t *);
 167 static void ql_halt(ql_adapter_state_t *, int);
 168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
 169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
 170 static int ql_suspend_adapter(ql_adapter_state_t *);
 171 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
 172 static int ql_setup_interrupts(ql_adapter_state_t *);
 173 static int ql_setup_msi(ql_adapter_state_t *);
 174 static int ql_setup_msix(ql_adapter_state_t *);
 175 static int ql_setup_fixed(ql_adapter_state_t *);
 176 static void ql_release_intr(ql_adapter_state_t *);
 177 static int ql_legacy_intr(ql_adapter_state_t *);
 178 static int ql_init_mutex(ql_adapter_state_t *);
 179 static void ql_destroy_mutex(ql_adapter_state_t *);
 180 static void ql_fca_isp_els_request(ql_adapter_state_t *, ql_request_q_t *,
 181     fc_packet_t *, els_descriptor_t *);
 182 static void ql_isp_els_request_ctor(els_descriptor_t *,
 183     els_passthru_entry_t *);
 184 static int ql_n_port_plogi(ql_adapter_state_t *);
 185 static int ql_create_queues(ql_adapter_state_t *);
 186 static int ql_create_rsp_queue(ql_adapter_state_t *, uint16_t);
 187 static void ql_delete_queues(ql_adapter_state_t *);
 188 static int ql_multi_queue_support(ql_adapter_state_t *);
 189 static int ql_map_mem_bar(ql_adapter_state_t *, ddi_acc_handle_t *, caddr_t *,
 190     uint32_t, uint32_t);
 191 static void ql_completion_thread(void *);
 192 static void ql_process_comp_queue(void *);
 193 static int ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *);
 194 static void ql_idc(ql_adapter_state_t *);
 195 static int ql_83xx_binary_fw_dump(ql_adapter_state_t *, ql_83xx_fw_dump_t *);
 196 static size_t ql_83xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 197 static caddr_t ql_str_ptr(ql_adapter_state_t *, caddr_t, uint32_t *);
 198 static int ql_27xx_binary_fw_dump(ql_adapter_state_t *);
 199 static size_t ql_27xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 200 static uint32_t ql_2700_dmp_parse_template(ql_adapter_state_t *, ql_dt_hdr_t *,
 201     uint8_t *, uint32_t);
 202 static int ql_2700_dt_riob1(ql_adapter_state_t *, ql_dt_riob1_t *, uint8_t *,
 203     uint8_t *);
 204 static void ql_2700_dt_wiob1(ql_adapter_state_t *, ql_dt_wiob1_t *, uint8_t *,
 205     uint8_t *);
 206 static int ql_2700_dt_riob2(ql_adapter_state_t *, ql_dt_riob2_t *, uint8_t *,
 207     uint8_t *);
 208 static void ql_2700_dt_wiob2(ql_adapter_state_t *, ql_dt_wiob2_t *, uint8_t *,
 209     uint8_t *);
 210 static int ql_2700_dt_rpci(ql_adapter_state_t *, ql_dt_rpci_t *, uint8_t *,
 211     uint8_t *);
 212 static void ql_2700_dt_wpci(ql_adapter_state_t *, ql_dt_wpci_t *, uint8_t *,
 213     uint8_t *);
 214 static int ql_2700_dt_rram(ql_adapter_state_t *, ql_dt_rram_t *, uint8_t *,
 215     uint8_t *);
 216 static int ql_2700_dt_gque(ql_adapter_state_t *, ql_dt_gque_t *, uint8_t *,
 217     uint8_t *);
 218 static int ql_2700_dt_gfce(ql_adapter_state_t *, ql_dt_gfce_t *, uint8_t *,
 219     uint8_t *);
 220 static void ql_2700_dt_prisc(ql_adapter_state_t *, ql_dt_prisc_t *, uint8_t *,
 221     uint8_t *);
 222 static void ql_2700_dt_rrisc(ql_adapter_state_t *, ql_dt_rrisc_t *, uint8_t *,
 223     uint8_t *);
 224 static void ql_2700_dt_dint(ql_adapter_state_t *, ql_dt_dint_t *, uint8_t *,
 225     uint8_t *);
 226 static int ql_2700_dt_ghbd(ql_adapter_state_t *, ql_dt_ghbd_t *, uint8_t *,
 227     uint8_t *);
 228 static int ql_2700_dt_scra(ql_adapter_state_t *, ql_dt_scra_t *, uint8_t *,
 229     uint8_t *);
 230 static int ql_2700_dt_rrreg(ql_adapter_state_t *, ql_dt_rrreg_t *, uint8_t *,
 231     uint8_t *);
 232 static void ql_2700_dt_wrreg(ql_adapter_state_t *, ql_dt_wrreg_t *, uint8_t *,
 233     uint8_t *);
 234 static int ql_2700_dt_rrram(ql_adapter_state_t *, ql_dt_rrram_t *, uint8_t *,
 235     uint8_t *);
 236 static int ql_2700_dt_rpcic(ql_adapter_state_t *, ql_dt_rpcic_t *, uint8_t *,
 237     uint8_t *);
 238 static int ql_2700_dt_gques(ql_adapter_state_t *, ql_dt_gques_t *, uint8_t *,
 239     uint8_t *);
 240 static int ql_2700_dt_wdmp(ql_adapter_state_t *, ql_dt_wdmp_t *, uint8_t *,
 241     uint8_t *);
 242 static int ql_2700_dump_ram(ql_adapter_state_t *, uint16_t, uint32_t, uint32_t,
 243     uint8_t *);
 244 
 245 /*
 246  * Global data
 247  */
 248 static uint8_t  ql_enable_pm = 1;
 249 static int      ql_flash_sbus_fpga = 0;
 250 uint32_t        ql_os_release_level;
 251 uint32_t        ql_disable_aif = 0;
 252 uint32_t        ql_disable_intx = 0;
 253 uint32_t        ql_disable_msi = 0;
 254 uint32_t        ql_disable_msix = 0;
 255 uint32_t        ql_enable_ets = 0;
 256 uint16_t        ql_osc_wait_count = 1000;
 257 uint32_t        ql_task_cb_dly = 64;
 258 uint32_t        qlc_disable_load = 0;
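
/*
 * The globals above act as load-time tunables and are normally overridden
 * from /etc/system, e.g. (assuming the module name is qlc, as the
 * qlc_disable_load variable suggests):
 *
 *      set qlc:ql_disable_msix = 1
 */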
 259 
 260 /* Timer routine variables. */
 261 static timeout_id_t     ql_timer_timeout_id = NULL;
 262 static clock_t          ql_timer_ticks;
 263 
 264 /* Soft state head pointer. */
 265 void *ql_state = NULL;
 266 
 267 /* Head adapter link. */
 268 ql_head_t ql_hba = {
 269         NULL,
 270         NULL
 271 };
 272 
 273 /* Global hba index */
 274 uint32_t ql_gfru_hba_index = 1;
 275 
 276 /*
 277  * Some IP defines and globals
 278  */
 
 493         0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
 494         0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
 495 
 496         0xff,   /* fpm_diag_config  - n/a */
 497         0xff,   /* pcr - n/a */
 498         0xff,   /* mctr - n/a */
 499         0xff,   /* fb_cmd - n/a */
 500         0x48,   /* hccr */
 501         0x4c,   /* gpiod */
 502         0x50,   /* gpioe */
 503         0xff,   /* host_to_host_sema - n/a */
 504         0x2c,   /* pri_req_in */
 505         0x30,   /* pri_req_out */
 506         0x3c,   /* atio_req_in */
 507         0x40,   /* atio_req_out */
 508         0x54,   /* io_base_addr */
 509         0x380,  /* nx_host_int */
 510         0x504   /* nx_risc_int */
 511 };
 512 
 513 /* 2700/8300 register offsets */
 514 static reg_off_t reg_off_2700_8300 = {
 515         0x00,   /* flash_address */
 516         0x04,   /* flash_data */
 517         0x08,   /* ctrl_status */
 518         0x0c,   /* ictrl */
 519         0x10,   /* istatus */
 520         0xff,   /* semaphore - n/a */
 521         0xff,   /* nvram - n/a */
 522         0xff,   /* req_in - n/a */
 523         0xff,   /* req_out - n/a */
 524         0xff,   /* resp_in - n/a */
 525         0xff,   /* resp_out - n/a */
 526         0x44,   /* risc2host */
 527         32,     /* Number of mailboxes */
 528 
 529         /* Mailbox in register offsets 0 - 31 */
 530         0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
 531         0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
 532         0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
 533         0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
 534 
 535         /* Mailbox out register offsets 0 - 31 */
 536         0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
 537         0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
 538         0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
 539         0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
 540 
 541         0xff,   /* fpm_diag_config - n/a */
 542         0xff,   /* pcr - n/a */
 543         0xff,   /* mctr - n/a */
 544         0xff,   /* fb_cmd - n/a */
 545         0x48,   /* hccr */
 546         0x4c,   /* gpiod */
 547         0x50,   /* gpioe */
 548         0x58,   /* host_to_host_sema - n/a */
 549         0xff,   /* pri_req_in - n/a */
 550         0xff,   /* pri_req_out - n/a */
 551         0xff,   /* atio_req_in - n/a */
 552         0xff,   /* atio_req_out - n/a */
 553         0x54,   /* io_base_addr */
 554         0xff,   /* nx_host_int - n/a */
 555         0xff    /* nx_risc_int - n/a */
 556 };
 557 
 558 /* mutex for protecting variables shared by all instances of the driver */
 559 kmutex_t ql_global_mutex;
 560 kmutex_t ql_global_hw_mutex;
 561 kmutex_t ql_global_el_mutex;
 562 kmutex_t ql_global_timer_mutex;
 563 
 564 /* DMA access attribute structure. */
 565 ddi_device_acc_attr_t ql_dev_acc_attr = {
 566         DDI_DEVICE_ATTR_V0,
 567         DDI_STRUCTURE_LE_ACC,
 568         DDI_STRICTORDER_ACC
 569 };
 570 
571 /* I/O DMA attribute structures. */
 572 ddi_dma_attr_t ql_64bit_io_dma_attr = {
 573         DMA_ATTR_V0,                    /* dma_attr_version */
 574         QL_DMA_LOW_ADDRESS,             /* low DMA address range */
 575         QL_DMA_HIGH_64BIT_ADDRESS,      /* high DMA address range */
 576         QL_DMA_XFER_COUNTER,            /* DMA counter register */
 577         QL_DMA_ADDRESS_ALIGNMENT,       /* DMA address alignment */
 578         QL_DMA_BURSTSIZES,              /* DMA burstsizes */
 579         QL_DMA_MIN_XFER_SIZE,           /* min effective DMA size */
 580         QL_DMA_MAX_XFER_SIZE,           /* max DMA xfer size */
 581         QL_DMA_SEGMENT_BOUNDARY,        /* segment boundary */
 582         QL_DMA_SG_LIST_LENGTH,          /* s/g list length */
 583         QL_DMA_GRANULARITY,             /* granularity of device */
 584         QL_DMA_XFER_FLAGS               /* DMA transfer flags */
 585 };
 586 
 587 ddi_dma_attr_t ql_32bit_io_dma_attr = {
 588         DMA_ATTR_V0,                    /* dma_attr_version */
 589         QL_DMA_LOW_ADDRESS,             /* low DMA address range */
 590         QL_DMA_HIGH_32BIT_ADDRESS,      /* high DMA address range */
 591         QL_DMA_XFER_COUNTER,            /* DMA counter register */
 592         QL_DMA_ADDRESS_ALIGNMENT,       /* DMA address alignment */
 593         QL_DMA_BURSTSIZES,              /* DMA burstsizes */
 594         QL_DMA_MIN_XFER_SIZE,           /* min effective DMA size */
 595         QL_DMA_MAX_XFER_SIZE,           /* max DMA xfer size */
 596         QL_DMA_SEGMENT_BOUNDARY,        /* segment boundary */
 597         QL_DMA_SG_LIST_LENGTH,          /* s/g list length */
 598         QL_DMA_GRANULARITY,             /* granularity of device */
 599         QL_DMA_XFER_FLAGS               /* DMA transfer flags */
 600 };
 601 
 602 /* Static declarations of cb_ops entry point functions... */
 603 static struct cb_ops ql_cb_ops = {
 604         ql_open,                        /* b/c open */
 605         ql_close,                       /* b/c close */
 606         nodev,                          /* b strategy */
 607         nodev,                          /* b print */
 608         nodev,                          /* b dump */
 609         nodev,                          /* c read */
 610         nodev,                          /* c write */
 611         ql_ioctl,                       /* c ioctl */
 612         nodev,                          /* c devmap */
 613         nodev,                          /* c mmap */
 614         nodev,                          /* c segmap */
 615         nochpoll,                       /* c poll */
 616         nodev,                          /* cb_prop_op */
 617         NULL,                           /* streamtab  */
 618         D_MP | D_NEW | D_HOTPLUG,       /* Driver compatibility flag */
 619         CB_REV,                         /* cb_ops revision */
 620         nodev,                          /* c aread */
 621         nodev                           /* c awrite */
 
 625 static struct dev_ops ql_devops = {
 626         DEVO_REV,                       /* devo_rev */
 627         0,                              /* refcnt */
 628         ql_getinfo,                     /* getinfo */
 629         nulldev,                        /* identify */
 630         nulldev,                        /* probe */
 631         ql_attach,                      /* attach */
 632         ql_detach,                      /* detach */
 633         nodev,                          /* reset */
 634         &ql_cb_ops,                 /* char/block ops */
 635         NULL,                           /* bus operations */
 636         ql_power,                       /* power management */
 637         ql_quiesce                      /* quiesce device */
 638 };
 639 
 640 /* ELS command code to text converter */
 641 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
 642 /* Mailbox command code to text converter */
 643 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
 644 
 645 char ql_driver_version[] = QL_VERSION;
 646 
 647 uint32_t ql_log_entries = QL_LOG_ENTRIES;
 648 
 649 /*
 650  * Loadable Driver Interface Structures.
 651  * Declare and initialize the module configuration section...
 652  */
 653 static struct modldrv modldrv = {
 654         &mod_driverops,                             /* type of module: driver */
 655         "SunFC Qlogic FCA v" QL_VERSION,        /* name of module */
 656         &ql_devops                          /* driver dev_ops */
 657 };
 658 
 659 static struct modlinkage modlinkage = {
 660         MODREV_1,
 661         &modldrv,
 662         NULL
 663 };
 664 
 665 /* ************************************************************************ */
 666 /*                              Loadable Module Routines.                   */
 667 /* ************************************************************************ */
 668 
 669 /*
 670  * _init
 671  *      Initializes a loadable module. It is called before any other
 672  *      routine in a loadable module.
 673  *
 674  * Returns:
 675  *      0 = success
 676  *
 677  * Context:
 678  *      Kernel context.
 679  */
 680 int
 681 _init(void)
 682 {
 683         uint16_t        w16;
 684         int             rval = 0;
 685 
 686         if (qlc_disable_load) {
 687                 cmn_err(CE_WARN, "%s load disabled", QL_NAME);
 688                 return (EINVAL);
 689         }
 690 
 691         /* Get OS major release level. */
 692         for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
 693                 if (utsname.release[w16] == '.') {
 694                         w16++;
 695                         break;
 696                 }
 697         }
 698         if (w16 < sizeof (utsname.release)) {
 699                 (void) ql_bstr_to_dec(&utsname.release[w16],
 700                     &ql_os_release_level, 0);
 701         } else {
 702                 ql_os_release_level = 0;
 703         }
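        /* e.g. a "5.11" utsname release string yields ql_os_release_level == 11 */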
 704         if (ql_os_release_level < 6) {
 705                 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
 706                     QL_NAME, ql_os_release_level);
 707                 rval = EINVAL;
 708         }
 709         if (ql_os_release_level == 6) {
 710                 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
 711                 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
 712         }
 713 
 714         if (rval == 0) {
 715                 rval = ddi_soft_state_init(&ql_state,
 716                     sizeof (ql_adapter_state_t), 0);
 717         }
 718         if (rval == 0) {
 719                 /* allow the FC Transport to tweak the dev_ops */
 720                 fc_fca_init(&ql_devops);
 721 
 722                 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
 723                 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
 724                 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
 725                 mutex_init(&ql_global_timer_mutex, NULL, MUTEX_DRIVER, NULL);
 726                 rval = mod_install(&modlinkage);
 727                 if (rval != 0) {
 728                         mutex_destroy(&ql_global_timer_mutex);
 729                         mutex_destroy(&ql_global_el_mutex);
 730                         mutex_destroy(&ql_global_hw_mutex);
 731                         mutex_destroy(&ql_global_mutex);
 732                         ddi_soft_state_fini(&ql_state);
 733                 }
 734         }
 735 
 736         if (rval != 0) {
 737                 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
 738                     QL_NAME);
 739         }
 740 
 741         return (rval);
 742 }
 743 
 744 /*
 745  * _fini
 746  *      Prepares a module for unloading. It is called when the system
 747  *      wants to unload a module. If the module determines that it can
 748  *      be unloaded, then _fini() returns the value returned by
 749  *      mod_remove(). Upon successful return from _fini() no other
 750  *      routine in the module will be called before _init() is called.
 751  *
 752  * Returns:
 753  *      0 = success
 754  *
 755  * Context:
 756  *      Kernel context.
 757  */
 758 int
 759 _fini(void)
 760 {
 761         int     rval;
 762 
 763         rval = mod_remove(&modlinkage);
 764         if (rval == 0) {
 765                 mutex_destroy(&ql_global_timer_mutex);
 766                 mutex_destroy(&ql_global_el_mutex);
 767                 mutex_destroy(&ql_global_hw_mutex);
 768                 mutex_destroy(&ql_global_mutex);
 769                 ddi_soft_state_fini(&ql_state);
 770         }
 771 
 772         return (rval);
 773 }
 774 
 775 /*
 776  * _info
 777  *      Returns information about loadable module.
 778  *
 779  * Input:
 780  *      modinfo = pointer to module information structure.
 781  *
 782  * Returns:
 783  *      Value returned by mod_info().
 784  *
 785  * Context:
 786  *      Kernel context.
 787  */
 788 int
 
 808  *      arg = command specific argument.
 809  *      resultp = pointer to where request information is stored.
 810  *
 811  * Returns:
 812  *      DDI_SUCCESS or DDI_FAILURE.
 813  *
 814  * Context:
 815  *      Kernel context.
 816  */
 817 /* ARGSUSED */
 818 static int
 819 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
 820 {
 821         ql_adapter_state_t      *ha;
 822         int                     minor;
 823         int                     rval = DDI_FAILURE;
 824 
 825         minor = (int)(getminor((dev_t)arg));
 826         ha = ddi_get_soft_state(ql_state, minor);
 827         if (ha == NULL) {
 828                 QL_PRINT_2(ha, "failed, unknown minor=%d\n",
 829                     getminor((dev_t)arg));
 830                 *resultp = NULL;
 831                 return (rval);
 832         }
 833 
 834         QL_PRINT_3(ha, "started\n");
 835 
 836         switch (cmd) {
 837         case DDI_INFO_DEVT2DEVINFO:
 838                 *resultp = ha->dip;
 839                 rval = DDI_SUCCESS;
 840                 break;
 841         case DDI_INFO_DEVT2INSTANCE:
 842                 *resultp = (void *)(uintptr_t)(ha->instance);
 843                 rval = DDI_SUCCESS;
 844                 break;
 845         default:
 846                 EL(ha, "failed, unsupported cmd=%d\n", cmd);
 847                 rval = DDI_FAILURE;
 848                 break;
 849         }
 850 
 851         QL_PRINT_3(ha, "done\n");
 852 
 853         return (rval);
 854 }
 855 
 856 /*
 857  * ql_attach
 858  *      Configure and attach an instance of the driver
 859  *      for a port.
 860  *
 861  * Input:
 862  *      dip = pointer to device information structure.
 863  *      cmd = attach type.
 864  *
 865  * Returns:
 866  *      DDI_SUCCESS or DDI_FAILURE.
 867  *
 868  * Context:
 869  *      Kernel context.
 870  */
 871 static int
 872 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 873 {
 874         off_t                   regsize;
 875         uint32_t                size;
 876         int                     rval, *ptr;
 877         uint_t                  progress = 0;
 878         char                    *buf, taskq_name[32];
 879         ushort_t                caps_ptr, cap;
 880         fc_fca_tran_t           *tran;
 881         ql_adapter_state_t      *ha = NULL;
 882         int                     instance = ddi_get_instance(dip);
 883 
 884         static char *pmcomps[] = {
 885                 NULL,
 886                 PM_LEVEL_D3_STR,                /* Device OFF */
 887                 PM_LEVEL_D0_STR,                /* Device ON */
 888         };
 889 
 890         QL_PRINT_3(NULL, "started, instance=%d, cmd=%xh\n",
 891             ddi_get_instance(dip), cmd);
 892 
 893         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
 894 
 895         switch (cmd) {
 896         case DDI_ATTACH:
 897                 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
 898                     QL_NAME, instance, QL_VERSION);
 899 
 900                 /* Correct OS version? */
 901                 if (ql_os_release_level != 11) {
 902                         cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
 903                             "11", QL_NAME, instance);
 904                         goto attach_failed;
 905                 }
 906 
 907                 /* Hardware is installed in a DMA-capable slot? */
 908                 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
 909                         cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
 910                             instance);
 911                         goto attach_failed;
 912                 }
 913 
 914                 /* Allocate our per-device-instance structure */
 915                 if (ddi_soft_state_zalloc(ql_state,
 916                     instance) != DDI_SUCCESS) {
 917                         cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
 918                             QL_NAME, instance);
 919                         goto attach_failed;
 920                 }
 921 
 922                 ha = ddi_get_soft_state(ql_state, instance);
 923                 if (ha == NULL) {
 924                         cmn_err(CE_WARN, "%s(%d): can't get soft state",
 925                             QL_NAME, instance);
 926                         goto attach_failed;
 927                 }
 928                 ha->dip = dip;
 929                 ha->instance = instance;
 930                 ha->hba.base_address = ha;
 931                 ha->pha = ha;
 932 
 933                 ha->bit32_io_dma_attr = ql_32bit_io_dma_attr;
 934                 ha->bit64_io_dma_attr = ql_64bit_io_dma_attr;
 935 
 936                 (void) ql_el_trace_alloc(ha);
 937 
 938                 progress |= QL_SOFT_STATE_ALLOCED;
 939 
 940                 /* Get extended logging and dump flags. */
 941                 ql_common_properties(ha);
 942 
 943                 qlc_fm_init(ha);
 944                 progress |= QL_FCA_INIT_FM;
 945 
 946                 ha->io_dma_attr = ha->bit32_io_dma_attr;
 947 
 948                 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
 949                     "sbus") == 0) {
 950                         EL(ha, "%s SBUS card detected\n", QL_NAME);
 951                         ha->cfg_flags |= CFG_SBUS_CARD;
 952                 }
 953 
 954                 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
 955                     DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
 956 
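                /* Array of unsolicited buffer (fc_unsol_buf) descriptors. */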
 957                 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
 958                     QL_UB_LIMIT, KM_SLEEP);
 959 
 960                 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
 961                     KM_SLEEP);
 962 
 963                 (void) ddi_pathname(dip, buf);
 964                 ha->devpath = kmem_zalloc(strlen(buf) + 1, KM_SLEEP);
 965                 if (ha->devpath == NULL) {
 966                         EL(ha, "devpath mem alloc failed\n");
 967                 } else {
 968                         (void) strcpy(ha->devpath, buf);
 969                         EL(ha, "devpath is: %s\n", ha->devpath);
 970                 }
 971 
 972                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
 973                         /*
 974                          * For cards where PCI is mapped to sbus e.g. Ivory.
 975                          *
 976                          * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
 977                          *      : 0x100 - 0x3FF PCI IO space for 2200
 978                          * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
 979                          *      : 0x100 - 0x3FF PCI IO Space for fpga
 980                          */
 981                         if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
 982                             0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
 983                             DDI_SUCCESS) {
 984                                 cmn_err(CE_WARN, "%s(%d): Unable to map device"
 
1004                          */
1005                         if (ddi_regs_map_setup(dip, 0,
1006                             (caddr_t *)&ha->sbus_config_base, 0, 0x100,
1007                             &ql_dev_acc_attr, &ha->sbus_config_handle) !=
1008                             DDI_SUCCESS) {
1009                                 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
1010                                     "config registers", QL_NAME, instance);
1011                                 goto attach_failed;
1012                         }
1013                         progress |= QL_CONFIG_SPACE_SETUP;
1014                 } else {
1015                         /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
1016                         rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1017                             DDI_PROP_DONTPASS, "reg", &ptr, &size);
1018                         if (rval != DDI_PROP_SUCCESS) {
1019                                 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
1020                                     "address registers", QL_NAME, instance);
1021                                 goto attach_failed;
1022                         } else {
1023                                 ha->pci_bus_addr = ptr[0];
1024                                 ha->pci_function_number = (uint8_t)
1025                                     (ha->pci_bus_addr >> 8 & 7);
1026                                 ddi_prop_free(ptr);
1027                         }
1028 
1029                         /*
1030                          * Map PCI config space before adding the interrupt
1031                          * so that the chip type (2200 or 2300) can be
1032                          * determined before the interrupt routine gets a
1033                          * chance to execute.
1034                          */
1035                         if (pci_config_setup(ha->dip, &ha->pci_handle) !=
1036                             DDI_SUCCESS) {
1037                                 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
1038                                     "config space", QL_NAME, instance);
1039                                 goto attach_failed;
1040                         }
1041                         progress |= QL_CONFIG_SPACE_SETUP;
1042 
1043                         /*
1044                          * Setup the ISP2200 registers address mapping to be
1045                          * accessed by this particular driver.
1046                          * 0x0   Configuration Space
1047                          * 0x1   I/O Space
1048                          * 0x2   32-bit Memory Space address
1049                          * 0x3   64-bit Memory Space address
1050                          */
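                        /*
                         * Bit 0 of BAR0 set means BAR0 is an I/O-space BAR,
                         * so the memory-space registers are at "reg" entry 2;
                         * otherwise entry 1 is mapped.
                         */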
1051                         size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
1052                             2 : 1;
1053 
1054                         if (qlc_fm_check_acc_handle(ha, ha->pci_handle)
1055                             != DDI_FM_OK) {
1056                                 qlc_fm_report_err_impact(ha,
1057                                     QL_FM_EREPORT_ACC_HANDLE_CHECK);
1058                                 goto attach_failed;
1059                         }
1060 
1061                         if (ddi_dev_regsize(dip, size, &regsize) !=
1062                             DDI_SUCCESS ||
1063                             ddi_regs_map_setup(dip, size, &ha->iobase,
1064                             0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1065                             DDI_SUCCESS) {
1066                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1067                                     "failed", QL_NAME, instance);
1068                                 goto attach_failed;
1069                         }
1070                         progress |= QL_REGS_MAPPED;
1071 
1072                         if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1073                             != DDI_FM_OK) {
1074                                 qlc_fm_report_err_impact(ha,
1075                                     QL_FM_EREPORT_ACC_HANDLE_CHECK);
1076                                 goto attach_failed;
1077                         }
1078 
1079                         /*
1080                          * We need I/O space mappings for 23xx HBAs for
1081                          * loading flash (FCode). The chip has a bug due to
1082                          * which loading flash fails through mem space
1083                          * mappings in PCI-X mode.
1084                          */
1085                         if (size == 1) {
1086                                 ha->iomap_iobase = ha->iobase;
1087                                 ha->iomap_dev_handle = ha->dev_handle;
1088                         } else {
1089                                 if (ddi_dev_regsize(dip, 1, &regsize) !=
1090                                     DDI_SUCCESS ||
1091                                     ddi_regs_map_setup(dip, 1,
1092                                     &ha->iomap_iobase, 0, regsize,
1093                                     &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1094                                     DDI_SUCCESS) {
1095                                         cmn_err(CE_WARN, "%s(%d): regs_map_"
1096                                             "setup(I/O) failed", QL_NAME,
1097                                             instance);
1098                                         goto attach_failed;
1099                                 }
1100                                 progress |= QL_IOMAP_IOBASE_MAPPED;
1101 
1102                                 if (qlc_fm_check_acc_handle(ha,
1103                                     ha->iomap_dev_handle) != DDI_FM_OK) {
1104                                         qlc_fm_report_err_impact(ha,
1105                                             QL_FM_EREPORT_ACC_HANDLE_CHECK);
1106                                         goto attach_failed;
1107                                 }
1108                         }
1109                 }
1110 
1111                 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1112                     PCI_CONF_SUBSYSID);
1113                 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1114                     PCI_CONF_SUBVENID);
1115                 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1116                     PCI_CONF_VENID);
1117                 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1118                     PCI_CONF_DEVID);
1119                 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1120                     PCI_CONF_REVID);
1121 
1122                 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1123                     "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1124                     ha->subven_id, ha->subsys_id);
1125 
1126                 switch (ha->device_id) {
1127                 case 0x2300:
1128                 case 0x2312:
1129                 case 0x2322:
1130                 case 0x6312:
1131                 case 0x6322:
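                        /*
                         * Interrupt pin INTB (value 2) is assumed to mark the
                         * second channel of a dual-port HBA.
                         */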
1132                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1133                                 ha->function_number = 1;
1134                         }
1135                         if (ha->device_id == 0x2322 ||
1136                             ha->device_id == 0x6322) {
1137                                 ha->cfg_flags |= CFG_CTRL_63XX;
1138                                 ha->fw_class = 0x6322;
1139                                 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1140                         } else {
1141                                 ha->cfg_flags |= CFG_CTRL_23XX;
1142                                 ha->fw_class = 0x2300;
1143                                 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1144                         }
1145                         ha->reg_off = &reg_off_2300;
1146                         ha->interrupt_count = 1;
1147                         ha->osc_max_cnt = 1024;
1148                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1149                                 goto attach_failed;
1150                         }
1151                         ha->fcp_cmd = ql_command_iocb;
1152                         ha->ip_cmd = ql_ip_iocb;
1153                         ha->ms_cmd = ql_ms_iocb;
1154                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1155                                 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1156                                 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1157                         } else {
1158                                 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1159                                 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1160                         }
1161                         break;
1162 
1163                 case 0x2200:
1164                         ha->cfg_flags |= CFG_CTRL_22XX;
1165                         ha->reg_off = &reg_off_2200;
1166                         ha->interrupt_count = 1;
1167                         ha->osc_max_cnt = 1024;
1168                         ha->fw_class = 0x2200;
1169                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1170                                 goto attach_failed;
1171                         }
1172                         ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1173                         ha->fcp_cmd = ql_command_iocb;
1174                         ha->ip_cmd = ql_ip_iocb;
1175                         ha->ms_cmd = ql_ms_iocb;
1176                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1177                                 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1178                                 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1179                         } else {
1180                                 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1181                                 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1182                         }
1183                         break;
1184 
1185                 case 0x2422:
1186                 case 0x2432:
1187                 case 0x5422:
1188                 case 0x5432:
1189                 case 0x8432:
1190                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1191                                 ha->function_number = 1;
1192                         }
1193                         ha->cfg_flags |= CFG_CTRL_24XX;
1194                         if (ha->device_id == 0x8432) {
1195                                 ha->cfg_flags |= CFG_CTRL_MENLO;
1196                         } else {
1197                                 ha->flags |= VP_ENABLED;
1198                                 ha->max_vports = MAX_24_VIRTUAL_PORTS;
1199                         }
1200 
1201                         ha->reg_off = &reg_off_2400_2500;
1202                         ha->interrupt_count = 2;
1203                         ha->osc_max_cnt = 2048;
1204                         ha->fw_class = 0x2400;
1205                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1206                                 goto attach_failed;
1207                         }
1208                         ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1209                         ha->fcp_cmd = ql_command_24xx_iocb;
1210                         ha->ip_cmd = ql_ip_24xx_iocb;
1211                         ha->ms_cmd = ql_ms_24xx_iocb;
1212                         ha->els_cmd = ql_els_24xx_iocb;
1213                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1214                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1215                         break;
1216 
1217                 case 0x2522:
1218                 case 0x2532:
1219                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1220                                 ha->function_number = 1;
1221                         }
1222                         ha->cfg_flags |= CFG_CTRL_25XX;
1223                         ha->flags |= VP_ENABLED;
1224                         ha->max_vports = MAX_25_VIRTUAL_PORTS;
1225                         ha->reg_off = &reg_off_2400_2500;
1226                         ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1227                         ha->interrupt_count = 2;
1228                         ha->osc_max_cnt = 2048;
1229                         ha->fw_class = 0x2500;
1230                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1231                                 goto attach_failed;
1232                         }
1233                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1234                         ha->fcp_cmd = ql_command_24xx_iocb;
1235                         ha->ms_cmd = ql_ms_24xx_iocb;
1236                         ha->els_cmd = ql_els_24xx_iocb;
1237                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1238                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1239                         if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1240                                 ha->flags |= MULTI_QUEUE;
1241                         }
1242                         break;
1243 
1244                 case 0x2031:
1245                         /* Get queue pointer memory mapped registers */
1246                         if (ddi_dev_regsize(dip, 3, &regsize) != DDI_SUCCESS ||
1247                             ddi_regs_map_setup(dip, 3, &ha->mbar,
1248                             0, regsize, &ql_dev_acc_attr,
1249                             &ha->mbar_dev_handle) != DDI_SUCCESS) {
1250                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1251                                     "(mbar) failed", QL_NAME, instance);
1252                                 goto attach_failed;
1253                         }
1254                         ha->mbar_size = (uint32_t)regsize;
1255 
1256                         if (ha->pci_function_number != 0 &&
1257                             ha->pci_function_number != 2) {
1258                                 ha->function_number = 1;
1259                         }
1260                         ha->cfg_flags |= CFG_CTRL_83XX;
1261                         ha->flags |= VP_ENABLED | MULTI_QUEUE;
1262                         ha->max_vports = MAX_83_VIRTUAL_PORTS;
1263                         ha->reg_off = &reg_off_2700_8300;
1264                         ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1265                         ha->interrupt_count = 2;
1266                         ha->osc_max_cnt = 2048;
1267                         ha->fw_class = 0x8301fc;
1268                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1269                                 goto attach_failed;
1270                         }
1271                         ha->risc_dump_size = QL_83XX_FW_DUMP_SIZE;
1272                         ha->fcp_cmd = ql_command_24xx_iocb;
1273                         ha->ms_cmd = ql_ms_24xx_iocb;
1274                         ha->els_cmd = ql_els_24xx_iocb;
1275                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1276                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1277                         break;
1278 
1279                 case 0x2071:
1280                 case 0x2261:
1281                 case 0x2271:
1282                         /* Get queue pointer memory mapped registers */
1283                         if (ddi_dev_regsize(dip, 3, &regsize) != DDI_SUCCESS ||
1284                             ddi_regs_map_setup(dip, 3, &ha->mbar,
1285                             0, regsize, &ql_dev_acc_attr,
1286                             &ha->mbar_dev_handle) != DDI_SUCCESS) {
1287                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1288                                     "(mbar) failed", QL_NAME, instance);
1289                                 goto attach_failed;
1290                         }
1291                         ha->mbar_size = (uint32_t)regsize;
1292 
1293                         ha->function_number = ha->pci_function_number;
1294                         ha->cfg_flags |= CFG_CTRL_27XX;
1295                         ha->flags |= VP_ENABLED | MULTI_QUEUE |
1296                             QUEUE_SHADOW_PTRS;
1297                         ha->max_vports = MAX_27_VIRTUAL_PORTS;
1298                         ha->reg_off = &reg_off_2700_8300;
1299                         ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1300                         ha->interrupt_count = 2;
1301                         ha->osc_max_cnt = 2048;
1302                         ha->fw_class = 0x2700;
1303                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1304                                 goto attach_failed;
1305                         }
1306                         ha->risc_dump_size = QL_27XX_FW_DUMP_SIZE;
1307                         ha->fcp_cmd = ql_command_24xx_iocb;
1308                         ha->ms_cmd = ql_ms_24xx_iocb;
1309                         ha->els_cmd = ql_els_24xx_iocb;
1310                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1311                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1312                         break;
1313 
1314                 case 0x8001:
1315                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1316                                 ha->function_number = 1;
1317                         }
1318                         ha->cfg_flags |= CFG_CTRL_81XX;
1319                         ha->flags |= VP_ENABLED;
1320                         ha->max_vports = MAX_81XX_VIRTUAL_PORTS;
1321                         ha->reg_off = &reg_off_2400_2500;
1322                         ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1323                         ha->interrupt_count = 2;
1324                         ha->osc_max_cnt = 2048;
1325                         ha->fw_class = 0x8100;
1326                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1327                                 goto attach_failed;
1328                         }
1329                         ha->risc_dump_size = QL_81XX_FW_DUMP_SIZE;
1330                         ha->fcp_cmd = ql_command_24xx_iocb;
1331                         ha->ms_cmd = ql_ms_24xx_iocb;
1332                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1333                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1334                         if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1335                                 ha->flags |= MULTI_QUEUE;
1336                         }
1337                         break;
1338 
1339                 case 0x8021:
1340                         if (ha->pci_function_number & BIT_0) {
1341                                 ha->function_number = 1;
1342                         }
1343                         ha->cfg_flags |= CFG_CTRL_82XX;
1344                         ha->flags |= VP_ENABLED;
1345                         ha->max_vports = MAX_8021_VIRTUAL_PORTS;
1346                         ha->reg_off = &reg_off_8021;
1347                         ha->interrupt_count = 2;
1348                         ha->osc_max_cnt = 2048;
1349                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1350                         ha->fcp_cmd = ql_command_24xx_iocb;
1351                         ha->ms_cmd = ql_ms_24xx_iocb;
1352                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1353                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1354                         ha->io_dma_attr.dma_attr_flags |=
1355                             DDI_DMA_RELAXED_ORDERING;
1356 
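                        /*
                         * BAR0 spans the whole ISP82xx register space; the
                         * per-function window is assumed to begin at offset
                         * 0xBC000 plus 0x800 (2 KB) per PCI function.
                         */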
1357                         ha->nx_pcibase = ha->iobase;
1358                         ha->iobase += 0xBC000 + (ha->pci_function_number << 11);
1359                         ha->iomap_iobase += 0xBC000 +
1360                             (ha->pci_function_number << 11);
1361 
1362                         /* map doorbell */
1363                         if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1364                             ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1365                             0, regsize, &ql_dev_acc_attr,
1366                             &ha->db_dev_handle) !=
1367                             DDI_SUCCESS) {
1368                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1369                                     "(doorbell) failed", QL_NAME, instance);
1370                                 goto attach_failed;
1371                         }
1372                         progress |= QL_DB_IOBASE_MAPPED;
1373 
1374                         if (qlc_fm_check_acc_handle(ha, ha->db_dev_handle)
1375                             != DDI_FM_OK) {
1376                                 qlc_fm_report_err_impact(ha,
1377                                     QL_FM_EREPORT_ACC_HANDLE_CHECK);
1378                                 goto attach_failed;
1379                         }
1380 
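                        /*
                         * Assumed doorbell layout: the request-in pointer uses
                         * a 4 KB per-function stride in the doorbell BAR, and
                         * the doorbell read-back register sits 512 KB into
                         * BAR0, 8 bytes per function.
                         */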
1381                         ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1382                             (ha->pci_function_number << 12));
1383                         ha->db_read = ha->nx_pcibase + (512 * 1024) +
1384                             (ha->pci_function_number * 8);
1385 
1386                         ql_8021_update_crb_int_ptr(ha);
1387                         ql_8021_set_drv_active(ha);
1388                         break;
1389 
1390                 default:
1391                         cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1392                             QL_NAME, instance, ha->device_id);
1393                         goto attach_failed;
1394                 }
1395 
1396                 ha->outstanding_cmds = kmem_zalloc(
1397                     sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt,
1398                     KM_SLEEP);
1399 
1400                 /* Setup interrupts */
1401                 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1402                         cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1403                             "rval=%xh", QL_NAME, instance, rval);
1404                         goto attach_failed;
1405                 }
1406 
1407                 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1408 
1409                 /* Setup hba buffer. */
1410                 if (ql_create_queues(ha) != QL_SUCCESS) {
1411                         cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1412                             "alloc failed", QL_NAME, instance);
1413                         goto attach_failed;
1414                 }
1415                 progress |= QL_HBA_BUFFER_SETUP;
1416 
1417                 /* Allocate resource for QLogic IOCTL */
1418                 (void) ql_alloc_xioctl_resource(ha);
1419 
1420 
1421                 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1422                         cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1423                             QL_NAME, instance);
1424                         goto attach_failed;
1425                 }
1426 
1427                 progress |= QL_NVRAM_CACHE_CREATED;
1428 
1429                 if (ql_plogi_params_desc_ctor(ha) != DDI_SUCCESS) {
1430                         cmn_err(CE_WARN, "%s(%d): can't setup plogi params",
1431                             QL_NAME, instance);
1432                         goto attach_failed;
1433                 }
1434 
1435                 progress |= QL_PLOGI_PARAMS_CREATED;
1436 
1437                 /*
1438                  * Allocate an N Port information structure
1439                  * for use when in P2P topology.
1440                  */
1441                 ha->n_port = (ql_n_port_info_t *)
1442                     kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1443                 if (ha->n_port == NULL) {
1444                         cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1445                             QL_NAME, instance);
1446                         goto attach_failed;
1447                 }
1448 
1449                 progress |= QL_N_PORT_INFO_CREATED;
1450 
1451                 /*
1452                  * Determine support for Power Management
1453                  */
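                /* Walk the PCI capabilities list for the PM capability. */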
1454                 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1455 
1456                 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
 
1507                                     PM_LEVEL_D0) != DDI_SUCCESS) {
1508                                         cmn_err(CE_WARN, "%s(%d): failed to"
1509                                             " raise power or initialize"
1510                                             " adapter", QL_NAME, instance);
1511                                 }
1512                         }
1513                 } else {
1514                         /* Initialize adapter. */
1515                         ha->power_level = PM_LEVEL_D0;
1516                         if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1517                                 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1518                                     " adapter", QL_NAME, instance);
1519                         }
1520                 }
1521 
1522                 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1523                     ha->fw_subminor_version == 0) {
1524                         cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1525                             QL_NAME, ha->instance);
1526                 } else {
1527                         int     rval, rval1;
1528                         char    ver_fmt[256];
1529 
1530                         rval1 = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1531                             "Firmware version %d.%d.%d", ha->fw_major_version,
1532                             ha->fw_minor_version, ha->fw_subminor_version);
1533 
1534                         if (CFG_IST(ha, CFG_CTRL_81XX)) {
1535                                 rval = (int)snprintf(ver_fmt + rval1,
1536                                     (size_t)(sizeof (ver_fmt) - rval1),
1537                                     ", MPI fw version %d.%d.%d",
1538                                     ha->mpi_fw_major_version,
1539                                     ha->mpi_fw_minor_version,
1540                                     ha->mpi_fw_subminor_version);
1541 
1542                                 if (ha->subsys_id == 0x17B ||
1543                                     ha->subsys_id == 0x17D) {
1544                                         (void) snprintf(ver_fmt + rval1 + rval,
1545                                             (size_t)(sizeof (ver_fmt) - rval1 - rval),
1546                                             ", PHY fw version %d.%d.%d",
1547                                             ha->phy_fw_major_version,
1548                                             ha->phy_fw_minor_version,
1549                                             ha->phy_fw_subminor_version);
1550                                 }
1551                         }
1552                         cmn_err(CE_NOTE, "!%s(%d): %s",
1553                             QL_NAME, ha->instance, ver_fmt);
1554                 }
1555 
1556                 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1557                     "controller", KSTAT_TYPE_RAW,
1558                     (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1559                 if (ha->k_stats == NULL) {
1560                         cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1561                             QL_NAME, instance);
1562                         goto attach_failed;
1563                 }
1564                 progress |= QL_KSTAT_CREATED;
 
1575                     instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1576                         cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1577                             QL_NAME, instance);
1578                         goto attach_failed;
1579                 }
1580                 progress |= QL_MINOR_NODE_CREATED;
1581 
1582                 /* Allocate a transport structure for this instance */
1583                 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1584                 if (tran == NULL) {
1585                         cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1586                             QL_NAME, instance);
1587                         goto attach_failed;
1588                 }
1589 
1590                 progress |= QL_FCA_TRAN_ALLOCED;
1591 
1592                 /* fill in the structure */
1593                 tran->fca_numports = 1;
1594                 tran->fca_version = FCTL_FCA_MODREV_5;
1595                 tran->fca_num_npivports = ha->max_vports ?
1596                     ha->max_vports - 1 : 0;
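                /*
                 * Report the adapter's 8-byte node WWN as the FCA's
                 * permanent port WWN.
                 */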
1597                 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1598                     tran->fca_perm_pwwn.raw_wwn, 8);
1599 
1600                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1601                         ha->io_dma_attr = ha->bit64_io_dma_attr;
1602                         ha->fcsm_cmd_dma_attr = ha->bit64_io_dma_attr;
1603                         ha->fcsm_rsp_dma_attr = ha->bit64_io_dma_attr;
1604                         ha->fcip_cmd_dma_attr = ha->bit64_io_dma_attr;
1605                         ha->fcip_rsp_dma_attr = ha->bit64_io_dma_attr;
1606                         ha->fcp_cmd_dma_attr = ha->bit64_io_dma_attr;
1607                         ha->fcp_rsp_dma_attr = ha->bit64_io_dma_attr;
1608                         ha->fcp_data_dma_attr = ha->bit64_io_dma_attr;
1609                 } else {
1610                         ha->io_dma_attr = ha->bit32_io_dma_attr;
1611                         ha->fcsm_cmd_dma_attr = ha->bit32_io_dma_attr;
1612                         ha->fcsm_rsp_dma_attr = ha->bit32_io_dma_attr;
1613                         ha->fcip_cmd_dma_attr = ha->bit32_io_dma_attr;
1614                         ha->fcip_rsp_dma_attr = ha->bit32_io_dma_attr;
1615                         ha->fcp_cmd_dma_attr = ha->bit32_io_dma_attr;
1616                         ha->fcp_rsp_dma_attr = ha->bit32_io_dma_attr;
1617                         ha->fcp_data_dma_attr = ha->bit32_io_dma_attr;
1618                 }
1619                 ha->fcsm_cmd_dma_attr.dma_attr_sgllen = QL_FCSM_CMD_SGLLEN;
1620                 ha->fcsm_rsp_dma_attr.dma_attr_sgllen = QL_FCSM_RSP_SGLLEN;
1621                 ha->fcip_cmd_dma_attr.dma_attr_sgllen = QL_FCIP_CMD_SGLLEN;
1622                 ha->fcip_rsp_dma_attr.dma_attr_sgllen = QL_FCIP_RSP_SGLLEN;
1623                 ha->fcp_cmd_dma_attr.dma_attr_sgllen = QL_FCP_CMD_SGLLEN;
1624                 ha->fcp_rsp_dma_attr.dma_attr_sgllen = QL_FCP_RSP_SGLLEN;
1625                 if (CFG_IST(ha, CFG_CTRL_82XX)) {
1626                         ha->io_dma_attr.dma_attr_flags |=
1627                             DDI_DMA_RELAXED_ORDERING;
1628                         ha->fcsm_cmd_dma_attr.dma_attr_flags |=
1629                             DDI_DMA_RELAXED_ORDERING;
1630                         ha->fcsm_rsp_dma_attr.dma_attr_flags |=
1631                             DDI_DMA_RELAXED_ORDERING;
1632                         ha->fcip_cmd_dma_attr.dma_attr_flags |=
1633                             DDI_DMA_RELAXED_ORDERING;
1634                         ha->fcip_rsp_dma_attr.dma_attr_flags |=
1635                             DDI_DMA_RELAXED_ORDERING;
1636                         ha->fcp_cmd_dma_attr.dma_attr_flags |=
1637                             DDI_DMA_RELAXED_ORDERING;
1638                         ha->fcp_rsp_dma_attr.dma_attr_flags |=
1639                             DDI_DMA_RELAXED_ORDERING;
1640                         ha->fcp_data_dma_attr.dma_attr_flags |=
1641                             DDI_DMA_RELAXED_ORDERING;
1642                 }
1643 
1644                 /* Specify the amount of space needed in each packet */
1645                 tran->fca_pkt_size = sizeof (ql_srb_t);
1646 
1647                 /* command limits are usually dictated by hardware */
1648                 tran->fca_cmd_max = ha->osc_max_cnt;
1649 
1650                 /* dmaattr are static, set elsewhere. */
1651                 tran->fca_dma_attr = &ha->io_dma_attr;
1652                 tran->fca_dma_fcp_cmd_attr = &ha->fcp_cmd_dma_attr;
1653                 tran->fca_dma_fcp_rsp_attr = &ha->fcp_rsp_dma_attr;
1654                 tran->fca_dma_fcp_data_attr = &ha->fcp_data_dma_attr;
1655                 tran->fca_dma_fcsm_cmd_attr = &ha->fcsm_cmd_dma_attr;
1656                 tran->fca_dma_fcsm_rsp_attr = &ha->fcsm_rsp_dma_attr;
1657                 tran->fca_dma_fcip_cmd_attr = &ha->fcip_cmd_dma_attr;
1658                 tran->fca_dma_fcip_rsp_attr = &ha->fcip_rsp_dma_attr;
1659                 tran->fca_acc_attr = &ql_dev_acc_attr;
1660                 tran->fca_iblock = &(ha->iblock_cookie);
1661 
1662                 /* the remaining values are simply function vectors */
1663                 tran->fca_bind_port = ql_bind_port;
1664                 tran->fca_unbind_port = ql_unbind_port;
1665                 tran->fca_init_pkt = ql_init_pkt;
1666                 tran->fca_un_init_pkt = ql_un_init_pkt;
1667                 tran->fca_els_send = ql_els_send;
1668                 tran->fca_get_cap = ql_get_cap;
1669                 tran->fca_set_cap = ql_set_cap;
1670                 tran->fca_getmap = ql_getmap;
1671                 tran->fca_transport = ql_transport;
1672                 tran->fca_ub_alloc = ql_ub_alloc;
1673                 tran->fca_ub_free = ql_ub_free;
1674                 tran->fca_ub_release = ql_ub_release;
1675                 tran->fca_abort = ql_abort;
1676                 tran->fca_reset = ql_reset;
1677                 tran->fca_port_manage = ql_port_manage;
1678                 tran->fca_get_device = ql_get_device;
1679 
1680                 EL(ha, "Transport interface setup. FCA version %d\n",
1681                     tran->fca_version);
1682 
1683                 /* give it to the FC transport */
1684                 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1685                         cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1686                             instance);
1687                         goto attach_failed;
1688                 }
1689                 progress |= QL_FCA_ATTACH_DONE;
1690 
1691                 /* Stash the structure so it can be freed at detach */
1692                 ha->tran = tran;
1693 
1694                 /* Acquire global state lock. */
1695                 GLOBAL_STATE_LOCK();
1696 
1697                 /* Add adapter structure to link list. */
1698                 ql_add_link_b(&ql_hba, &ha->hba);
1699 
1700                 /* Determine and populate HBA fru info */
1701                 ql_setup_fruinfo(ha);
1702 
1703                 /* Release global state lock. */
1704                 GLOBAL_STATE_UNLOCK();
1705 
1706                 /* Start one second driver timer. */
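                /*
                 * The timer is global, shared by all adapter instances, and
                 * armed only by the first instance to attach.
                 */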
1707                 GLOBAL_TIMER_LOCK();
1708                 if (ql_timer_timeout_id == NULL) {
1709                         ql_timer_ticks = drv_usectohz(1000000);
1710                         ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1711                             ql_timer_ticks);
1712                 }
1713                 GLOBAL_TIMER_UNLOCK();
1714 
1715                 /* Setup task_daemon thread. */
1716                 (void) snprintf(taskq_name, sizeof (taskq_name),
1717                     "qlc_%d_driver_thread", instance);
1718                 ha->driver_thread_taskq = ddi_taskq_create(NULL, taskq_name, 1,
1719                     TASKQ_DEFAULTPRI, 0);
1720                 (void) ddi_taskq_dispatch(ha->driver_thread_taskq,
1721                     ql_task_daemon, ha, DDI_SLEEP);
1722                 ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
1723 
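                /*
                 * Create a pool of ha->completion_thds threads that service
                 * the command completion queue.
                 */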
1724                 (void) snprintf(taskq_name, sizeof (taskq_name),
1725                     "qlc_%d_comp_thd", instance);
1726                 ha->completion_taskq = ddi_taskq_create(0, taskq_name,
1727                     ha->completion_thds, maxclsyspri, 0);
1728                 for (size = 0; size < ha->completion_thds; size++) {
1729                         (void) ddi_taskq_dispatch(ha->completion_taskq,
1730                             ql_completion_thread, ha, DDI_SLEEP);
1731                 }
1732 
1733                 progress |= QL_TASK_DAEMON_STARTED;
1734 
1735                 ddi_report_dev(dip);
1736 
1737                 /* Disable link reset in panic path */
1738                 ha->lip_on_panic = 1;
1739 
1740                 rval = DDI_SUCCESS;
1741                 break;
1742 
1743 attach_failed:
1744                 if (progress & QL_FCA_INIT_FM) {
1745                         qlc_fm_fini(ha);
1746                         progress &= ~QL_FCA_INIT_FM;
1747                 }
1748 
1749                 if (progress & QL_FCA_ATTACH_DONE) {
1750                         (void) fc_fca_detach(dip);
1751                         progress &= ~QL_FCA_ATTACH_DONE;
1752                 }
1753 
1754                 if (progress & QL_FCA_TRAN_ALLOCED) {
1755                         kmem_free(tran, sizeof (fc_fca_tran_t));
1756                         progress &= ~QL_FCA_TRAN_ALLOCED;
1757                 }
1758 
1759                 if (progress & QL_MINOR_NODE_CREATED) {
1760                         ddi_remove_minor_node(dip, "devctl");
1761                         progress &= ~QL_MINOR_NODE_CREATED;
1762                 }
1763 
1764                 if (progress & QL_KSTAT_CREATED) {
1765                         kstat_delete(ha->k_stats);
1766                         progress &= ~QL_KSTAT_CREATED;
1767                 }
1768 
1769                 if (progress & QL_N_PORT_INFO_CREATED) {
1770                         kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1771                         progress &= ~QL_N_PORT_INFO_CREATED;
1772                 }
1773 
1774                 if (progress & QL_PLOGI_PARAMS_CREATED) {
1775                         (void) ql_plogi_params_desc_dtor(ha);
1776                         progress &= ~QL_PLOGI_PARAMS_CREATED;
1777                 }
1778 
1779                 if (progress & QL_NVRAM_CACHE_CREATED) {
1780                         (void) ql_nvram_cache_desc_dtor(ha);
1781                         progress &= ~QL_NVRAM_CACHE_CREATED;
1782                 }
1783 
1784                 if (progress & QL_TASK_DAEMON_STARTED) {
1785                         if (ha->driver_thread_taskq) {
1786                                 while (ha->task_daemon_flags &
1787                                     TASK_DAEMON_ALIVE_FLG) {
1788                                         /* Delay for 1 tick (10 ms). */
1789                                         ql_awaken_task_daemon(ha, NULL,
1790                                             TASK_DAEMON_STOP_FLG, 0);
1791                                         delay(1);
1792                                 }
1793                                 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1794 
1795                                 ddi_taskq_destroy(ha->driver_thread_taskq);
1796                                 ha->driver_thread_taskq = NULL;
1797                         }
1798                         if (ha->completion_taskq) {
1799                                 ADAPTER_STATE_LOCK(ha);
1800                                 ha->flags |= COMP_THD_TERMINATE;
1801                                 ADAPTER_STATE_UNLOCK(ha);
1802 
1803                                 do {
1804                                         COMP_Q_LOCK(ha);
1805                                         cv_broadcast(&ha->cv_comp_thread);
1806                                         COMP_Q_UNLOCK(ha);
1807                                         ql_delay(ha, 10000);
1808                                 } while (ha->comp_thds_active != 0);
1809 
1810                                 ddi_taskq_destroy(ha->completion_taskq);
1811                                 ha->completion_taskq = NULL;
1812                         }
1813                         progress &= ~QL_TASK_DAEMON_STARTED;
1814                 }
1815 
1816                 if (progress & QL_DB_IOBASE_MAPPED) {
1817                         ql_8021_clr_drv_active(ha);
1818                         ddi_regs_map_free(&ha->db_dev_handle);
1819                         progress &= ~QL_DB_IOBASE_MAPPED;
1820                 }
1821                 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1822                         ddi_regs_map_free(&ha->iomap_dev_handle);
1823                         progress &= ~QL_IOMAP_IOBASE_MAPPED;
1824                 }
1825                 if (progress & QL_REGS_MAPPED) {
1826                         if (ha->mbar_dev_handle) {
1827                                 ddi_regs_map_free(&ha->mbar_dev_handle);
1828                                 ha->mbar_dev_handle = 0;
1829                         }
1830                 }
1831 
1832                 if (progress & QL_CONFIG_SPACE_SETUP) {
1833                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1834                                 ddi_regs_map_free(&ha->sbus_config_handle);
1835                         } else {
1836                                 pci_config_teardown(&ha->pci_handle);
1837                         }
1838                         progress &= ~QL_CONFIG_SPACE_SETUP;
1839                 }
1840 
1841                 if (progress & QL_INTR_ADDED) {
1842                         ql_disable_intr(ha);
1843                         ql_release_intr(ha);
1844                         progress &= ~QL_INTR_ADDED;
1845                 }
1846 
1847                 if (progress & QL_MUTEX_CV_INITED) {
1848                         ql_destroy_mutex(ha);
1849                         progress &= ~QL_MUTEX_CV_INITED;
1850                 }
1851 
1852                 if (progress & QL_HBA_BUFFER_SETUP) {
1853                         ql_delete_queues(ha);
1854                         progress &= ~QL_HBA_BUFFER_SETUP;
1855                 }
1856 
1857                 if (progress & QL_REGS_MAPPED) {
1858                         ddi_regs_map_free(&ha->dev_handle);
1859                         if (ha->sbus_fpga_iobase != NULL) {
1860                                 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1861                         }
1862                         progress &= ~QL_REGS_MAPPED;
1863                 }
1864 
1865                 if (progress & QL_SOFT_STATE_ALLOCED) {
1866 
1867                         ql_fcache_rel(ha->fcache);
1868 
1869                         kmem_free(ha->adapter_stats,
1870                             sizeof (*ha->adapter_stats));
1871 
1872                         kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1873                             QL_UB_LIMIT);
1874 
1875                         if (ha->outstanding_cmds != NULL) {
1876                                 kmem_free(ha->outstanding_cmds,
1877                                     sizeof (*ha->outstanding_cmds) *
1878                                     ha->osc_max_cnt);
1879                         }
1880 
1881                         if (ha->devpath != NULL) {
1882                                 kmem_free(ha->devpath,
1883                                     strlen(ha->devpath) + 1);
1884                         }
1885 
1886                         kmem_free(ha->dev, sizeof (*ha->dev) *
1887                             DEVICE_HEAD_LIST_SIZE);
1888 
1889                         if (ha->xioctl != NULL) {
1890                                 ql_free_xioctl_resource(ha);
1891                         }
1892 
1893                         if (ha->fw_module != NULL) {
1894                                 (void) ddi_modclose(ha->fw_module);
1895                         }
1896                         (void) ql_el_trace_dealloc(ha);
1897 
1898                         ddi_soft_state_free(ql_state, instance);
1899                         progress &= ~QL_SOFT_STATE_ALLOCED;
1900                 }
1901 
1902                 ddi_prop_remove_all(dip);
1903                 rval = DDI_FAILURE;
1904                 break;
1905 
1906         case DDI_RESUME:
1907                 rval = DDI_FAILURE;
1908 
1909                 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1910                 if (ha == NULL) {
1911                         cmn_err(CE_WARN, "%s(%d): can't get soft state",
1912                             QL_NAME, instance);
1913                         break;
1914                 }
1915 
1916                 ha->power_level = PM_LEVEL_D3;
 
1925                         }
1926                 }
1927 
1928                 /*
1929                  * There is a bug in DR that prevents PM framework
1930                  * from calling ql_power.
1931                  */
1932                 if (ha->power_level == PM_LEVEL_D3) {
1933                         ha->power_level = PM_LEVEL_D0;
1934 
1935                         if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1936                                 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1937                                     " adapter", QL_NAME, instance);
1938                         }
1939 
1940                         /* Wake up task_daemon. */
1941                         ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1942                             0);
1943                 }
1944 
1945                 /* Restart driver timer. */
1946                 GLOBAL_TIMER_LOCK();
1947                 if (ql_timer_timeout_id == NULL) {
1948                         ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1949                             ql_timer_ticks);
1950                 }
1951                 GLOBAL_TIMER_UNLOCK();
1952 
1953                 /* Wake up command start routine. */
1954                 ADAPTER_STATE_LOCK(ha);
1955                 ha->flags &= ~ADAPTER_SUSPENDED;
1956                 ADAPTER_STATE_UNLOCK(ha);
1957 
1958                 rval = DDI_SUCCESS;
1959 
1960                 /* Restart IP if it was running. */
1961                 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1962                         (void) ql_initialize_ip(ha);
1963                         ql_isp_rcvbuf(ha);
1964                 }
1965                 break;
1966 
1967         default:
1968                 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1969                     " %x", QL_NAME, ddi_get_instance(dip), cmd);
1970                 rval = DDI_FAILURE;
1971                 break;
1972         }
1973 
1974         kmem_free(buf, MAXPATHLEN);
1975 
1976         if (rval != DDI_SUCCESS) {
1977                 /*EMPTY*/
1978                 QL_PRINT_2(ha, "failed instance=%d, rval = %xh\n",
1979                     ddi_get_instance(dip), rval);
1980         } else {
1981                 /*EMPTY*/
1982                 QL_PRINT_3(ha, "done\n");
1983         }
1984 
1985         return (rval);
1986 }
1987 
1988 /*
1989  * ql_detach
1990  *      Used to remove all the states associated with a given
1991  *      instances of a device node prior to the removal of that
1992  *      instance from the system.
1993  *
1994  * Input:
1995  *      dip = pointer to device information structure.
1996  *      cmd = type of detach.
1997  *
1998  * Returns:
1999  *      DDI_SUCCESS or DDI_FAILURE.
2000  *
2001  * Context:
2002  *      Kernel context.
2003  */
2004 static int
2005 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2006 {
2007         ql_adapter_state_t      *ha, *vha;
2008         ql_tgt_t                *tq;
2009         uint16_t                index;
2010         ql_link_t               *link;
2011         char                    *buf;
2012         timeout_id_t            timer_id = NULL;
2013         int                     suspend, rval = DDI_SUCCESS;
2014 
2015         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2016         if (ha == NULL) {
2017                 QL_PRINT_2(NULL, "no adapter, instance=%d\n",
2018                     ddi_get_instance(dip));
2019                 return (DDI_FAILURE);
2020         }
2021 
2022         QL_PRINT_3(ha, "started, cmd=%xh\n", cmd);
2023 
2024         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2025 
2026         switch (cmd) {
2027         case DDI_DETACH:
2028                 ADAPTER_STATE_LOCK(ha);
2029                 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
2030                 ADAPTER_STATE_UNLOCK(ha);
2031 
2032                 /* Wait for task thread to see suspend flag. */
2033                 while (!(ha->task_daemon_flags & TASK_DAEMON_STALLED_FLG) &&
2034                     ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
2035                         ql_awaken_task_daemon(ha, NULL, 0, 0);
2036                         /* Delay for 1 tick (10 milliseconds). */
2037                         delay(1);
2038                 }
2039 
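                     /* Stop the task daemon thread and destroy its taskq. */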
2040                 if (ha->driver_thread_taskq) {
2041                         while (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
2042                                 /* Delay for 1 tick (10 milliseconds). */
2043                                 ql_awaken_task_daemon(ha, NULL,
2044                                     TASK_DAEMON_STOP_FLG, 0);
2045                                 delay(1);
2046                         }
2047                         ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
2048 
2049                         ddi_taskq_destroy(ha->driver_thread_taskq);
2050                         ha->driver_thread_taskq = NULL;
2051                 }
2052 
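                     /* Terminate the completion threads and destroy their taskq. */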
2053                 if (ha->completion_taskq) {
2054                         ADAPTER_STATE_LOCK(ha);
2055                         ha->flags |= COMP_THD_TERMINATE;
2056                         ADAPTER_STATE_UNLOCK(ha);
2057 
2058                         do {
2059                                 COMP_Q_LOCK(ha);
2060                                 cv_broadcast(&ha->cv_comp_thread);
2061                                 COMP_Q_UNLOCK(ha);
2062                                 ql_delay(ha, 10000);
2063                         } while (ha->comp_thds_active != 0);
2064 
2065                         ddi_taskq_destroy(ha->completion_taskq);
2066                         ha->completion_taskq = NULL;
2067                 }
2068 
2069                 /* Disable driver timer if no adapters. */
2070                 GLOBAL_TIMER_LOCK();
2071                 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2072                     ql_hba.last == &ha->hba) {
2073                         timer_id = ql_timer_timeout_id;
2074                         ql_timer_timeout_id = NULL;
2075                 }
2076                 GLOBAL_TIMER_UNLOCK();
2077 
2078                 if (timer_id) {
2079                         (void) untimeout(timer_id);
2080                 }
2081 
2082                 GLOBAL_STATE_LOCK();
2083                 ql_remove_link(&ql_hba, &ha->hba);
2084                 GLOBAL_STATE_UNLOCK();
2085 
2086                 if (ha->pm_capable) {
2087                         if (pm_lower_power(dip, QL_POWER_COMPONENT,
2088                             PM_LEVEL_D3) != DDI_SUCCESS) {
2089                                 cmn_err(CE_WARN, "%s(%d): failed to lower the"
2090                                     " power", QL_NAME, ha->instance);
2091                         }
2092                 }
2093 
2094                 /*
2095                  * If pm_lower_power shut down the adapter, there
2096                  * isn't much else to do.
2097                  */
2098                 if (ha->power_level != PM_LEVEL_D3) {
2099                         ql_halt(ha, PM_LEVEL_D3);
2100                 }
2101 
2102                 /* Remove virtual ports. */
2103                 while ((vha = ha->vp_next) != NULL) {
2104                         ql_vport_destroy(vha);
2105                 }
 
2153                         ha->risc_code_size = 0;
2154                 }
2155 
2156                 if (ha->fw_module != NULL) {
2157                         (void) ddi_modclose(ha->fw_module);
2158                         ha->fw_module = NULL;
2159                 }
2160 
2161                 /* Free resources. */
2162                 ddi_prop_remove_all(dip);
2163                 (void) fc_fca_detach(dip);
2164                 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
2165                 ddi_remove_minor_node(dip, "devctl");
2166                 if (ha->k_stats != NULL) {
2167                         kstat_delete(ha->k_stats);
2168                 }
2169 
2170                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
2171                         ddi_regs_map_free(&ha->sbus_config_handle);
2172                 } else {
2173                         if (CFG_IST(ha, CFG_CTRL_82XX)) {
2174                                 ql_8021_clr_drv_active(ha);
2175                                 ddi_regs_map_free(&ha->db_dev_handle);
2176                         }
2177                         if (ha->iomap_dev_handle != ha->dev_handle) {
2178                                 ddi_regs_map_free(&ha->iomap_dev_handle);
2179                         }
2180                         pci_config_teardown(&ha->pci_handle);
2181                 }
2182 
2183                 ql_disable_intr(ha);
2184                 ql_release_intr(ha);
2185 
2186                 ql_free_xioctl_resource(ha);
2187 
2188                 ql_destroy_mutex(ha);
2189 
2190                 ql_delete_queues(ha);
2191                 ql_free_phys(ha, &ha->fwexttracebuf);
2192                 ql_free_phys(ha, &ha->fwfcetracebuf);
2193 
2194                 ddi_regs_map_free(&ha->dev_handle);
2195                 if (ha->sbus_fpga_iobase != NULL) {
2196                         ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
2197                 }
2198                 if (ha->mbar_dev_handle != NULL) {
2199                         ddi_regs_map_free(&ha->mbar_dev_handle);
2200                 }
2201 
2202                 ql_fcache_rel(ha->fcache);
2203                 if (ha->vcache != NULL) {
2204                         kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
2205                 }
2206 
2207                 if (ha->pi_attrs != NULL) {
2208                         kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
2209                 }
2210 
2211                 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
2212 
2213                 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
2214 
2215                 kmem_free(ha->outstanding_cmds,
2216                     sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt);
2217 
2218                 if (ha->n_port != NULL) {
2219                         kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
2220                 }
2221 
2222                 if (ha->devpath != NULL) {
2223                         kmem_free(ha->devpath, strlen(ha->devpath) + 1);
2224                 }
2225 
2226                 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
2227 
2228                 (void) ql_plogi_params_desc_dtor(ha);
2229 
2230                 (void) ql_nvram_cache_desc_dtor(ha);
2231 
2232                 (void) qlc_fm_fini(ha);
2233 
2234                 EL(ha, "detached\n");
2235 
2236                 (void) ql_el_trace_dealloc(ha);
2237 
2238                 ddi_soft_state_free(ql_state, (int)ha->instance);
2239 
2240                 rval = DDI_SUCCESS;
2241 
2242                 break;
2243 
2244         case DDI_SUSPEND:
2245                 ADAPTER_STATE_LOCK(ha);
2246                 ha->flags |= ADAPTER_SUSPENDED;
2247                 ADAPTER_STATE_UNLOCK(ha);
2248 
2249                 /* Disable driver timer if last adapter. */
2250                 GLOBAL_TIMER_LOCK();
2251                 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2252                     ql_hba.last == &ha->hba) {
2253                         timer_id = ql_timer_timeout_id;
2254                         ql_timer_timeout_id = NULL;
2255                 }
2256                 GLOBAL_TIMER_UNLOCK();
2257 
2258                 if (timer_id) {
2259                         (void) untimeout(timer_id);
2260                 }
2261 
2262                 if (ha->flags & IP_INITIALIZED) {
2263                         (void) ql_shutdown_ip(ha);
2264                 }
2265 
2266                 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
2267                         ADAPTER_STATE_LOCK(ha);
2268                         ha->flags &= ~ADAPTER_SUSPENDED;
2269                         ADAPTER_STATE_UNLOCK(ha);
2270                         cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2271                             QL_NAME, ha->instance, suspend);
2272 
2273                         /* Restart IP if it was running. */
2274                         if (ha->flags & IP_ENABLED &&
2275                             !(ha->flags & IP_INITIALIZED)) {
2276                                 (void) ql_initialize_ip(ha);
2277                                 ql_isp_rcvbuf(ha);
2278                         }
2279                         rval = DDI_FAILURE;
2280                         break;
2281                 }
2282 
2283                 EL(ha, "suspended\n");
2284 
2285                 break;
2286 
2287         default:
2288                 rval = DDI_FAILURE;
2289                 break;
2290         }
2291 
2292         kmem_free(buf, MAXPATHLEN);
2293 
2294         if (rval != DDI_SUCCESS) {
2295                 EL(ha, "failed, rval = %xh\n", rval);
2296         } else {
2297                 /*EMPTY*/
2298                 QL_PRINT_3(ha, "done\n");
2299         }
2300 
2301         return (rval);
2302 }
2303 
2304 /*
2305  * ql_power
2306  *      Power a device attached to the system.
2307  *
2308  * Input:
2309  *      dip = pointer to device information structure.
2310  *      component = device.
2311  *      level = power level.
2312  *
2313  * Returns:
2314  *      DDI_SUCCESS or DDI_FAILURE.
2315  *
2316  * Context:
2317  *      Kernel context.
2318  */
2319 /* ARGSUSED */
2320 static int
2321 ql_power(dev_info_t *dip, int component, int level)
2322 {
2323         int                     rval = DDI_FAILURE;
2324         off_t                   csr;
2325         uint8_t                 saved_pm_val;
2326         ql_adapter_state_t      *ha;
2327         char                    *buf;
2328         char                    *path;
2329 
2330         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2331         if (ha == NULL || ha->pm_capable == 0) {
2332                 QL_PRINT_2(ha, "no hba or PM not supported\n");
2333                 return (rval);
2334         }
2335 
2336         QL_PRINT_10(ha, "started\n");
2337 
2338         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2339         path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2340 
2341         if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2342             level != PM_LEVEL_D3)) {
2343                 EL(ha, "invalid, component=%xh or level=%xh\n",
2344                     component, level);
                     kmem_free(buf, MAXPATHLEN);
                     kmem_free(path, MAXPATHLEN);
2345                 return (rval);
2346         }
2347 
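             /*
              * Compute the config-space offset of the PCI power management
              * CSR from the capabilities pointer.
              */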
2348         GLOBAL_HW_LOCK();
2349         csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2350         GLOBAL_HW_UNLOCK();
2351 
2352         (void) snprintf(buf, MAXPATHLEN,
2353             "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2354             ddi_pathname(dip, path));
2355 
2356         switch (level) {
2357         case PM_LEVEL_D0:       /* power up to D0 state - fully on */
2358 
2359                 QL_PM_LOCK(ha);
2360                 if (ha->power_level == PM_LEVEL_D0) {
2361                         QL_PM_UNLOCK(ha);
2362                         rval = DDI_SUCCESS;
2363                         break;
2364                 }
2365 
2366                 /*
2367                  * Enable interrupts now
2368                  */
2369                 saved_pm_val = ha->power_level;
2370                 ha->power_level = PM_LEVEL_D0;
2371                 QL_PM_UNLOCK(ha);
2372 
 
2402                 /* Wake up task_daemon. */
2403                 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2404                     TASK_DAEMON_SLEEPING_FLG, 0);
2405 
2406                 /* Restart IP if it was running. */
2407                 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2408                         (void) ql_initialize_ip(ha);
2409                         ql_isp_rcvbuf(ha);
2410                 }
2411 
2412                 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2413                     ha->instance, QL_NAME);
2414 
2415                 rval = DDI_SUCCESS;
2416                 break;
2417 
2418         case PM_LEVEL_D3:       /* power down to D3 state - off */
2419 
2420                 QL_PM_LOCK(ha);
2421 
2422                 if (ha->pm_busy || ((ha->task_daemon_flags &
2423                     TASK_DAEMON_SLEEPING_FLG) == 0)) {
2424                         QL_PM_UNLOCK(ha);
2425                         break;
2426                 }
2427 
2428                 if (ha->power_level == PM_LEVEL_D3) {
2429                         rval = DDI_SUCCESS;
2430                         QL_PM_UNLOCK(ha);
2431                         break;
2432                 }
2433                 QL_PM_UNLOCK(ha);
2434 
2435                 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2436                         cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2437                             " config regs", QL_NAME, ha->instance, buf);
2438                         break;
2439                 }
2440                 ha->config_saved = 1;
2441 
2442                 /*
 
2459                 ha->power_level = PM_LEVEL_D3;
2460                 QL_PM_UNLOCK(ha);
2461 
2462                 /*
2463                  * Wait for ISR to complete.
2464                  */
2465                 INTR_LOCK(ha);
2466                 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2467                 INTR_UNLOCK(ha);
2468 
2469                 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2470                     ha->instance, QL_NAME);
2471 
2472                 rval = DDI_SUCCESS;
2473                 break;
2474         }
2475 
2476         kmem_free(buf, MAXPATHLEN);
2477         kmem_free(path, MAXPATHLEN);
2478 
2479         QL_PRINT_10(ha, "done\n");
2480 
2481         return (rval);
2482 }
2483 
2484 /*
2485  * ql_quiesce
2486  *      Quiesce a device attached to the system.
2487  *
2488  * Input:
2489  *      dip = pointer to device information structure.
2490  *
2491  * Returns:
2492  *      DDI_SUCCESS
2493  *
2494  * Context:
2495  *      Kernel context.
2496  */
2497 static int
2498 ql_quiesce(dev_info_t *dip)
2499 {
2500         ql_adapter_state_t      *ha;
2501         uint32_t                timer;
2502         uint32_t                stat;
2503 
2504         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2505         if (ha == NULL) {
2506                 /* Oh well.... */
2507                 QL_PRINT_2(NULL, "no adapter, instance=%d\n",
2508                     ddi_get_instance(dip));
2509                 return (DDI_SUCCESS);
2510         }
2511 
2512         QL_PRINT_3(ha, "started\n");
2513 
2514         if (CFG_IST(ha, CFG_CTRL_82XX)) {
2515                 ql_8021_clr_hw_intr(ha);
2516                 ql_8021_clr_fw_intr(ha);
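                     /*
                      * Issue a toggle-interrupt mailbox command and poll
                      * risc2host for the completion status.
                      */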
2517                 WRT16_IO_REG(ha, mailbox_in[0], MBC_TOGGLE_INTERRUPT);
2518                 WRT16_IO_REG(ha, mailbox_in[1], 0);
2519                 WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
2520                 for (timer = 0; timer < 20000; timer++) {
2521                         stat = RD32_IO_REG(ha, risc2host);
2522                         if (stat & BIT_15) {
2523                                 ql_8021_clr_hw_intr(ha);
2524                                 if ((stat & 0xff) < 0x12) {
2525                                         ql_8021_clr_fw_intr(ha);
2526                                         break;
2527                                 }
2528                                 ql_8021_clr_fw_intr(ha);
2529                         }
2530                         drv_usecwait(100);
2531                 }
2532                 ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
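                     /*
                      * Issue a stop-firmware mailbox command and poll
                      * risc2host for the completion status.
                      */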
2533                 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2534                 WRT16_IO_REG(ha, mailbox_in[1], 0);
2535                 WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
2536                 for (timer = 0; timer < 20000; timer++) {
2537                         stat = RD32_IO_REG(ha, risc2host);
2538                         if (stat & BIT_15) {
2539                                 ql_8021_clr_hw_intr(ha);
2540                                 if ((stat & 0xff) < 0x12) {
2541                                         ql_8021_clr_fw_intr(ha);
2542                                         break;
2543                                 }
2544                                 ql_8021_clr_fw_intr(ha);
2545                         }
2546                         drv_usecwait(100);
2547                 }
2548         } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2549                 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
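                     /*
                      * Issue a stop-firmware mailbox command and poll
                      * risc2host for the completion status.
                      */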
2550                 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2551                 WRT16_IO_REG(ha, mailbox_in[1], 0);
2552                 WRT16_IO_REG(ha, mailbox_in[2], 0);
2553                 WRT16_IO_REG(ha, mailbox_in[3], 0);
2554                 WRT16_IO_REG(ha, mailbox_in[4], 0);
2555                 WRT16_IO_REG(ha, mailbox_in[5], 0);
2556                 WRT16_IO_REG(ha, mailbox_in[6], 0);
2557                 WRT16_IO_REG(ha, mailbox_in[7], 0);
2558                 WRT16_IO_REG(ha, mailbox_in[8], 0);
2559                 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
2560                 for (timer = 0; timer < 30000; timer++) {
2561                         stat = RD32_IO_REG(ha, risc2host);
2562                         if (stat & BIT_15) {
2563                                 if ((stat & 0xff) < 0x12) {
2564                                         WRT32_IO_REG(ha, hccr,
2565                                             HC24_CLR_RISC_INT);
2566                                         break;
2567                                 }
2568                                 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2569                         }
2570                         drv_usecwait(100);
2571                 }
2572                 /* Reset the chip. */
2573                 if (CFG_IST(ha, CFG_MWB_4096_SUPPORT)) {
2574                         WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2575                             MWB_4096_BYTES);
2576                 } else {
2577                         WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN);
2578                 }
2579                 drv_usecwait(100);
2580 
2581         } else {
2582                 /* Disable ISP interrupts. */
2583                 WRT16_IO_REG(ha, ictrl, 0);
2584                 /* Select RISC module registers. */
2585                 WRT16_IO_REG(ha, ctrl_status, 0);
2586                 /* Reset ISP semaphore. */
2587                 WRT16_IO_REG(ha, semaphore, 0);
2588                 /* Reset RISC module. */
2589                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2590                 /* Release RISC module. */
2591                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2592         }
2593 
2594         QL_PRINT_3(ha, "done\n");
2595 
2596         return (DDI_SUCCESS);
2597 }
2598 
2599 /* ************************************************************************ */
2600 /*              Fibre Channel Adapter (FCA) Transport Functions.            */
2601 /* ************************************************************************ */
2602 
2603 /*
2604  * ql_bind_port
2605  *      Handling port binding. The FC Transport attempts to bind an FCA port
2606  *      when it is ready to start transactions on the port. The FC Transport
2607  *      will call the fca_bind_port() function specified in the fca_transport
2608  *      structure it receives. The FCA must fill in the port_info structure
2609  *      passed in the call and also stash the information for future calls.
2610  *
2611  * Input:
2612  *      dip = pointer to FCA information structure.
2613  *      port_info = pointer to port information structure.
2614  *      bind_info = pointer to bind information structure.
2615  *
2616  * Returns:
2617  *      NULL = failure
2618  *
2619  * Context:
2620  *      Kernel context.
2621  */
2622 static opaque_t
2623 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2624     fc_fca_bind_info_t *bind_info)
2625 {
2626         ql_adapter_state_t      *ha, *vha;
2627         opaque_t                fca_handle = NULL;
2628         port_id_t               d_id;
2629         int                     port_npiv = bind_info->port_npiv;
2630         uchar_t                 *port_nwwn = bind_info->port_nwwn.raw_wwn;
2631         uchar_t                 *port_pwwn = bind_info->port_pwwn.raw_wwn;
2632 
2633         /* get state info based on the dip */
2634         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2635         if (ha == NULL) {
2636                 QL_PRINT_2(ha, "no adapter, instance=%d\n",
2637                     ddi_get_instance(dip));
2638                 return (NULL);
2639         }
2640         QL_PRINT_10(ha, "started\n");
2641 
2642         /* Verify port number is supported. */
2643         if (port_npiv != 0) {
2644                 if (!(ha->flags & VP_ENABLED)) {
2645                         QL_PRINT_2(ha, "FC_NPIV_NOT_SUPPORTED\n");
2646                         port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2647                         return (NULL);
2648                 }
2649                 if (!(ha->flags & POINT_TO_POINT)) {
2650                         QL_PRINT_2(ha, "FC_NPIV_WRONG_TOPOLOGY\n");
2651                         port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2652                         return (NULL);
2653                 }
2654                 if (!(ha->flags & FDISC_ENABLED)) {
2655                         QL_PRINT_2(ha, "switch does not support "
2656                             "FDISC\n");
2657                         port_info->pi_error = FC_NPIV_FDISC_FAILED;
2658                         return (NULL);
2659                 }
2660                 if (bind_info->port_num >= ha->max_vports) {
2661                         QL_PRINT_2(ha, "port number=%d "
2662                             "FC_OUTOFBOUNDS\n", bind_info->port_num);
2663                         port_info->pi_error = FC_OUTOFBOUNDS;
2664                         return (NULL);
2665                 }
2666         } else if (bind_info->port_num != 0) {
2667                 QL_PRINT_2(ha, "failed, port number=%d is not "
2668                     "supported\n", bind_info->port_num);
2669                 port_info->pi_error = FC_OUTOFBOUNDS;
2670                 return (NULL);
2671         }
2672 
2673         /* Locate port context. */
2674         for (vha = ha; vha != NULL; vha = vha->vp_next) {
2675                 if (vha->vp_index == bind_info->port_num) {
2676                         break;
2677                 }
2678         }
2679 
2680         /* If virtual port does not exist. */
2681         if (vha == NULL) {
2682                 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2683         }
2684 
2685         /* make sure this port isn't already bound */
2686         if (vha->flags & FCA_BOUND) {
2687                 port_info->pi_error = FC_ALREADY;
2688         } else {
2689                 if (vha->vp_index != 0) {
2690                         bcopy(port_nwwn,
2691                             vha->loginparams.node_ww_name.raw_wwn, 8);
2692                         bcopy(port_pwwn,
2693                             vha->loginparams.nport_ww_name.raw_wwn, 8);
2694                 }
2695                 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2696                         if (ql_vport_enable(vha) != QL_SUCCESS) {
2697                                 QL_PRINT_2(ha, "failed to enable "
2698                                     "virtual port=%d\n",
2699                                     vha->vp_index);
2700                                 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2701                                 return (NULL);
2702                         }
2703                         cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2704                             "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2705                             "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2706                             QL_NAME, ha->instance, vha->vp_index,
2707                             port_pwwn[0], port_pwwn[1], port_pwwn[2],
2708                             port_pwwn[3], port_pwwn[4], port_pwwn[5],
2709                             port_pwwn[6], port_pwwn[7],
2710                             port_nwwn[0], port_nwwn[1], port_nwwn[2],
2711                             port_nwwn[3], port_nwwn[4], port_nwwn[5],
2712                             port_nwwn[6], port_nwwn[7]);
2713                 }
2714 
2715                 /* stash the bind_info supplied by the FC Transport */
2716                 vha->bind_info.port_handle = bind_info->port_handle;
2717                 vha->bind_info.port_statec_cb = bind_info->port_statec_cb;
2718                 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2719 
2720                 /* Set port's source ID. */
2721                 port_info->pi_s_id.port_id = vha->d_id.b24;
2722 
2723                 /* copy out the default login parameters */
2724                 bcopy((void *)&vha->loginparams,
2725                     (void *)&port_info->pi_login_params,
2726                     sizeof (la_els_logi_t));
2727 
2728                 /* Set port's hard address if enabled. */
2729                 port_info->pi_hard_addr.hard_addr = 0;
2730                 if (bind_info->port_num == 0) {
2731                         d_id.b24 = ha->d_id.b24;
2732                         if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2733                                 if (ha->init_ctrl_blk.cb24.
2734                                     firmware_options_1[0] & BIT_0) {
2735                                         d_id.b.al_pa = ql_index_to_alpa[ha->
2736                                             init_ctrl_blk.cb24.
2737                                             hard_address[0]];
2738                                         port_info->pi_hard_addr.hard_addr =
2739                                             d_id.b24;
2740                                 }
2741                         } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2742                             BIT_0) {
2743                                 d_id.b.al_pa = ql_index_to_alpa[ha->
2744                                     init_ctrl_blk.cb.hard_address[0]];
2745                                 port_info->pi_hard_addr.hard_addr = d_id.b24;
2746                         }
2747 
2748                         /* Set the node id data */
2749                         if (ql_get_rnid_params(ha,
2750                             sizeof (port_info->pi_rnid_params.params),
2751                             (caddr_t)&port_info->pi_rnid_params.params) ==
2752                             QL_SUCCESS) {
 
2764                                     sizeof (fca_port_attrs_t));
2765                         }
2766                 } else {
2767                         port_info->pi_rnid_params.status = FC_FAILURE;
2768                         if (ha->pi_attrs != NULL) {
2769                                 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2770                                     sizeof (fca_port_attrs_t));
2771                         }
2772                 }
2773 
2774                 /* Generate handle for this FCA. */
2775                 fca_handle = (opaque_t)vha;
2776 
2777                 ADAPTER_STATE_LOCK(ha);
2778                 vha->flags |= FCA_BOUND;
2779                 ADAPTER_STATE_UNLOCK(ha);
2780                 /* Set port's current state. */
2781                 port_info->pi_port_state = vha->state;
2782         }
2783 
2784         QL_PRINT_10(ha, "done, pi_port_state=%xh, "
2785             "pi_s_id.port_id=%xh\n",
2786             port_info->pi_port_state, port_info->pi_s_id.port_id);
2787 
2788         return (fca_handle);
2789 }
2790 
2791 /*
2792  * ql_unbind_port
2793  *      To unbind a Fibre Channel Adapter from an FC Port driver.
2794  *
2795  * Input:
2796  *      fca_handle = handle setup by ql_bind_port().
2797  *
2798  * Context:
2799  *      Kernel context.
2800  */
2801 static void
2802 ql_unbind_port(opaque_t fca_handle)
2803 {
2804         ql_adapter_state_t      *ha;
2805         ql_tgt_t                *tq;
2806         uint32_t                flgs;
2807 
2808         ha = ql_fca_handle_to_state(fca_handle);
2809         if (ha == NULL) {
2810                 /*EMPTY*/
2811                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2812                     (void *)fca_handle);
2813         } else {
2814                 QL_PRINT_10(ha, "started\n");
2815 
2816                 if (!(ha->flags & FCA_BOUND)) {
2817                         /*EMPTY*/
2818                         QL_PRINT_2(ha, "port already unbound\n");
2819                 } else {
2820                         if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2821                                 (void) ql_vport_control(ha, (uint8_t)
2822                                     (CFG_IST(ha, CFG_FC_TYPE) ?
2823                                     VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2824                                 if ((tq = ql_loop_id_to_queue(ha,
2825                                     FL_PORT_24XX_HDL)) != NULL) {
2826                                         (void) ql_logout_fabric_port(ha, tq);
2827                                 }
2828                                 flgs = FCA_BOUND | VP_ENABLED;
2829                         } else {
2830                                 flgs = FCA_BOUND;
2831                         }
2832                         ADAPTER_STATE_LOCK(ha);
2833                         ha->flags &= ~flgs;
2834                         ADAPTER_STATE_UNLOCK(ha);
2835                 }
2836 
2837                 QL_PRINT_10(ha, "done\n");
2838         }
2839 }
2840 
2841 /*
2842  * ql_init_pkt
2843  *      Initialize FCA portion of packet.
2844  *
2845  * Input:
2846  *      fca_handle = handle setup by ql_bind_port().
2847  *      pkt = pointer to fc_packet.
2848  *
2849  * Returns:
2850  *      FC_SUCCESS - the packet has successfully been initialized.
2851  *      FC_UNBOUND - the fca_handle specified is not bound.
2852  *      FC_NOMEM - the FCA failed initialization due to an allocation error.
2853  *      FC_FAILURE - the FCA failed initialization for undisclosed reasons
2854  *
2855  * Context:
2856  *      Kernel context.
2857  */
2858 /* ARGSUSED */
2859 static int
2860 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2861 {
2862         ql_adapter_state_t      *ha;
2863         ql_srb_t                *sp;
2864         int                     rval = FC_SUCCESS;
2865 
2866         ha = ql_fca_handle_to_state(fca_handle);
2867         if (ha == NULL) {
2868                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2869                     (void *)fca_handle);
2870                 return (FC_UNBOUND);
2871         }
2872         QL_PRINT_3(ha, "started\n");
2873 
2874         sp = (ql_srb_t *)pkt->pkt_fca_private;
2875         sp->flags = 0;
2876         sp->handle = 0;
2877 
2878         /* init cmd links */
2879         sp->cmd.base_address = sp;
2880         sp->cmd.prev = NULL;
2881         sp->cmd.next = NULL;
2882         sp->cmd.head = NULL;
2883 
2884         /* init watchdog links */
2885         sp->wdg.base_address = sp;
2886         sp->wdg.prev = NULL;
2887         sp->wdg.next = NULL;
2888         sp->wdg.head = NULL;
2889         sp->pkt = pkt;
2890         sp->ha = ha;
2891         sp->magic_number = QL_FCA_BRAND;
2892         sp->sg_dma.dma_handle = NULL;
2893 #ifndef __sparc
2894         if (CFG_IST(ha, CFG_CTRL_82XX)) {
2895                 /* Setup DMA for scatter gather list. */
2896                 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2897                 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2898                 sp->sg_dma.max_cookie_count = 1;
2899                 sp->sg_dma.alignment = 64;
2900                 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2901                         rval = FC_NOMEM;
2902                 }
2903         }
2904 #endif  /* __sparc */
2905 
2906         QL_PRINT_3(ha, "done\n");
2907 
2908         return (rval);
2909 }
2910 
2911 /*
2912  * ql_un_init_pkt
2913  *      Release all local resources bound to packet.
2914  *
2915  * Input:
2916  *      fca_handle = handle setup by ql_bind_port().
2917  *      pkt = pointer to fc_packet.
2918  *
2919  * Returns:
2920  *      FC_SUCCESS - the packet has successfully been invalidated.
2921  *      FC_UNBOUND - the fca_handle specified is not bound.
2922  *      FC_BADPACKET - the packet has not been initialized or has
2923  *                      already been freed by this FCA.
2924  *
2925  * Context:
2926  *      Kernel context.
2927  */
2928 static int
2929 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2930 {
2931         ql_adapter_state_t *ha;
2932         int rval;
2933         ql_srb_t *sp;
2934 
2935         ha = ql_fca_handle_to_state(fca_handle);
2936         if (ha == NULL) {
2937                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2938                     (void *)fca_handle);
2939                 return (FC_UNBOUND);
2940         }
2941         QL_PRINT_3(ha, "started\n");
2942 
2943         sp = (ql_srb_t *)pkt->pkt_fca_private;
2944 
2945         if (sp->magic_number != QL_FCA_BRAND) {
2946                 EL(ha, "failed, FC_BADPACKET\n");
2947                 rval = FC_BADPACKET;
2948         } else {
2949                 sp->magic_number = NULL;
2950                 ql_free_phys(ha, &sp->sg_dma);
2951                 rval = FC_SUCCESS;
2952         }
2953 
2954         QL_PRINT_3(ha, "done\n");
2955 
2956         return (rval);
2957 }
2958 
2959 /*
2960  * ql_els_send
2961  *      Issue an extended link service request.
2962  *
2963  * Input:
2964  *      fca_handle = handle setup by ql_bind_port().
2965  *      pkt = pointer to fc_packet.
2966  *
2967  * Returns:
2968  *      FC_SUCCESS - the command was successful.
2969  *      FC_ELS_FREJECT - the command was rejected by a Fabric.
2970  *      FC_ELS_PREJECT - the command was rejected by an N-port.
2971  *      FC_TRANSPORT_ERROR - a transport error occurred.
2972  *      FC_UNBOUND - the fca_handle specified is not bound.
2973  *      FC_ELS_BAD - the FCA can not issue the requested ELS.
2974  *
2975  * Context:
2976  *      Kernel context.
2977  */
2978 static int
2979 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2980 {
2981         ql_adapter_state_t      *ha;
2982         int                     rval;
2983         clock_t                 timer = drv_usectohz(30000000);
2984         ls_code_t               els;
2985         la_els_rjt_t            rjt;
2986         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
2987 
2988         /* Verify proper command. */
2989         ha = ql_cmd_setup(fca_handle, pkt, &rval);
2990         if (ha == NULL) {
2991                 QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2992                     rval, fca_handle);
2993                 return (FC_INVALID_REQUEST);
2994         }
2995         QL_PRINT_3(ha, "started\n");
2996 
2997         /* Wait for suspension to end. */
2998         TASK_DAEMON_LOCK(ha);
2999         while (DRIVER_SUSPENDED(ha)) {
3000                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3001 
3002                 /* 30 seconds from now */
3003                 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3004                     &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3005                         /*
3006                          * The timeout time 'timer' was
3007                          * reached without the condition
3008                          * being signaled.
3009                          */
3010                         pkt->pkt_state = FC_PKT_TRAN_BSY;
3011                         pkt->pkt_reason = FC_REASON_XCHG_BSY;
3012 
3013                         /* Release task daemon lock. */
3014                         TASK_DAEMON_UNLOCK(ha);
3015 
3016                         EL(ha, "QL_SUSPENDED failed=%xh\n",
3017                             QL_FUNCTION_TIMEOUT);
3018                         return (FC_TRAN_BUSY);
3019                 }
 
3029                 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3030         }
3031 
3032         pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3033         pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3034         pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
3035             R_CTL_SOLICITED_CONTROL;
3036         pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
3037             F_CTL_END_SEQ;
3038 
3039         sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
3040             SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
3041             SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
3042 
3043         sp->flags |= SRB_ELS_PKT;
3044 
3045         /* map the type of ELS to a function */
3046         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
3047             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
3048 
3049         QL_PRINT_10(ha, "els.ls_code=%xh, d_id=%xh\n", els.ls_code,
3050             pkt->pkt_cmd_fhdr.d_id);
3051 
3052         sp->iocb = ha->els_cmd;
3053         sp->req_cnt = 1;
3054 
3055         switch (els.ls_code) {
3056         case LA_ELS_RJT:
3057         case LA_ELS_ACC:
3058                 pkt->pkt_state = FC_PKT_SUCCESS;
3059                 rval = FC_SUCCESS;
3060                 break;
3061         case LA_ELS_PLOGI:
3062         case LA_ELS_PDISC:
3063                 rval = ql_els_plogi(ha, pkt);
3064                 break;
3065         case LA_ELS_FLOGI:
3066         case LA_ELS_FDISC:
3067                 rval = ql_els_flogi(ha, pkt);
3068                 break;
3069         case LA_ELS_LOGO:
3070                 rval = ql_els_logo(ha, pkt);
3071                 break;
3072         case LA_ELS_PRLI:
3073                 rval = ql_els_prli(ha, pkt);
3074                 break;
3075         case LA_ELS_PRLO:
3076                 rval = ql_els_prlo(ha, pkt);
3077                 break;
 
3105         case LA_ELS_RNID:
3106                 rval = ql_els_rnid(ha, pkt);
3107                 break;
3108         default:
3109                 EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
3110                     els.ls_code);
3111                 /* Build RJT. */
3112                 bzero(&rjt, sizeof (rjt));
3113                 rjt.ls_code.ls_code = LA_ELS_RJT;
3114                 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
3115 
3116                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
3117                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
3118 
3119                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3120                 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3121                 rval = FC_SUCCESS;
3122                 break;
3123         }
3124 
3125         /*
3126          * Return success if the srb was consumed by an iocb. The packet
3127          * completion callback will be invoked by the response handler.
3128          */
3129         if (rval == QL_CONSUMED) {
3130                 rval = FC_SUCCESS;
3131         } else if (rval == FC_SUCCESS &&
3132             !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
3133                 /* Do command callback only if no error */
3134                 ql_io_comp(sp);
3135         }
3136 
3137         if (rval != FC_SUCCESS) {
3138                 EL(ha, "rval=%x, ls_code=%xh sent to d_id=%xh, sp=%ph\n",
3139                     rval, els.ls_code, pkt->pkt_cmd_fhdr.d_id, sp);
3140         } else {
3141                 /*EMPTY*/
3142                 QL_PRINT_10(ha, "done\n");
3143         }
3144         return (rval);
3145 }
3146 
3147 /*
3148  * ql_get_cap
3149  *      Export FCA hardware and software capabilities.
3150  *
3151  * Input:
3152  *      fca_handle = handle setup by ql_bind_port().
3153  *      cap = pointer to the capabilities string.
3154  *      ptr = buffer pointer for return capability.
3155  *
3156  * Returns:
3157  *      FC_CAP_ERROR - no such capability
3158  *      FC_CAP_FOUND - the capability was returned and cannot be set
3159  *      FC_CAP_SETTABLE - the capability was returned and can be set
3160  *      FC_UNBOUND - the fca_handle specified is not bound.
3161  *
3162  * Context:
3163  *      Kernel context.
3164  */
3165 static int
3166 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
3167 {
3168         ql_adapter_state_t      *ha;
3169         int                     rval;
3170         uint32_t                *rptr = (uint32_t *)ptr;
3171 
3172         ha = ql_fca_handle_to_state(fca_handle);
3173         if (ha == NULL) {
3174                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3175                     (void *)fca_handle);
3176                 return (FC_UNBOUND);
3177         }
3178         QL_PRINT_3(ha, "started\n");
3179 
3180         if (strcmp(cap, FC_NODE_WWN) == 0) {
3181                 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
3182                     ptr, 8);
3183                 rval = FC_CAP_FOUND;
3184         } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3185                 bcopy((void *)&ha->loginparams, ptr,
3186                     sizeof (la_els_logi_t));
3187                 rval = FC_CAP_FOUND;
3188         } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3189                 *rptr = (uint32_t)QL_UB_LIMIT;
3190                 rval = FC_CAP_FOUND;
3191         } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
3192 
3193                 dev_info_t      *psydip = NULL;
3194 #ifdef __sparc
3195                 /*
3196                  * Disable streaming for certain 2 chip adapters
3197                  * below Psycho to handle Psycho byte hole issue.
3198                  */
3199                 if (ha->flags & MULTI_CHIP_ADAPTER &&
3200                     !CFG_IST(ha, CFG_SBUS_CARD)) {
3201                         for (psydip = ddi_get_parent(ha->dip); psydip;
3202                             psydip = ddi_get_parent(psydip)) {
3203                                 if (strcmp(ddi_driver_name(psydip),
3204                                     "pcipsy") == 0) {
3205                                         break;
3206                                 }
3207                         }
3208                 }
3209 #endif  /* __sparc */
3210 
3211                 if (psydip) {
3212                         *rptr = (uint32_t)FC_NO_STREAMING;
3213                         EL(ha, "No Streaming\n");
3214                 } else {
3215                         *rptr = (uint32_t)FC_ALLOW_STREAMING;
3216                         EL(ha, "Allow Streaming\n");
3217                 }
3218                 rval = FC_CAP_FOUND;
3219         } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3220                 *rptr = ha->loginparams.common_service.rx_bufsize;
3221                 rval = FC_CAP_FOUND;
3222         } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3223                 *rptr = FC_RESET_RETURN_ALL;
3224                 rval = FC_CAP_FOUND;
3225         } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
3226                 *rptr = FC_NO_DVMA_SPACE;
3227                 rval = FC_CAP_FOUND;
3228         } else {
3229                 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3230                 rval = FC_CAP_ERROR;
3231         }
3232 
3233         QL_PRINT_3(ha, "done\n");
3234 
3235         return (rval);
3236 }
3237 
3238 /*
3239  * ql_set_cap
3240  *      Allow the FC Transport to set FCA capabilities if possible.
3241  *
3242  * Input:
3243  *      fca_handle = handle setup by ql_bind_port().
3244  *      cap = pointer to the capabilities string.
3245  *      ptr = buffer pointer for capability.
3246  *
3247  * Returns:
3248  *      FC_CAP_ERROR - no such capability
3249  *      FC_CAP_FOUND - the capability cannot be set by the FC Transport.
3250  *      FC_CAP_SETTABLE - the capability was successfully set.
3251  *      FC_UNBOUND - the fca_handle specified is not bound.
3252  *
3253  * Context:
3254  *      Kernel context.
3255  */
3256 /* ARGSUSED */
3257 static int
3258 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
3259 {
3260         ql_adapter_state_t      *ha;
3261         int                     rval;
3262 
3263         ha = ql_fca_handle_to_state(fca_handle);
3264         if (ha == NULL) {
3265                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3266                     (void *)fca_handle);
3267                 return (FC_UNBOUND);
3268         }
3269         QL_PRINT_3(ha, "started\n");
3270 
3271         if (strcmp(cap, FC_NODE_WWN) == 0) {
3272                 rval = FC_CAP_FOUND;
3273         } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3274                 rval = FC_CAP_FOUND;
3275         } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3276                 rval = FC_CAP_FOUND;
3277         } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3278                 rval = FC_CAP_FOUND;
3279         } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3280                 rval = FC_CAP_FOUND;
3281         } else {
3282                 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3283                 rval = FC_CAP_ERROR;
3284         }
3285 
3286         QL_PRINT_3(ha, "done\n");
3287 
3288         return (rval);
3289 }
3290 
3291 /*
3292  * ql_getmap
3293  *      Request the Arbitrated Loop (AL-PA) map.
3294  *
3295  * Input:
3296  *      fca_handle = handle setup by ql_bind_port().
3297  *      mapbuf= buffer pointer for map.
3298  *
3299  * Returns:
3300  *      FC_OLDPORT - the specified port is not operating in loop mode.
3301  *      FC_OFFLINE - the specified port is not online.
3302  *      FC_NOMAP - there is no loop map available for this port.
3303  *      FC_UNBOUND - the fca_handle specified is not bound.
3304  *      FC_SUCCESS - a valid map has been placed in mapbuf.
3305  *
3306  * Context:
3307  *      Kernel context.
3308  */
3309 static int
3310 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
3311 {
3312         ql_adapter_state_t      *ha;
3313         clock_t                 timer = drv_usectohz(30000000);
3314         int                     rval = FC_SUCCESS;
3315 
3316         ha = ql_fca_handle_to_state(fca_handle);
3317         if (ha == NULL) {
3318                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3319                     (void *)fca_handle);
3320                 return (FC_UNBOUND);
3321         }
3322         QL_PRINT_3(ha, "started\n");
3323 
3324         mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
3325         mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
3326 
3327         /* Wait for suspension to end. */
3328         TASK_DAEMON_LOCK(ha);
3329         while (DRIVER_SUSPENDED(ha)) {
3330                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3331 
3332                 /* 30 seconds from now */
3333                 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3334                     &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3335                         /*
3336                          * The timeout time 'timer' was
3337                          * reached without the condition
3338                          * being signaled.
3339                          */
3340 
3341                         /* Release task daemon lock. */
3342                         TASK_DAEMON_UNLOCK(ha);
3343 
3344                         EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
3345                         return (FC_TRAN_BUSY);
3346                 }
3347         }
3348         /* Release task daemon lock. */
3349         TASK_DAEMON_UNLOCK(ha);
3350 
3351         if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
3352             (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
3353                 /*
3354                  * Now, since transport drivers consider this an
3355                  * offline condition, let's wait a few seconds
3356                  * for any loop transitions before we reset the
3357                  * chip and restart all over again.
3358                  */
3359                 ql_delay(ha, 2000000);
3360                 EL(ha, "failed, FC_NO_MAP\n");
3361                 rval = FC_NO_MAP;
3362         } else {
3363                 /*EMPTY*/
3364                 QL_PRINT_3(ha, "my_alpa %xh len %xh "
3365                     "data %xh %xh %xh %xh\n",
3366                     mapbuf->lilp_myalpa, mapbuf->lilp_length,
3367                     mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
3368                     mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
3369         }
3370 
3371         QL_PRINT_3(ha, "done\n");
3372 #if 0
3373         QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
3374 #endif
3375         return (rval);
3376 }
3377 
3378 /*
3379  * ql_transport
3380  *      Issue an I/O request. Handles all regular requests.
3381  *
3382  * Input:
3383  *      fca_handle = handle setup by ql_bind_port().
3384  *      pkt = pointer to fc_packet.
3385  *
3386  * Returns:
3387  *      FC_SUCCESS - the packet was accepted for transport.
3388  *      FC_TRANSPORT_ERROR - a transport error occurred.
3389  *      FC_BADPACKET - the packet to be transported had not been
3390  *                      initialized by this FCA.
3391  *      FC_UNBOUND - the fca_handle specified is not bound.
3392  *
3393  * Context:
3394  *      Kernel context.
3395  */
3396 static int
3397 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
3398 {
3399         ql_adapter_state_t      *ha;
3400         int                     rval = FC_TRANSPORT_ERROR;
3401         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
3402 
3403         /* Verify proper command. */
3404         ha = ql_cmd_setup(fca_handle, pkt, &rval);
3405         if (ha == NULL) {
3406                 QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3407                     rval, fca_handle);
3408                 return (rval);
3409         }
3410         QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
3411 
3412         /* Reset SRB flags. */
3413         sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
3414             SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_UB_CALLBACK |
3415             SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
3416             SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
3417             SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
3418             SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
3419             SRB_MS_PKT | SRB_ELS_PKT);
3420 
3421         pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3422         pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
3423         pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3424         pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
3425         pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
3426 
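             /*
              * Dispatch on the frame routing control: FCP commands,
              * FC-IP unsolicited data, and generic services requests.
              */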
3427         switch (pkt->pkt_cmd_fhdr.r_ctl) {
3428         case R_CTL_COMMAND:
3429                 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
3430                         sp->flags |= SRB_FCP_CMD_PKT;
3431                         rval = ql_fcp_scsi_cmd(ha, pkt, sp);
3432                 } else {
3433                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
3434                         pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3435                         rval = FC_TRANSPORT_ERROR;
3436                 }
3437                 break;
3438 
3439         default:
3440                 /* Setup response header and buffer. */
3441                 if (pkt->pkt_rsplen) {
3442                         bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3443                 }
3444 
3445                 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3446                 case R_CTL_UNSOL_DATA:
3447                         if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3448                                 if (CFG_IST(ha, CFG_FCIP_SUPPORT) &&
3449                                     ha->vp_index == 0) {
3450                                         sp->flags |= SRB_IP_PKT;
3451                                         rval = ql_fcp_ip_cmd(ha, pkt, sp);
3452                                 } else {
3453                                         cmn_err(CE_NOTE, "%s(%d) FC-IP is not "
3454                                             "supported on this adapter\n",
3455                                             QL_NAME, ha->instance);
3456                                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
3457                                         pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3458                                         rval = FC_TRANSPORT_ERROR;
3459                                 }
3460                         }
3461                         break;
3462 
3463                 case R_CTL_UNSOL_CONTROL:
3464                         if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3465                                 sp->flags |= SRB_GENERIC_SERVICES_PKT;
3466                                 rval = ql_fc_services(ha, pkt);
3467                         } else {
3468                                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3469                                 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3470                                 rval = FC_TRANSPORT_ERROR;
3471                         }
3472                         break;
3473 
3474                 case R_CTL_SOLICITED_DATA:
3475                 case R_CTL_STATUS:
3476                 default:
3477                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
3478                         pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3479                         rval = FC_TRANSPORT_ERROR;
3480                         EL(ha, "unknown, r_ctl=%xh\n",
3481                             pkt->pkt_cmd_fhdr.r_ctl);
3482                         break;
3483                 }
3484         }
3485 
3486         if (rval != FC_SUCCESS) {
3487                 EL(ha, "failed, rval = %xh\n", rval);
3488         } else {
3489                 /*EMPTY*/
3490                 QL_PRINT_3(ha, "done\n");
3491         }
3492 
3493         return (rval);
3494 }
3495 
3496 /*
3497  * ql_ub_alloc
3498  *      Allocate buffers for unsolicited exchanges.
3499  *
3500  * Input:
3501  *      fca_handle = handle setup by ql_bind_port().
3502  *      tokens = token array for each buffer.
3503  *      size = size of each buffer.
3504  *      count = pointer to number of buffers.
3505  *      type = the FC-4 type the buffers are reserved for.
3506  *              1 = Extended Link Services, 5 = LLC/SNAP
3507  *
3508  * Returns:
3509  *      FC_FAILURE - buffers could not be allocated.
3510  *      FC_TOOMANY - the FCA could not allocate the requested
 
3515  * Context:
3516  *      Kernel context.
3517  */
3518 static int
3519 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3520     uint32_t *count, uint32_t type)
3521 {
3522         ql_adapter_state_t      *ha;
3523         caddr_t                 bufp = NULL;
3524         fc_unsol_buf_t          *ubp;
3525         ql_srb_t                *sp;
3526         uint32_t                index;
3527         uint32_t                cnt;
3528         uint32_t                ub_array_index = 0;
3529         int                     rval = FC_SUCCESS;
3530         int                     ub_updated = FALSE;
3531 
3532         /* Check handle. */
3533         ha = ql_fca_handle_to_state(fca_handle);
3534         if (ha == NULL) {
3535                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3536                     (void *)fca_handle);
3537                 return (FC_UNBOUND);
3538         }
3539         QL_PRINT_3(ha, "started, count = %xh\n", *count);
3540 
3541         QL_PM_LOCK(ha);
3542         if (ha->power_level != PM_LEVEL_D0) {
3543                 QL_PM_UNLOCK(ha);
3544                 QL_PRINT_3(ha, "down done\n");
3545                 return (FC_FAILURE);
3546         }
3547         QL_PM_UNLOCK(ha);
3548 
3549         /* Check the count. */
3550         if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3551                 *count = 0;
3552                 EL(ha, "failed, FC_TOOMANY\n");
3553                 rval = FC_TOOMANY;
3554         }
3555 
3556         /*
3557          * reset ub_array_index
3558          */
3559         ub_array_index = 0;
3560 
3561         /*
3562          * Now proceed to allocate any buffers required
3563          */
3564         for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3565                 /* Allocate all memory needed. */
3566                 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3567                     KM_SLEEP);
3568                 if (ubp == NULL) {
 
3644                         ubp->ub_buffer = bufp;
3645                         ubp->ub_bufsize = size;
3646                         ubp->ub_port_handle = fca_handle;
3647                         ubp->ub_token = ub_array_index;
3648 
3649                         /* Save the token. */
3650                         tokens[index] = ub_array_index;
3651 
3652                         /* Setup FCA private information. */
3653                         sp->ub_type = type;
3654                         sp->handle = ub_array_index;
3655                         sp->flags |= SRB_UB_IN_FCA;
3656 
3657                         ha->ub_array[ub_array_index] = ubp;
3658                         ha->ub_allocated++;
3659                         ub_updated = TRUE;
3660                         QL_UB_UNLOCK(ha);
3661                 }
3662         }
3663 
3664         /* IP buffer. */
3665         if (ub_updated) {
3666                 if (type == FC_TYPE_IS8802_SNAP &&
3667                     CFG_IST(ha, CFG_FCIP_SUPPORT) &&
3668                     ha->vp_index == 0) {
3669 
3670                         ADAPTER_STATE_LOCK(ha);
3671                         ha->flags |= IP_ENABLED;
3672                         ADAPTER_STATE_UNLOCK(ha);
3673 
3674                         if (!(ha->flags & IP_INITIALIZED)) {
3675                                 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3676                                         ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3677                                             LSB(ql_ip_mtu);
3678                                         ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3679                                             MSB(ql_ip_mtu);
3680                                         ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3681                                             LSB(size);
3682                                         ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3683                                             MSB(size);
3684 
3685                                         cnt = CHAR_TO_SHORT(
3686                                             ha->ip_init_ctrl_blk.cb24.cc[0],
3687                                             ha->ip_init_ctrl_blk.cb24.cc[1]);
3688 
3689                                         if (cnt < *count) {
3690                                                 ha->ip_init_ctrl_blk.cb24.cc[0]
3691                                                     = LSB(*count);
3692                                                 ha->ip_init_ctrl_blk.cb24.cc[1]
3693                                                     = MSB(*count);
3694                                         }
3695                                 } else {
 
3707                                             ha->ip_init_ctrl_blk.cb.cc[1]);
3708 
3709                                         if (cnt < *count) {
3710                                                 ha->ip_init_ctrl_blk.cb.cc[0] =
3711                                                     LSB(*count);
3712                                                 ha->ip_init_ctrl_blk.cb.cc[1] =
3713                                                     MSB(*count);
3714                                         }
3715                                 }
3716 
3717                                 (void) ql_initialize_ip(ha);
3718                         }
3719                         ql_isp_rcvbuf(ha);
3720                 }
3721         }
3722 
3723         if (rval != FC_SUCCESS) {
3724                 EL(ha, "failed=%xh\n", rval);
3725         } else {
3726                 /*EMPTY*/
3727                 QL_PRINT_3(ha, "done\n");
3728         }
3729         return (rval);
3730 }
3731 
3732 /*
3733  * ql_ub_free
3734  *      Free unsolicited buffers.
3735  *
3736  * Input:
3737  *      fca_handle = handle setup by ql_bind_port().
3738  *      count = number of buffers.
3739  *      tokens = token array for each buffer.
3740  *
3741  * Returns:
3742  *      FC_SUCCESS - the requested buffers have been freed.
3743  *      FC_UNBOUND - the fca_handle specified is not bound.
3744  *      FC_UB_BADTOKEN - an invalid token was encountered.
3745  *                       No buffers have been released.
3746  *
3747  * Context:
3748  *      Kernel context.
3749  */
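     /*
      * Illustrative usage sketch (assumes tokens[] and cnt came from an
      * earlier successful ql_ub_alloc() call on the same port handle):
      *
      *     int     rv;
      *
      *     rv = ql_ub_free(fca_handle, cnt, tokens);
      *
      * FC_UB_BADTOKEN means a token was out of range or did not match an
      * allocated buffer; FC_UNBOUND means the handle was not bound.
      */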
3750 static int
3751 ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3752 {
3753         ql_adapter_state_t      *ha;
3754         ql_srb_t                *sp;
3755         uint32_t                index;
3756         uint64_t                ub_array_index;
3757         int                     rval = FC_SUCCESS;
3758 
3759         /* Check handle. */
3760         ha = ql_fca_handle_to_state(fca_handle);
3761         if (ha == NULL) {
3762                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3763                     (void *)fca_handle);
3764                 return (FC_UNBOUND);
3765         }
3766         QL_PRINT_3(ha, "started\n");
3767 
3768         /* Check all returned tokens. */
3769         for (index = 0; index < count; index++) {
3770                 fc_unsol_buf_t  *ubp;
3771 
3772                 /* Check the token range. */
3773                 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3774                         EL(ha, "failed, FC_UB_BADTOKEN\n");
3775                         rval = FC_UB_BADTOKEN;
3776                         break;
3777                 }
3778 
3779                 /* Check the unsolicited buffer array. */
3780                 QL_UB_LOCK(ha);
3781                 ubp = ha->ub_array[ub_array_index];
3782 
3783                 if (ubp == NULL) {
3784                         EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3785                         rval = FC_UB_BADTOKEN;
3786                         QL_UB_UNLOCK(ha);
3787                         break;
3788                 }
3789 
3790                 /* Check the state of the unsolicited buffer. */
3791                 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3792                 sp->flags |= SRB_UB_FREE_REQUESTED;
3793 
3794                 while (!(sp->flags & SRB_UB_IN_FCA) ||
3795                     (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
3796                         QL_UB_UNLOCK(ha);
3797                         delay(drv_usectohz(100000));
3798                         QL_UB_LOCK(ha);
3799                 }
3800                 ha->ub_array[ub_array_index] = NULL;
3801                 QL_UB_UNLOCK(ha);
3802                 ql_free_unsolicited_buffer(ha, ubp);
3803         }
3804 
3805         if (rval == FC_SUCCESS) {
3806                 /*
3807                  * Signal any pending hardware reset when there are
3808                  * no more unsolicited buffers in use.
3809                  */
3810                 if (ha->ub_allocated == 0) {
3811                         QL_UB_LOCK(ha);
3812                         cv_broadcast(&ha->pha->cv_ub);
3813                         QL_UB_UNLOCK(ha);
3814                 }
3815         }
3816 
3817         if (rval != FC_SUCCESS) {
3818                 EL(ha, "failed=%xh\n", rval);
3819         } else {
3820                 /*EMPTY*/
3821                 QL_PRINT_3(ha, "done\n");
3822         }
3823         return (rval);
3824 }
3825 
3826 /*
3827  * ql_ub_release
3828  *      Release unsolicited buffers from FC Transport
3829  *      to FCA for future use.
3830  *
3831  * Input:
3832  *      fca_handle = handle setup by ql_bind_port().
3833  *      count = number of buffers.
3834  *      tokens = token array for each buffer.
3835  *
3836  * Returns:
3837  *      FC_SUCCESS - the requested buffers have been released.
3838  *      FC_UNBOUND - the fca_handle specified is not bound.
3839  *      FC_UB_BADTOKEN - an invalid token was encountered.
3840  *              No buffers have been released.
3841  *
3842  * Context:
3843  *      Kernel context.
3844  */
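     /*
      * Illustrative usage sketch (assumes ubp points to an unsolicited
      * buffer the transport has finished with; ub_token was assigned by
      * ql_ub_alloc()):
      *
      *     uint64_t        token = ubp->ub_token;
      *
      *     (void) ql_ub_release(fca_handle, 1, &token);
      *
      * The buffer is marked SRB_UB_IN_FCA again and, for FC-IP buffers,
      * handed back to the ISP receive buffer queue.
      */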
3845 static int
3846 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3847 {
3848         ql_adapter_state_t      *ha;
3849         ql_srb_t                *sp;
3850         uint32_t                index;
3851         uint64_t                ub_array_index;
3852         int                     rval = FC_SUCCESS;
3853         int                     ub_ip_updated = FALSE;
3854 
3855         /* Check handle. */
3856         ha = ql_fca_handle_to_state(fca_handle);
3857         if (ha == NULL) {
3858                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3859                     (void *)fca_handle);
3860                 return (FC_UNBOUND);
3861         }
3862         QL_PRINT_3(ha, "started\n");
3863 
3864         /* Acquire unsolicited buffer lock. */
3865         QL_UB_LOCK(ha);
3866 
3867         /* Check all returned tokens. */
3868         for (index = 0; index < count; index++) {
3869                 /* Check the token range. */
3870                 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3871                         EL(ha, "failed, FC_UB_BADTOKEN\n");
3872                         rval = FC_UB_BADTOKEN;
3873                         break;
3874                 }
3875 
3876                 /* Check the unsolicited buffer array. */
3877                 if (ha->ub_array[ub_array_index] == NULL) {
3878                         EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3879                         rval = FC_UB_BADTOKEN;
3880                         break;
3881                 }
3882 
3883                 /* Check the state of the unsolicited buffer. */
3884                 sp = ha->ub_array[ub_array_index]->ub_fca_private;
 
3894                 /* Check all returned tokens. */
3895                 for (index = 0; index < count; index++) {
3896                         fc_unsol_buf_t  *ubp;
3897 
3898                         ub_array_index = tokens[index];
3899                         ubp = ha->ub_array[ub_array_index];
3900                         sp = ubp->ub_fca_private;
3901 
3902                         ubp->ub_resp_flags = 0;
3903                         sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3904                         sp->flags |= SRB_UB_IN_FCA;
3905 
3906                         /* IP buffer. */
3907                         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3908                                 ub_ip_updated = TRUE;
3909                         }
3910                 }
3911         }
3912 
3913         QL_UB_UNLOCK(ha);
3914 
3915         /*
3916          * XXX: We should call ql_isp_rcvbuf() to return a
3917          * buffer to the ISP only if the number of buffers falls
3918          * below the low water mark.
3919          */
3920         if (ub_ip_updated) {
3921                 ql_isp_rcvbuf(ha);
3922         }
3923 
3924         if (rval != FC_SUCCESS) {
3925                 EL(ha, "failed, rval = %xh\n", rval);
3926         } else {
3927                 /*EMPTY*/
3928                 QL_PRINT_3(ha, "done\n");
3929         }
3930         return (rval);
3931 }
3932 
3933 /*
3934  * ql_abort
3935  *      Abort a packet.
3936  *
3937  * Input:
3938  *      fca_handle = handle setup by ql_bind_port().
3939  *      pkt = pointer to fc_packet.
3940  *      flags = KM_SLEEP flag.
3941  *
3942  * Returns:
3943  *      FC_SUCCESS - the packet has been successfully aborted.
3944  *      FC_ABORTED - the packet has been successfully aborted.
3945  *      FC_ABORTING - the packet is being aborted.
3946  *      FC_ABORT_FAILED - the packet could not be aborted.
3947  *      FC_TRANSPORT_ERROR - a transport error occurred while attempting
3948  *              to abort the packet.
3949  *      FC_BADEXCHANGE - no packet found.
3950  *      FC_UNBOUND - the fca_handle specified is not bound.
3951  *
3952  * Context:
3953  *      Kernel context.
3954  */
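     /*
      * Illustrative usage sketch (assumes pkt was previously submitted on
      * this port; flags carries the KM_SLEEP value described above):
      *
      *     int     st;
      *
      *     st = ql_abort(fca_handle, pkt, KM_SLEEP);
      *
      * FC_ABORTED or FC_ABORTING means the abort was accepted; FC_FAILURE,
      * FC_OFFLINE, FC_BADEXCHANGE and FC_TRANSPORT_ERROR mean it was not.
      */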
3955 /*ARGSUSED*/
3956 static int
3957 ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
3958 {
3959         port_id_t               d_id;
3960         ql_link_t               *link;
3961         ql_adapter_state_t      *ha, *pha;
3962         ql_tgt_t                *tq;
3963         ql_lun_t                *lq;
3964         int                     rval = FC_ABORTED;
3965         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
3966 
3967         ha = ql_fca_handle_to_state(fca_handle);
3968         if (ha == NULL) {
3969                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3970                     (void *)fca_handle);
3971                 return (FC_UNBOUND);
3972         }
3973 
3974         pha = ha->pha;
3975 
3976         QL_PRINT_3(ha, "started\n");
3977 
3978         /* Get target queue pointer. */
3979         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
3980         tq = ql_d_id_to_queue(ha, d_id);
3981 
3982         if ((tq == NULL) || (lq = sp->lun_queue) == NULL ||
3983             (pha->task_daemon_flags & LOOP_DOWN)) {
3984                 if (tq == NULL || lq == NULL) {
3985                         EL(ha, "failed, FC_TRANSPORT_ERROR\n");
3986                         rval = FC_TRANSPORT_ERROR;
3987                 } else {
3988                         EL(ha, "failed, FC_OFFLINE\n");
3989                         rval = FC_OFFLINE;
3990                 }
3991                 return (rval);
3992         }
3993 
3994         /* Acquire device queue and request ring locks. */
3995         DEVICE_QUEUE_LOCK(tq);
3996         REQUEST_RING_LOCK(ha);
3997 
3998         /* If command not already started. */
3999         if (!(sp->flags & SRB_ISP_STARTED)) {
4000                 /* Check pending queue for command. */
4001                 sp = NULL;
4002                 for (link = pha->pending_cmds.first; link != NULL;
4003                     link = link->next) {
4004                         sp = link->base_address;
4005                         if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
4006                                 /* Remove srb from q. */
4007                                 ql_remove_link(&pha->pending_cmds, &sp->cmd);
4008                                 break;
4009                         } else {
4010                                 sp = NULL;
4011                         }
4012                 }
4013                 REQUEST_RING_UNLOCK(ha);
 
4020                                 if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
4021                                         /* Remove srb from q. */
4022                                         ql_remove_link(&lq->cmd, &sp->cmd);
4023                                         break;
4024                                 } else {
4025                                         sp = NULL;
4026                                 }
4027                         }
4028                 }
4029                 /* Release device queue lock. */
4030                 DEVICE_QUEUE_UNLOCK(tq);
4031 
4032                 /* If command on target queue. */
4033                 if (sp != NULL) {
4034                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
4035 
4036                         /* Set return status */
4037                         pkt->pkt_reason = CS_ABORTED;
4038 
4039                         sp->cmd.next = NULL;
4040                         ql_done(&sp->cmd, B_TRUE);
4041                         rval = FC_ABORTED;
4042                 } else {
4043                         EL(ha, "failed, FC_BADEXCHANGE\n");
4044                         rval = FC_BADEXCHANGE;
4045                 }
4046         } else if (sp->flags & SRB_ISP_COMPLETED) {
4047                 /* Release request ring and device queue locks. */
4048                 REQUEST_RING_UNLOCK(ha);
4049                 DEVICE_QUEUE_UNLOCK(tq);
4050                 EL(ha, "failed, already done, FC_FAILURE\n");
4051                 rval = FC_FAILURE;
4052         } else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
4053             (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
4054                 /*
4055                  * If here, the target data/response CTIO is with the
4056                  * firmware. Since the firmware is supposed to terminate
4057                  * such I/Os with an error, we need not do anything. If
4058                  * the firmware decides not to terminate those I/Os and
4059                  * simply keeps quiet, we need to initiate cleanup here by
4060                  * calling ql_done.
4061                  */
4062                 REQUEST_RING_UNLOCK(ha);
4063                 DEVICE_QUEUE_UNLOCK(tq);
4064                 rval = FC_ABORTED;
4065         } else {
4066                 ql_request_q_t  *req_q;
4067                 request_t       *pio;
4068                 uint32_t        index;
4069 
4070                 REQUEST_RING_UNLOCK(ha);
4071                 DEVICE_QUEUE_UNLOCK(tq);
4072 
4073                 INTR_LOCK(ha);
4074                 sp->flags |= SRB_ABORTING;
4075                 if (sp->handle != 0) {
4076                         index = sp->handle & OSC_INDEX_MASK;
4077                         if (ha->outstanding_cmds[index] == sp) {
4078                                 ha->outstanding_cmds[index] =
4079                                     QL_ABORTED_SRB(ha);
4080                         }
4081                         if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
4082                                 req_q = ha->req_q[1];
4083                         } else {
4084                                 req_q = ha->req_q[0];
4085                         }
4086                         pio = sp->request_ring_ptr;
4087                         if (sp->handle ==
4088                             ddi_get32(req_q->req_ring.acc_handle,
4089                             &pio->handle)) {
4090                                 EL(ha, "inflight sp=%ph, handle=%xh, "
4091                                     "invalidated\n", (void *)sp, sp->handle);
4092                                 for (index = 0; index < sp->req_cnt; index++) {
4093                                         ddi_put8(req_q->req_ring.acc_handle,
4094                                             &pio->entry_type,
4095                                             ABORTED_ENTRY_TYPE);
4096                                         pio++;
4097                                         if (pio == (request_t *)
4098                                             ((uintptr_t)req_q->req_ring.bp +
4099                                             req_q->req_ring.size)) {
4100                                                 pio = req_q->req_ring.bp;
4101                                         }
4102                                 }
4103                         }
4104                         /* Decrement outstanding commands on device. */
4105                         if (tq->outcnt != 0) {
4106                                 tq->outcnt--;
4107                         }
4108                         if (sp->flags & SRB_FCP_CMD_PKT &&
4109                             lq->lun_outcnt != 0) {
4110                                 lq->lun_outcnt--;
4111                         }
4112                         /* Remove command from watchdog queue. */
4113                         if (sp->flags & SRB_WATCHDOG_ENABLED) {
4114                                 ql_remove_link(&tq->wdg, &sp->wdg);
4115                                 sp->flags &= ~SRB_WATCHDOG_ENABLED;
4116                         }
4117                         /* Release interrupt lock. */
4118                         INTR_UNLOCK(ha);
4119 
4120                         (void) ql_abort_command(ha, sp);
4121                         sp->handle = 0;
4122                 } else {
4123                         /* Release interrupt lock. */
4124                         INTR_UNLOCK(ha);
4125                 }
4126 
4127                 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
4128                 sp->flags |= SRB_ISP_COMPLETED;
4129                 pkt->pkt_reason = CS_ABORTED;
4130                 rval = FC_ABORTED;
4131         }
4132 
4133         QL_PRINT_3(ha, "done\n");
4134 
4135         return (rval);
4136 }
4137 
4138 /*
4139  * ql_reset
4140  *      Reset link or hardware.
4141  *
4142  * Input:
4143  *      fca_handle = handle setup by ql_bind_port().
4144  *      cmd = reset type command.
4145  *
4146  * Returns:
4147  *      FC_SUCCESS - reset has successfully finished.
4148  *      FC_UNBOUND - the fca_handle specified is not bound.
4149  *      FC_FAILURE - reset failed.
4150  *
4151  * Context:
4152  *      Kernel context.
4153  */
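     /*
      * Illustrative usage sketch (the cmd values shown are ones handled in
      * the switch below; FC_FCA_LINK_RESET resets the link, FC_FCA_CORE
      * requests a firmware core dump):
      *
      *     int     rv;
      *
      *     rv = ql_reset(fca_handle, FC_FCA_LINK_RESET);
      *
      * FC_TRAN_BUSY is returned while the driver is stalled or a loop/ISP
      * transition is in progress; FC_FAILURE means the reset did not
      * complete.
      */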
4154 static int
4155 ql_reset(opaque_t fca_handle, uint32_t cmd)
4156 {
4157         ql_adapter_state_t      *ha;
4158         int                     rval = FC_SUCCESS, rval2;
4159 
4160         ha = ql_fca_handle_to_state(fca_handle);
4161         if (ha == NULL) {
4162                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
4163                     (void *)fca_handle);
4164                 return (FC_UNBOUND);
4165         }
4166 
4167         QL_PRINT_3(ha, "started, cmd=%d\n", cmd);
4168 
4169         if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
4170             DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
4171                 EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
4172                     ha->task_daemon_flags);
4173                 return (FC_TRAN_BUSY);
4174         }
4175 
4176         switch (cmd) {
4177         case FC_FCA_CORE:
4178                 /* Dump firmware core if specified. */
4179                 if (ha->vp_index == 0) {
4180                         if (ql_dump_firmware(ha) != QL_SUCCESS) {
4181                                 EL(ha, "failed, FC_FAILURE\n");
4182                                 rval = FC_FAILURE;
4183                         }
4184                 }
4185                 break;
4186         case FC_FCA_LINK_RESET:
4187                 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
4188                         if (ql_loop_reset(ha) != QL_SUCCESS) {
4189                                 EL(ha, "failed, FC_FAILURE-2\n");
4190                                 rval = FC_FAILURE;
4191                         }
4192                 }
4193                 break;
4194         case FC_FCA_RESET_CORE:
4195         case FC_FCA_RESET:
 
4246                                 ha->state |= FC_STATE_ONLINE;
4247                         }
4248                 }
4249 
4250                 TASK_DAEMON_LOCK(ha);
4251                 ha->task_daemon_flags |= FC_STATE_CHANGE;
4252                 TASK_DAEMON_UNLOCK(ha);
4253 
4254                 ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
4255 
4256                 break;
4257         default:
4258                 EL(ha, "unknown cmd=%xh\n", cmd);
4259                 break;
4260         }
4261 
4262         if (rval != FC_SUCCESS) {
4263                 EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
4264         } else {
4265                 /*EMPTY*/
4266                 QL_PRINT_3(ha, "done\n");
4267         }
4268 
4269         return (rval);
4270 }
4271 
4272 /*
4273  * ql_port_manage
4274  *      Perform port management or diagnostics.
4275  *
4276  * Input:
4277  *      fca_handle = handle setup by ql_bind_port().
4278  *      cmd = pointer to command structure.
4279  *
4280  * Returns:
4281  *      FC_SUCCESS - the request completed successfully.
4282  *      FC_FAILURE - the request did not complete successfully.
4283  *      FC_UNBOUND - the fca_handle specified is not bound.
4284  *
4285  * Context:
4286  *      Kernel context.
 
4290 {
4291         clock_t                 timer;
4292         uint16_t                index;
4293         uint32_t                *bp;
4294         port_id_t               d_id;
4295         ql_link_t               *link;
4296         ql_adapter_state_t      *ha, *pha;
4297         ql_tgt_t                *tq;
4298         dma_mem_t               buffer_xmt, buffer_rcv;
4299         size_t                  length;
4300         uint32_t                cnt;
4301         char                    buf[80];
4302         lbp_t                   *lb;
4303         ql_mbx_data_t           mr;
4304         app_mbx_cmd_t           *mcp;
4305         int                     i0;
4306         uint8_t                 *bptr;
4307         int                     rval2, rval = FC_SUCCESS;
4308         uint32_t                opcode;
4309         uint32_t                set_flags = 0;
4310         fc_fca_p2p_info_t       *p2p_info;
4311 
4312         ha = ql_fca_handle_to_state(fca_handle);
4313         if (ha == NULL) {
4314                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
4315                     (void *)fca_handle);
4316                 return (FC_UNBOUND);
4317         }
4318         pha = ha->pha;
4319 
4320 #ifdef  QL_DEBUG_LEVEL_10
4321         if (cmd->pm_cmd_code != FC_PORT_GET_FW_REV) {
4322                 QL_PRINT_10(ha, "started=%xh\n", cmd->pm_cmd_code);
4323         }
4324 #endif
4325 
4326         if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
4327             DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
4328                 EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
4329                     ha->task_daemon_flags);
4330                 return (FC_TRAN_BUSY);
4331         }
4332 
4333         switch (cmd->pm_cmd_code) {
4334         case FC_PORT_BYPASS:
4335                 d_id.b24 = *cmd->pm_cmd_buf;
4336                 tq = ql_d_id_to_queue(ha, d_id);
4337                 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4338                         EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4339                         rval = FC_FAILURE;
4340                 }
4341                 break;
4342         case FC_PORT_UNBYPASS:
4343                 d_id.b24 = *cmd->pm_cmd_buf;
4344                 tq = ql_d_id_to_queue(ha, d_id);
4345                 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4346                         EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4347                         rval = FC_FAILURE;
4348                 }
4349                 break;
 
4399         case FC_PORT_GET_DUMP:
4400                 QL_DUMP_LOCK(pha);
4401                 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4402                         EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4403                             "length=%lxh\n", cmd->pm_data_len);
4404                         cmd->pm_data_len = pha->risc_dump_size;
4405                         rval = FC_FAILURE;
4406                 } else if (pha->ql_dump_state & QL_DUMPING) {
4407                         EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4408                         rval = FC_TRAN_BUSY;
4409                 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4410                         (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4411                         pha->ql_dump_state |= QL_DUMP_UPLOADED;
4412                 } else {
4413                         EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4414                         rval = FC_FAILURE;
4415                 }
4416                 QL_DUMP_UNLOCK(pha);
4417                 break;
4418         case FC_PORT_FORCE_DUMP:
4419                 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4420                         EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4421                         rval = FC_FAILURE;
4422                 }
4423                 break;
4424         case FC_PORT_GET_DUMP_SIZE:
4425                 bp = (uint32_t *)cmd->pm_data_buf;
4426                 *bp = pha->risc_dump_size;
4427                 break;
4428         case FC_PORT_DIAG:
4429                 EL(ha, "diag cmd=%xh\n", cmd->pm_cmd_flags);
4430 
4431                 /* Wait for suspension to end. */
4432                 for (timer = 0; timer < 3000 &&
4433                     pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4434                         ql_delay(ha, 10000);
4435                 }
4436 
4437                 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4438                         EL(ha, "failed, FC_TRAN_BUSY-2\n");
4439                         rval = FC_TRAN_BUSY;
4440                         break;
4441                 }
4442 
4443                 if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4444                         EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4445                             rval2);
4446                         ql_restart_driver(ha);
4447                         rval = FC_TRAN_BUSY;
4448                         break;
4449                 }
4450 
4451                 switch (cmd->pm_cmd_flags) {
4452                 case QL_DIAG_EXEFMW:
4453                         if (ql_start_firmware(ha) != QL_SUCCESS) {
4454                                 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4455                                 rval = FC_FAILURE;
4456                         }
4457                         break;
4458                 case QL_DIAG_CHKCMDQUE:
4459                         for (i0 = 1, cnt = 0; i0 < pha->osc_max_cnt;
4460                             i0++) {
4461                                 cnt += (pha->outstanding_cmds[i0] != NULL);
4462                         }
4463                         if (cnt != 0) {
4464                                 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4465                                     "FC_FAILURE\n");
4466                                 rval = FC_FAILURE;
4467                         }
4468                         break;
4469                 case QL_DIAG_FMWCHKSUM:
4470                         if (ql_verify_checksum(ha) != QL_SUCCESS) {
4471                                 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4472                                     "FC_FAILURE\n");
4473                                 rval = FC_FAILURE;
4474                         }
4475                         break;
4476                 case QL_DIAG_SLFTST:
4477                         if (ql_online_selftest(ha) != QL_SUCCESS) {
4478                                 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4479                                 rval = FC_FAILURE;
 
4494                                     cmd->pm_stat_buf,
4495                                     (size_t)cmd->pm_stat_len);
4496                                 cmd->pm_stat_len =
4497                                     sizeof (ql_adapter_revlvl_t);
4498                         }
4499                         break;
4500                 case QL_DIAG_LPBMBX:
4501 
4502                         if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4503                                 EL(ha, "failed, QL_DIAG_LPBMBX "
4504                                     "FC_INVALID_REQUEST, pmlen=%lxh, "
4505                                     "reqd=%lxh\n", cmd->pm_data_len,
4506                                     sizeof (struct app_mbx_cmd));
4507                                 rval = FC_INVALID_REQUEST;
4508                                 break;
4509                         }
4510                         /*
4511                          * Don't do the wrap test on a 2200 when the
4512                          * firmware is running.
4513                          */
4514                         if (!CFG_IST(ha, CFG_CTRL_22XX)) {
4515                                 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4516                                 mr.mb[1] = mcp->mb[1];
4517                                 mr.mb[2] = mcp->mb[2];
4518                                 mr.mb[3] = mcp->mb[3];
4519                                 mr.mb[4] = mcp->mb[4];
4520                                 mr.mb[5] = mcp->mb[5];
4521                                 mr.mb[6] = mcp->mb[6];
4522                                 mr.mb[7] = mcp->mb[7];
4523 
4524                                 bcopy(&mr.mb[0], &mr.mb[10],
4525                                     sizeof (uint16_t) * 8);
4526 
4527                                 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4528                                         EL(ha, "failed, QL_DIAG_LPBMBX "
4529                                             "FC_FAILURE\n");
4530                                         rval = FC_FAILURE;
4531                                         break;
4532                                 } else {
4533                                         for (i0 = 1; i0 < 8; i0++) {
4534                                                 if (mr.mb[i0] !=
 
4572                         if (ql_get_dma_mem(ha, &buffer_xmt,
4573                             (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4574                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4575                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4576                                 rval = FC_NOMEM;
4577                                 break;
4578                         }
4579                         if (ql_get_dma_mem(ha, &buffer_rcv,
4580                             (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4581                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4582                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4583                                 rval = FC_NOMEM;
4584                                 break;
4585                         }
4586                         ddi_rep_put8(buffer_xmt.acc_handle,
4587                             (uint8_t *)cmd->pm_data_buf,
4588                             (uint8_t *)buffer_xmt.bp,
4589                             cmd->pm_data_len, DDI_DEV_AUTOINCR);
4590 
4591                         /* A 22xx adapter must be in loop mode for the test. */
4592                         if (CFG_IST(ha, CFG_CTRL_22XX)) {
4593                                 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4594                                 if (ha->flags & POINT_TO_POINT ||
4595                                     (ha->task_daemon_flags & LOOP_DOWN &&
4596                                     *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4597                                         cnt = *bptr;
4598                                         *bptr = (uint8_t)
4599                                             (*bptr & ~(BIT_6|BIT_5|BIT_4));
4600                                         (void) ql_abort_isp(ha);
4601                                         *bptr = (uint8_t)cnt;
4602                                 }
4603                         }
4604 
4605                         /* Shutdown IP. */
4606                         if (pha->flags & IP_INITIALIZED) {
4607                                 (void) ql_shutdown_ip(pha);
4608                         }
4609 
4610                         lb = (lbp_t *)cmd->pm_cmd_buf;
4611                         lb->transfer_count =
4612                             (uint32_t)cmd->pm_data_len;
4613                         lb->transfer_segment_count = 0;
4614                         lb->receive_segment_count = 0;
4615                         lb->transfer_data_address =
4616                             buffer_xmt.cookie.dmac_address;
4617                         lb->receive_data_address =
4618                             buffer_rcv.cookie.dmac_address;
4619 
4620                         if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4621                                 (void) ql_set_loop_point(ha, lb->options);
4622                         }
4623 
4624                         if (ql_loop_back(ha, 0, lb,
4625                             buffer_xmt.cookie.dmac_notused,
4626                             buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4627                                 bzero((void *)cmd->pm_stat_buf,
4628                                     cmd->pm_stat_len);
4629                                 ddi_rep_get8(buffer_rcv.acc_handle,
4630                                     (uint8_t *)cmd->pm_stat_buf,
4631                                     (uint8_t *)buffer_rcv.bp,
4632                                     cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4633                                 rval = FC_SUCCESS;
4634                         } else {
4635                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4636                                 rval = FC_FAILURE;
4637                         }
4638 
4639                         if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4640                                 (void) ql_set_loop_point(ha, 0);
4641                         }
4642 
4643                         ql_free_phys(ha, &buffer_xmt);
4644                         ql_free_phys(ha, &buffer_rcv);
4645 
4646                         /* Needed to recover the f/w */
4647                         set_flags |= ISP_ABORT_NEEDED;
4648 
4649                         /* Restart IP if it was shutdown. */
4650                         if (pha->flags & IP_ENABLED &&
4651                             !(pha->flags & IP_INITIALIZED)) {
4652                                 (void) ql_initialize_ip(pha);
4653                                 ql_isp_rcvbuf(pha);
4654                         }
4655 
4656                         break;
4657                 case QL_DIAG_ECHO: {
4658                         /*
4659                          * Issue an echo command with a user-supplied
4660                          * data pattern and destination address.
4661                          */
4662                         echo_t          echo;           /* temp echo struct */
 
4692                                 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4693                                     " cmdl1=%lxh, statl2=%lxh\n",
4694                                     cmd->pm_cmd_len, cmd->pm_stat_len);
4695                                 rval = FC_TOOMANY;
4696                                 break;
4697                         }
4698                         /* add four bytes for the opcode */
4699                         echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4700 
4701                         /*
4702                          * Are we using 32- or 64-bit addressing?
4703                          * We need to get the appropriate
4704                          * DMA memory and set the command options:
4705                          * 64-bit (bit 6) or 32-bit
4706                          * (no bit 6) addressing.
4707                          * While we are at it, let's ask for a
4708                          * real echo (bit 15).
4709                          */
4710                         echo.options = BIT_15;
4711                         if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4712                             !(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
4713                                 echo.options = (uint16_t)
4714                                     (echo.options | BIT_6);
4715                         }
4716 
4717                         /*
4718                          * Set up the DMA mappings for the
4719                          * output and input data buffers.
4720                          * First the output buffer
4721                          */
4722                         if (ql_get_dma_mem(ha, &buffer_xmt,
4723                             (uint32_t)(cmd->pm_data_len + 4),
4724                             LITTLE_ENDIAN_DMA,
4725                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4726                                 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4727                                 rval = FC_NOMEM;
4728                                 break;
4729                         }
4730                         echo.transfer_data_address = buffer_xmt.cookie;
4731 
4732                         /* Next the input buffer */
 
4781                                 rval = FC_FAILURE;
4782                         }
4783 
4784                         /* Restart IP if it was shutdown. */
4785                         if (pha->flags & IP_ENABLED &&
4786                             !(pha->flags & IP_INITIALIZED)) {
4787                                 (void) ql_initialize_ip(pha);
4788                                 ql_isp_rcvbuf(pha);
4789                         }
4790                         /* free up our DMA buffers */
4791                         ql_free_phys(ha, &buffer_xmt);
4792                         ql_free_phys(ha, &buffer_rcv);
4793                         break;
4794                 }
4795                 default:
4796                         EL(ha, "unknown=%xh, FC_PORT_DIAG "
4797                             "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4798                         rval = FC_INVALID_REQUEST;
4799                         break;
4800                 }
4801                 ql_restart_driver(ha);
4802                 break;
4803         case FC_PORT_LINK_STATE:
4804                 /* Check for name equal to null. */
4805                 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4806                     index++) {
4807                         if (cmd->pm_cmd_buf[index] != 0) {
4808                                 break;
4809                         }
4810                 }
4811 
4812                 /* If name not null. */
4813                 if (index < 8 && cmd->pm_cmd_len >= 8) {
4814                         /* Locate device queue. */
4815                         tq = NULL;
4816                         for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4817                             tq == NULL; index++) {
4818                                 for (link = ha->dev[index].first; link != NULL;
4819                                     link = link->next) {
4820                                         tq = link->base_address;
4821 
 
4826                                                 tq = NULL;
4827                                         }
4828                                 }
4829                         }
4830 
4831                         if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4832                                 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4833                                 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4834                         } else {
4835                                 cnt = FC_PORT_SPEED_MASK(ha->state) |
4836                                     FC_STATE_OFFLINE;
4837                                 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4838                                 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4839                         }
4840                 } else {
4841                         cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4842                         cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4843                 }
4844                 break;
4845         case FC_PORT_INITIALIZE:
4846                 if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4847                         EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4848                             rval2);
4849                         ql_restart_driver(ha);
4850                         rval = FC_TRAN_BUSY;
4851                         break;
4852                 }
4853                 if (cmd->pm_cmd_len >= 8) {
4854                         tq = NULL;
4855                         for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4856                             tq == NULL; index++) {
4857                                 for (link = ha->dev[index].first; link != NULL;
4858                                     link = link->next) {
4859                                         tq = link->base_address;
4860 
4861                                         if (bcmp((void *)&tq->port_name[0],
4862                                             (void *)cmd->pm_cmd_buf, 8) == 0) {
4863                                                 if (!VALID_DEVICE_ID(ha,
4864                                                     tq->loop_id)) {
4865                                                         tq = NULL;
4866                                                 }
4867                                                 break;
4868                                         } else {
4869                                                 tq = NULL;
4870                                         }
4871                                 }
4872                         }
4873 
4874                         if (tq == NULL || ql_target_reset(ha, tq,
4875                             ha->loop_reset_delay) != QL_SUCCESS) {
4876                                 EL(ha, "failed, FC_PORT_INITIALIZE "
4877                                     "FC_FAILURE\n");
4878                                 rval = FC_FAILURE;
4879                         }
4880                 } else {
4881                         EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4882                             "clen=%lxh\n", cmd->pm_cmd_len);
4883 
4884                         rval = FC_FAILURE;
4885                 }
4886                 ql_restart_driver(ha);
4887                 break;
4888         case FC_PORT_RLS:
4889                 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4890                         EL(ha, "failed, buffer size passed: %lxh, "
4891                             "req: %lxh\n", cmd->pm_data_len,
4892                             (sizeof (fc_rls_acc_t)));
4893                         rval = FC_FAILURE;
4894                 } else if (LOOP_NOT_READY(pha)) {
4895                         EL(ha, "loop NOT ready\n");
4896                         bzero(cmd->pm_data_buf, cmd->pm_data_len);
4897                 } else if (ql_get_link_status(ha, ha->loop_id,
4898                     cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4899                         EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4900                         rval = FC_FAILURE;
4901 #ifdef _BIG_ENDIAN
4902                 } else {
4903                         fc_rls_acc_t            *rls;
4904 
4905                         rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4906                         LITTLE_ENDIAN_32(&rls->rls_link_fail);
4907                         LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4908                         LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4909                         LITTLE_ENDIAN_32(&rls->rls_prim_seq_err);
4910                         LITTLE_ENDIAN_32(&rls->rls_invalid_word);
4911                         LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4912 #endif /* _BIG_ENDIAN */
4913                 }
4914                 break;
4915         case FC_PORT_GET_NODE_ID:
4916                 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4917                     cmd->pm_data_buf) != QL_SUCCESS) {
4918                         EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4919                         rval = FC_FAILURE;
4920                 }
4921                 break;
4922         case FC_PORT_SET_NODE_ID:
4923                 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4924                     cmd->pm_data_buf) != QL_SUCCESS) {
4925                         EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4926                         rval = FC_FAILURE;
4927                 }
4928                 break;
4929         case FC_PORT_DOWNLOAD_FCODE:
4930                 if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4931                         EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4932                             rval2);
4933                         ql_restart_driver(ha);
4934                         rval = FC_TRAN_BUSY;
4935                         break;
4936                 }
4937                 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
4938                         rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4939                             (uint32_t)cmd->pm_data_len);
4940                 } else {
4941                         if (cmd->pm_data_buf[0] == 4 &&
4942                             cmd->pm_data_buf[8] == 0 &&
4943                             cmd->pm_data_buf[9] == 0x10 &&
4944                             cmd->pm_data_buf[10] == 0 &&
4945                             cmd->pm_data_buf[11] == 0) {
4946                                 rval = ql_24xx_load_flash(ha,
4947                                     (uint8_t *)cmd->pm_data_buf,
4948                                     (uint32_t)cmd->pm_data_len,
4949                                     ha->flash_fw_addr << 2);
4950                         } else {
4951                                 rval = ql_24xx_load_flash(ha,
4952                                     (uint8_t *)cmd->pm_data_buf,
4953                                     (uint32_t)cmd->pm_data_len, 0);
4954                         }
4955                 }
4956 
4957                 if (rval != QL_SUCCESS) {
4958                         EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4959                         rval = FC_FAILURE;
4960                 } else {
4961                         rval = FC_SUCCESS;
4962                 }
4963                 ql_reset_chip(ha);
4964                 set_flags |= ISP_ABORT_NEEDED;
4965                 ql_restart_driver(ha);
4966                 break;
4967 
4968         case FC_PORT_GET_P2P_INFO:
4969 
4970                 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4971                 if (cmd->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
4972                         EL(ha, "inadequate data length\n");
4973                         rval = FC_NOMEM;
4974                         break;
4975                 }
4976 
4977                 p2p_info = (fc_fca_p2p_info_t *)cmd->pm_data_buf;
4978 
4979                 if ((ha->topology & QL_N_PORT) &&
4980                     (ha->flags & POINT_TO_POINT)) {
4981                         p2p_info->fca_d_id = ha->d_id.b24;
4982                         p2p_info->d_id = ha->n_port->d_id.b24;
4983 
4984                         bcopy((void *) &ha->n_port->port_name[0],
4985                             (caddr_t)&p2p_info->pwwn, 8);
4986                         bcopy((void *) &ha->n_port->node_name[0],
4987                             (caddr_t)&p2p_info->nwwn, 8);
4988                         rval = FC_SUCCESS;
4989 
4990                         EL(ha, "P2P HID=%xh, d_id=%xh, WWPN=%02x%02x%02x%02x"
4991                             "%02x%02x%02x%02x : "
4992                             "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
4993                             p2p_info->fca_d_id, p2p_info->d_id,
4994                             ha->n_port->port_name[0],
4995                             ha->n_port->port_name[1], ha->n_port->port_name[2],
4996                             ha->n_port->port_name[3], ha->n_port->port_name[4],
4997                             ha->n_port->port_name[5], ha->n_port->port_name[6],
4998                             ha->n_port->port_name[7], ha->n_port->node_name[0],
4999                             ha->n_port->node_name[1], ha->n_port->node_name[2],
5000                             ha->n_port->node_name[3], ha->n_port->node_name[4],
5001                             ha->n_port->node_name[5], ha->n_port->node_name[6],
5002                             ha->n_port->node_name[7]);
5003                         break;
5004                 } else {
5005                         EL(ha, "No p2p info reported in non n2n topology\n");
5006                         rval = FC_BADCMD;
5007                 }
5008                 break;
5009 
5010         case FC_PORT_DOWNLOAD_FW:
5011                 EL(ha, "unsupported=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
5012                 rval = FC_BADCMD;
5013                 break;
5014         default:
5015                 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
5016                 rval = FC_BADCMD;
5017                 break;
5018         }
5019 
5020         /* Resume the driver and wait for any stall or transition to end. */
5021         ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
5022         timer = 0;
5023 
5024         while (timer++ < 3000 &&
5025             ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
5026                 ql_delay(ha, 10000);
5027         }
5028 
5029         if (rval != FC_SUCCESS) {
5030                 EL(ha, "failed, rval = %xh\n", rval);
5031         } else {
5032                 /*EMPTY*/
5033                 QL_PRINT_3(ha, "done\n");
5034         }
5035 
5036         return (rval);
5037 }
5038 
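     /*
      * Illustrative usage sketch for ql_port_manage() (assumes cmd points
      * at the transport's fc_fca_pm_t port management structure with the
      * pm_* fields referenced above; values are hypothetical):
      *
      *     uint32_t        dump_size = 0;
      *     int             rv;
      *
      *     cmd->pm_cmd_code = FC_PORT_GET_DUMP_SIZE;
      *     cmd->pm_data_buf = (caddr_t)&dump_size;
      *     cmd->pm_data_len = sizeof (dump_size);
      *     rv = ql_port_manage(fca_handle, cmd);
      *
      * On FC_SUCCESS, dump_size holds the RISC dump buffer size in bytes.
      */

     /*
      * ql_get_device
      *     Locates the device queue (target) associated with a D_ID.
      *
      * Input:
      *     fca_handle = handle setup by ql_bind_port().
      *     d_id = 24 bit destination port ID.
      *
      * Returns:
      *     Device queue pointer, NULL = not found or handle not bound.
      *
      * Context:
      *     Kernel context.
      */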
5039 static opaque_t
5040 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
5041 {
5042         port_id_t               id;
5043         ql_adapter_state_t      *ha;
5044         ql_tgt_t                *tq;
5045 
5046         id.r.rsvd_1 = 0;
5047         id.b24 = d_id.port_id;
5048 
5049         ha = ql_fca_handle_to_state(fca_handle);
5050         if (ha == NULL) {
5051                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
5052                     (void *)fca_handle);
5053                 return (NULL);
5054         }
5055         QL_PRINT_3(ha, "started, d_id=%xh\n", id.b24);
5056 
5057         tq = ql_d_id_to_queue(ha, id);
5058 
5059         if (tq == NULL && id.b24 != 0 && id.b24 != FS_BROADCAST) {
5060                 EL(ha, "failed, no tq available for d_id: %xh\n", id.b24);
5061         } else {
5062                 /*EMPTY*/
5063                 QL_PRINT_3(ha, "done\n");
5064         }
5065         return (tq);
5066 }
5067 
5068 /* ************************************************************************ */
5069 /*                      FCA Driver Local Support Functions.                 */
5070 /* ************************************************************************ */
5071 
5072 /*
5073  * ql_cmd_setup
5074  *      Verifies proper command.
5075  *
5076  * Input:
5077  *      fca_handle = handle setup by ql_bind_port().
5078  *      pkt = pointer to fc_packet.
5079  *      rval = pointer for return value.
5080  *
5081  * Returns:
5082  *      Adapter state pointer, NULL = failure.
5083  *
5084  * Context:
5085  *      Kernel context.
5086  */
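     /*
      * Illustrative usage sketch (the pattern implied by the Returns
      * section above; rval receives the FC_* status to hand back when
      * setup fails):
      *
      *     int                     rval;
      *     ql_adapter_state_t      *ha;
      *
      *     ha = ql_cmd_setup(fca_handle, pkt, &rval);
      *     if (ha == NULL) {
      *             return (rval);
      *     }
      */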
5087 static ql_adapter_state_t *
5088 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
5089 {
5090         ql_adapter_state_t      *ha, *pha;
5091         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
5092         ql_tgt_t                *tq;
5093         port_id_t               d_id;
5094 
5095         pkt->pkt_resp_resid = 0;
5096         pkt->pkt_data_resid = 0;
5097 
5098         /* check that the handle is assigned by this FCA */
5099         ha = ql_fca_handle_to_state(fca_handle);
5100         if (ha == NULL) {
5101                 *rval = FC_UNBOUND;
5102                 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
5103                     (void *)fca_handle);
5104                 return (NULL);
5105         }
5106         pha = ha->pha;
5107 
5108         QL_PRINT_3(ha, "started\n");
5109 
5110         if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
5111                 return (ha);
5112         }
5113 
5114         if (!(pha->flags & ONLINE)) {
5115                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
5116                 pkt->pkt_reason = FC_REASON_HW_ERROR;
5117                 *rval = FC_TRANSPORT_ERROR;
5118                 EL(ha, "failed, not online hf=%xh\n", pha->flags);
5119                 return (NULL);
5120         }
5121 
5122         /* Exit on loop down. */
5123         if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
5124             pha->task_daemon_flags & LOOP_DOWN &&
5125             pha->loop_down_timer <= pha->loop_down_abort_time) {
5126                 pkt->pkt_state = FC_PKT_PORT_OFFLINE;
5127                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5128                 *rval = FC_OFFLINE;
 
5138                         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5139                         tq = ql_d_id_to_queue(ha, d_id);
5140 
5141                         pkt->pkt_fca_device = (opaque_t)tq;
5142                 }
5143 
5144                 if (tq != NULL) {
5145                         DEVICE_QUEUE_LOCK(tq);
5146                         if (tq->flags & (TQF_RSCN_RCVD |
5147                             TQF_NEED_AUTHENTICATION)) {
5148                                 *rval = FC_DEVICE_BUSY;
5149                                 DEVICE_QUEUE_UNLOCK(tq);
5150                                 EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
5151                                     tq->flags, tq->d_id.b24);
5152                                 return (NULL);
5153                         }
5154                         DEVICE_QUEUE_UNLOCK(tq);
5155                 }
5156         }
5157 
5158         /* Check for packet already running. */
5159         if (sp->handle != 0) {
5160                 *rval = FC_DEVICE_BUSY;
5161                 cmn_err(CE_WARN, "%s(%d) already running pkt=%p, sp=%p, "
5162                     "sp->pkt=%p, sp->hdl=%x, spf=%x, cq=%p\n", QL_NAME,
5163                     ha->instance, (void *)pkt, (void *)sp, (void *)sp->pkt,
5164                     sp->handle, sp->flags, (void *)sp->cmd.head);
5165                 return (NULL);
5166         }
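        /*
         * Select a response queue for this command, rotating through the
         * available queues when more than one is configured.
         */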
5167         if (ha->rsp_queues_cnt > 1) {
5168                 ADAPTER_STATE_LOCK(ha);
5169                 sp->rsp_q_number = ha->rsp_q_number++;
5170                 if (ha->rsp_q_number == ha->rsp_queues_cnt) {
5171                         ha->rsp_q_number = 0;
5172                 }
5173                 ADAPTER_STATE_UNLOCK(ha);
5174         } else {
5175                 sp->rsp_q_number = 0;
5176         }
5177 
5178         /*
5179          * Check DMA pointers.
5180          */
5181         *rval = DDI_SUCCESS;
5182         if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
5183                 QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
5184 
5185                 *rval = qlc_fm_check_dma_handle(ha, pkt->pkt_cmd_dma);
5186                 if (*rval == DDI_FM_OK) {
5187                         *rval = qlc_fm_check_acc_handle(ha,
5188                             pkt->pkt_cmd_acc);
5189                 }
5190         }
5191 
5192         if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
5193             pkt->pkt_rsplen != 0) {
5194                 QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
5195 
5196                 *rval = qlc_fm_check_dma_handle(ha, pkt->pkt_resp_dma);
5197                 if (*rval == DDI_FM_OK) {
5198                         *rval = qlc_fm_check_acc_handle(ha,
5199                             pkt->pkt_resp_acc);
5200                 }
5201         }
5202 
5203         /*
5204          * Minimum branch conditional; Change it with care.
5205          */
5206         if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
5207             (pkt->pkt_datalen != 0)) != 0) {
5208                 QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
5209 
5210                 *rval = qlc_fm_check_dma_handle(ha, pkt->pkt_data_dma);
5211                 if (*rval == DDI_FM_OK) {
5212                         *rval = qlc_fm_check_acc_handle(ha,
5213                             pkt->pkt_data_acc);
5214                 }
5215         }
5216 
5217         if (*rval != DDI_FM_OK) {
5218                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5219                 pkt->pkt_reason = FC_REASON_DMA_ERROR;
5220                 pkt->pkt_expln = FC_EXPLN_NONE;
5221                 pkt->pkt_action = FC_ACTION_RETRYABLE;
5222 
5223                 /* Do command callback. */
5224                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
5225                         ql_io_comp(sp);
5226                 }
5227                 *rval = FC_BADPACKET;
5228                 EL(ha, "failed, bad DMA pointers\n");
5229                 return (NULL);
5230         }
5231 
5232         if (sp->magic_number != QL_FCA_BRAND) {
5233                 *rval = FC_BADPACKET;
5234                 EL(ha, "failed, magic number=%xh\n", sp->magic_number);
5235                 return (NULL);
5236         }
5237         *rval = FC_SUCCESS;
5238 
5239         QL_PRINT_3(ha, "done\n");
5240 
5241         return (ha);
5242 }
5243 
5244 /*
5245  * ql_els_plogi
5246  *      Issue an extended link service port login request.
5247  *
5248  * Input:
5249  *      ha = adapter state pointer.
5250  *      pkt = pointer to fc_packet.
5251  *
5252  * Returns:
5253  *      FC_SUCCESS - the packet was accepted for transport.
5254  *      FC_TRANSPORT_ERROR - a transport error occurred.
5255  *
5256  * Context:
5257  *      Kernel context.
5258  */
5259 static int
5260 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5261 {
5262         ql_tgt_t                *tq = NULL;
5263         port_id_t               d_id;
5264         la_els_logi_t           acc;
5265         class_svc_param_t       *class3_param;
5266         int                     ret;
5267         int                     rval = FC_SUCCESS;
5268 
5269         QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5270 
5271         TASK_DAEMON_LOCK(ha);
5272         if (!(ha->task_daemon_flags & STATE_ONLINE)) {
5273                 TASK_DAEMON_UNLOCK(ha);
5274                 QL_PRINT_3(ha, "offline done\n");
5275                 return (FC_OFFLINE);
5276         }
5277         TASK_DAEMON_UNLOCK(ha);
5278 
5279         bzero(&acc, sizeof (acc));
5280         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5281 
5282         ret = QL_SUCCESS;
5283 
5284         if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
5285                 /*
5286                  * In point-to-point topology, a PLOGI is sent after
5287                  * determining that this port has the N_Port login initiative.
5288                  */
5289                 ret = ql_p2p_plogi(ha, pkt);
5290         }
5291         if (ret == QL_CONSUMED) {
5292                 return (ret);
5293         }
5294 
5295         switch (ret = ql_login_port(ha, d_id)) {
5296         case QL_SUCCESS:
5297                 tq = ql_d_id_to_queue(ha, d_id);
5298                 break;
5299 
5300         case QL_LOOP_ID_USED:
5301                 if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
5302                         tq = ql_d_id_to_queue(ha, d_id);
5303                 }
5304                 break;
 
5310         if (ret != QL_SUCCESS) {
5311                 /*
5312                  * Invalidate this entry so as to seek a fresh loop ID
5313                  * in case the firmware reassigns it to something else.
5314                  */
5315                 tq = ql_d_id_to_queue(ha, d_id);
5316                 if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
5317                         tq->loop_id = PORT_NO_LOOP_ID;
5318                 }
5319         } else if (tq) {
5320                 (void) ql_get_port_database(ha, tq, PDF_ADISC);
5321         }
5322 
5323         if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
5324             (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
5325 
5326                 /* Build ACC. */
5327                 acc.ls_code.ls_code = LA_ELS_ACC;
5328                 acc.common_service.fcph_version = 0x2006;
5329                 acc.common_service.cmn_features = 0x8800;
5330                 acc.common_service.rx_bufsize =
5331                     ha->loginparams.common_service.rx_bufsize;
5332                 acc.common_service.conc_sequences = 0xff;
5333                 acc.common_service.relative_offset = 0x03;
5334                 acc.common_service.e_d_tov = 0x7d0;
5335 
5336                 bcopy((void *)&tq->port_name[0],
5337                     (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5338                 bcopy((void *)&tq->node_name[0],
5339                     (void *)&acc.node_ww_name.raw_wwn[0], 8);
5340 
5341                 class3_param = (class_svc_param_t *)&acc.class_3;
5342                 class3_param->class_valid_svc_opt = 0x8000;
5343                 class3_param->recipient_ctl = tq->class3_recipient_ctl;
5344                 class3_param->rcv_data_size = tq->class3_rcv_data_size;
5345                 class3_param->conc_sequences = tq->class3_conc_sequences;
5346                 class3_param->open_sequences_per_exch =
5347                     tq->class3_open_sequences_per_exch;
5348 
5349                 if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
5350                         acc.ls_code.ls_code = LA_ELS_RJT;
5351                         pkt->pkt_state = FC_PKT_TRAN_BSY;
5352                         pkt->pkt_reason = FC_REASON_XCHG_BSY;
5353                         EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
5354                         rval = FC_TRAN_BUSY;
5355                 } else {
5356                         DEVICE_QUEUE_LOCK(tq);
5357                         tq->logout_sent = 0;
5358                         tq->flags &= ~TQF_NEED_AUTHENTICATION;
5359                         if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
5360                                 tq->flags |= TQF_IIDMA_NEEDED;
5361                         }
5362                         DEVICE_QUEUE_UNLOCK(tq);
5363 
5364                         if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
5365                                 TASK_DAEMON_LOCK(ha);
5366                                 ha->task_daemon_flags |= TD_IIDMA_NEEDED;
5367                                 TASK_DAEMON_UNLOCK(ha);
5368                         }
5369 
5370                         pkt->pkt_state = FC_PKT_SUCCESS;
5371                 }
5372         } else {
5373                 /* Build RJT. */
5374                 acc.ls_code.ls_code = LA_ELS_RJT;
5375 
5376                 switch (ret) {
5377                 case QL_FUNCTION_TIMEOUT:
5378                         pkt->pkt_state = FC_PKT_TIMEOUT;
5379                         pkt->pkt_reason = FC_REASON_HW_ERROR;
5380                         break;
5381 
5382                 case QL_MEMORY_ALLOC_FAILED:
5383                         pkt->pkt_state = FC_PKT_LOCAL_BSY;
5384                         pkt->pkt_reason = FC_REASON_NOMEM;
 
5403         }
5404 
5405         if (tq != NULL) {
5406                 DEVICE_QUEUE_LOCK(tq);
5407                 tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
5408                 if (rval == FC_TRAN_BUSY) {
5409                         if (tq->d_id.b24 != BROADCAST_ADDR) {
5410                                 tq->flags |= TQF_NEED_AUTHENTICATION;
5411                         }
5412                 }
5413                 DEVICE_QUEUE_UNLOCK(tq);
5414         }
5415 
5416         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5417             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5418 
5419         if (rval != FC_SUCCESS) {
5420                 EL(ha, "failed, rval = %xh\n", rval);
5421         } else {
5422                 /*EMPTY*/
5423                 QL_PRINT_3(ha, "done\n");
5424         }
5425         return (rval);
5426 }
5427 
5428 /*
5429  * ql_p2p_plogi
5430  *      Start an extended link service port login request using
5431  *      an ELS Passthru iocb.
5432  *
5433  * Input:
5434  *      ha = adapter state pointer.
5435  *      pkt = pointer to fc_packet.
5436  *
5437  * Returns:
5438  *      QL_CONSUMED - the iocb was queued for transport.
5439  *
5440  * Context:
5441  *      Kernel context.
5442  */
5443 static int
5444 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5445 {
5446         uint16_t        id;
5447         ql_tgt_t        tmp;
5448         ql_tgt_t        *tq = &tmp;
5449         int             rval;
5450         port_id_t       d_id;
5451         ql_srb_t        *sp = (ql_srb_t *)pkt->pkt_fca_private;
5452         uint16_t        loop_id;
5453 
5454         tq->d_id.b.al_pa = 0;
5455         tq->d_id.b.area = 0;
5456         tq->d_id.b.domain = 0;
5457 
5458         /*
5459          * Verify that the port database hasn't moved beneath our feet by
5460          * switching to the appropriate n_port_handle if necessary.  This is
5461          * less unpleasant than the error recovery if the wrong one is used.
5462          */
5463         for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5464                 tq->loop_id = id;
5465                 rval = ql_get_port_database(ha, tq, PDF_NONE);
5466                 EL(ha, "rval=%xh, id=%x\n", rval, id);
5467                 /* check all the ones not logged in for possible use */
5468                 if (rval == QL_NOT_LOGGED_IN) {
5469                         if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5470                                 ha->n_port->n_port_handle = tq->loop_id;
5471                                 EL(ha, "loop_id=%xh, master state=%x\n",
5472                                     tq->loop_id, tq->master_state);
5473                                 break;
5474                         }
5475                         /*
5476                          * Use a 'port unavailable' entry only
5477                          * if we used it before.
5478                          */
5479                         if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5480                                 /* if the port_id matches, reuse it */
5481                                 if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5482                                         EL(ha, "n_port_handle loop_id=%xh, "
5483                                             "master state=%xh\n",
5484                                             tq->loop_id, tq->master_state);
5485                                         break;
5486                                 } else if (tq->loop_id ==
5487                                     ha->n_port->n_port_handle) {
5488                                     /* avoid a lint error */
5489                                         uint16_t *hndl;
5490                                         uint16_t val;
5491 
5492                                         hndl = &ha->n_port->n_port_handle;
5493                                         val = *hndl;
5494                                         val++;
5495                                         val++;
5496                                         *hndl = val;
5497                                 }
5498                         EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
5499                             "master state=%x\n", rval, id, tq->loop_id,
5500                             tq->master_state);
5501                         }
5502 
5503                 }
5504                 if (rval == QL_SUCCESS) {
5505                         if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5506                                 ha->n_port->n_port_handle = tq->loop_id;
5507                                 EL(ha, "n_port_handle =%xh, master state=%x\n",
5508                                     tq->loop_id, tq->master_state);
5509                                 break;
5510                         }
5511                         EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
5512                             "master state=%x\n", rval, id, tq->loop_id,
5513                             tq->master_state);
5514                 }
5515         }
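        /* Make the command payload visible to the device before the IOCB is issued. */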
5516         (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5517 
5518         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5519 
5520         /*
5521          * If the firmware does not have the loop ID ready, the driver assumes
5522          * 0, since this is point-to-point and there is only one remote port.
5523          */
5524         if (id == LAST_LOCAL_LOOP_ID + 1) {
5525                 EL(ha, "out of range loop id; rval=%xh, id=%xh, d_id=%xh\n",
5526                     rval, id, d_id.b24);
5527         } else {
5528                 EL(ha, "remote port loop_id '%x' has been logged in, d_id=%x\n",
5529                     id, d_id.b24);
5530         }
5531 
5532         tq = ql_d_id_to_queue(ha, d_id);
5533 
5534         /*
5535          * LV could use any d_id it likes.
5536          * tq may not be available yet.
5537          */
5538         if (tq == NULL) {
5539                 if (id != LAST_LOCAL_LOOP_ID + 1) {
5540                         loop_id = id;
5541                 } else {
5542                         loop_id = 0;
5543                 }
5544                 /* Acquire adapter state lock. */
5545                 ADAPTER_STATE_LOCK(ha);
5546 
5547                 tq = ql_dev_init(ha, d_id, loop_id);
5548 
5549                 ADAPTER_STATE_UNLOCK(ha);
5550         }
5551 
5552         /*
5553          * LUN 0 should always be allocated, since tq is
5554          * derived from the LUN queue in ql_els_passthru_entry()
5555          * in the interrupt handler.
5556          */
5557         sp->lun_queue = ql_lun_queue(ha, tq, 0);
5558 
5559         DEVICE_QUEUE_LOCK(tq);
5560         ql_timeout_insert(ha, tq, sp);
5561         DEVICE_QUEUE_UNLOCK(tq);
5562 
5563         ql_start_iocb(ha, sp);
5564 
5565         return (QL_CONSUMED);
5566 }
5567 
5568 
5569 /*
5570  * ql_els_flogi
5571  *      Issue an extended link service fabric login request.
5572  *
5573  * Input:
5574  *      ha = adapter state pointer.
5575  *      pkt = pointer to fc_packet.
5576  *
5577  * Returns:
5578  *      FC_SUCCESS - the packet was accepted for transport.
5579  *      FC_TRANSPORT_ERROR - a transport error occurred.
5580  *
5581  * Context:
5582  *      Kernel context.
5583  */
5584 static int
5585 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5586 {
5587         ql_tgt_t                *tq = NULL;
5588         port_id_t               d_id;
5589         la_els_logi_t           acc;
5590         class_svc_param_t       *class3_param;
5591         int                     rval = FC_SUCCESS;
5592         int                     accept = 0;
5593 
5594         QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5595 
5596         bzero(&acc, sizeof (acc));
5597         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5598 
5599         if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
5600                 /*
5601                  * A d_id of zero in a FLOGI accept response in a point-to-point
5602                  * topology triggers evaluation of N_Port login initiative.
5603                  */
5604                 pkt->pkt_resp_fhdr.d_id = 0;
5605                 /*
5606                  * An N_Port already logged in with the firmware
5607                  * will have the only database entry.
5608                  */
5609                 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5610                         tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5611                 }
5612 
5613                 if (tq != NULL) {
5614                         /*
5615                          * If the target port has initiative send
5616                          * up a PLOGI about the new device.
5617                          */
5618                         if (ql_wwn_cmp(ha, (la_wwn_t *)tq->port_name,
5619                             (la_wwn_t *)ha->loginparams.nport_ww_name.raw_wwn)
5620                             == 1) {
5621                                 ha->send_plogi_timer = 3;
5622                         } else {
5623                                 ha->send_plogi_timer = 0;
5624                         }
5625                         pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5626                 } else {
5627                         /*
5628                          * An N_Port not logged in with the firmware will not
5629                          * have a database entry.  We accept anyway and rely
5630                          * on a PLOGI from the upper layers to set the d_id
5631                          * and s_id.
5632                          */
5633                         accept = 1;
5634                 }
5635         } else {
5636                 tq = ql_d_id_to_queue(ha, d_id);
5637         }
5638         if ((tq != NULL) || (accept != 0)) {
5639                 /* Build ACC. */
5640                 pkt->pkt_state = FC_PKT_SUCCESS;
5641                 class3_param = (class_svc_param_t *)&acc.class_3;
5642 
5643                 acc.ls_code.ls_code = LA_ELS_ACC;
5644                 acc.common_service.fcph_version = 0x2006;
5645                 if (ha->topology & QL_N_PORT) {
5646                         /* clear F_Port indicator */
5647                         acc.common_service.cmn_features = 0x0800;
5648                 } else {
5649                         acc.common_service.cmn_features = 0x1b00;
5650                 }
5651                 acc.common_service.rx_bufsize =
5652                     ha->loginparams.common_service.rx_bufsize;
5653                 acc.common_service.conc_sequences = 0xff;
5654                 acc.common_service.relative_offset = 0x03;
5655                 acc.common_service.e_d_tov = 0x7d0;
5656                 if (accept) {
5657                         /* Use the saved N_Port WWNN and WWPN */
5658                         if (ha->n_port != NULL) {
5659                                 bcopy((void *)&ha->n_port->port_name[0],
5660                                     (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5661                                 bcopy((void *)&ha->n_port->node_name[0],
5662                                     (void *)&acc.node_ww_name.raw_wwn[0], 8);
5663                                 /* mark service options invalid */
5664                                 class3_param->class_valid_svc_opt = 0x0800;
5665                         } else {
5666                                 EL(ha, "ha->n_port is NULL\n");
5667                                 /* Build RJT. */
5668                                 acc.ls_code.ls_code = LA_ELS_RJT;
5669 
5670                                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5671                                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5672                         }
 
5684                             tq->class3_conc_sequences;
5685                         class3_param->open_sequences_per_exch =
5686                             tq->class3_open_sequences_per_exch;
5687                 }
5688         } else {
5689                 /* Build RJT. */
5690                 acc.ls_code.ls_code = LA_ELS_RJT;
5691 
5692                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5693                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5694                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5695         }
5696 
5697         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5698             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5699 
5700         if (rval != FC_SUCCESS) {
5701                 EL(ha, "failed, rval = %xh\n", rval);
5702         } else {
5703                 /*EMPTY*/
5704                 QL_PRINT_3(ha, "done\n");
5705         }
5706         return (rval);
5707 }
5708 
5709 /*
5710  * ql_els_logo
5711  *      Issue an extended link service logout request.
5712  *
5713  * Input:
5714  *      ha = adapter state pointer.
5715  *      pkt = pointer to fc_packet.
5716  *
5717  * Returns:
5718  *      FC_SUCCESS - the packet was accepted for transport.
5719  *      FC_TRANSPORT_ERROR - a transport error occurred.
5720  *
5721  * Context:
5722  *      Kernel context.
5723  */
5724 static int
5725 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5726 {
5727         port_id_t       d_id;
5728         ql_tgt_t        *tq;
5729         la_els_logo_t   acc;
5730 
5731         QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5732 
5733         bzero(&acc, sizeof (acc));
5734         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5735 
5736         tq = ql_d_id_to_queue(ha, d_id);
5737         if (tq) {
5738                 DEVICE_QUEUE_LOCK(tq);
5739                 if (tq->d_id.b24 == BROADCAST_ADDR) {
5740                         DEVICE_QUEUE_UNLOCK(tq);
5741                         return (FC_SUCCESS);
5742                 }
5743 
5744                 tq->flags |= TQF_NEED_AUTHENTICATION;
5745 
5746                 do {
5747                         DEVICE_QUEUE_UNLOCK(tq);
5748                         (void) ql_abort_device(ha, tq, 1);
5749 
5750                         /*
5751                          * Wait for commands to drain in F/W (doesn't
 
5759                 DEVICE_QUEUE_UNLOCK(tq);
5760         }
5761 
5762         if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5763                 /* Build ACC. */
5764                 acc.ls_code.ls_code = LA_ELS_ACC;
5765 
5766                 pkt->pkt_state = FC_PKT_SUCCESS;
5767         } else {
5768                 /* Build RJT. */
5769                 acc.ls_code.ls_code = LA_ELS_RJT;
5770 
5771                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5772                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5773                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5774         }
5775 
5776         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5777             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5778 
5779         QL_PRINT_3(ha, "done\n");
5780 
5781         return (FC_SUCCESS);
5782 }
5783 
5784 /*
5785  * ql_els_prli
5786  *      Issue an extended link service process login request.
5787  *
5788  * Input:
5789  *      ha = adapter state pointer.
5790  *      pkt = pointer to fc_packet.
5791  *
5792  * Returns:
5793  *      FC_SUCCESS - the packet was accepted for transport.
5794  *      FC_TRANSPORT_ERROR - a transport error occurred.
5795  *
5796  * Context:
5797  *      Kernel context.
5798  */
5799 static int
5800 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5801 {
5802         ql_tgt_t                *tq;
5803         port_id_t               d_id;
5804         la_els_prli_t           acc;
5805         prli_svc_param_t        *param;
5806         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
5807         int                     rval = FC_SUCCESS;
5808 
5809         QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5810 
5811         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5812 
5813         tq = ql_d_id_to_queue(ha, d_id);
5814         if (tq != NULL) {
5815                 (void) ql_get_port_database(ha, tq, PDF_NONE);
5816 
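                /*
                 * In N_Port (point-to-point) topology with the PLOGI already
                 * completed, queue the PRLI to the firmware; otherwise build
                 * the ACC locally from the port database.
                 */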
5817                 if ((ha->topology & QL_N_PORT) &&
5818                     (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5819 
5820                         /* always set lun_queue */
5821                         sp->lun_queue = ql_lun_queue(ha, tq, 0);
5822 
5823                         DEVICE_QUEUE_LOCK(tq);
5824                         ql_timeout_insert(ha, tq, sp);
5825                         DEVICE_QUEUE_UNLOCK(tq);
5826                         ql_start_iocb(ha, sp);
5827                         rval = QL_CONSUMED;
5828                 } else {
5829                         /* Build ACC. */
5830                         bzero(&acc, sizeof (acc));
5831                         acc.ls_code = LA_ELS_ACC;
5832                         acc.page_length = 0x10;
5833                         acc.payload_length = tq->prli_payload_length;
5834 
5835                         param = (prli_svc_param_t *)&acc.service_params[0];
5836                         param->type = 0x08;
5837                         param->rsvd = 0x00;
5838                         param->process_assoc_flags = tq->prli_svc_param_word_0;
5839                         param->process_flags = tq->prli_svc_param_word_3;
5840 
5841                         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5842                             (uint8_t *)pkt->pkt_resp, sizeof (acc),
5843                             DDI_DEV_AUTOINCR);
5844 
5845                         pkt->pkt_state = FC_PKT_SUCCESS;
5846                 }
5847         } else {
5848                 /* In case of P2P, tq might not have been created yet. */
5849                 if (ha->topology & QL_N_PORT) {
5850 
5851                         /* Acquire adapter state lock. */
5852                         ADAPTER_STATE_LOCK(ha);
5853                         tq = ql_dev_init(ha, d_id, ha->n_port->n_port_handle);
5854                         ADAPTER_STATE_UNLOCK(ha);
5855 
5856                         /* always alloc lun #0 */
5857                         sp->lun_queue = ql_lun_queue(ha, tq, 0);
5858                         bcopy((void *)&ha->n_port->port_name[0],
5859                             (void *) &tq->port_name[0], 8);
5860                         bcopy((void *)&ha->n_port->node_name[0],
5861                             (void *) &tq->node_name[0], 8);
5862 
5863                         DEVICE_QUEUE_LOCK(tq);
5864                         ql_timeout_insert(ha, tq, sp);
5865                         DEVICE_QUEUE_UNLOCK(tq);
5866 
5867                         ql_start_iocb(ha, sp);
5868                         rval = QL_CONSUMED;
5869 
5870                 } else {
5871 
5872                         la_els_rjt_t rjt;
5873 
5874                         /* Build RJT. */
5875                         bzero(&rjt, sizeof (rjt));
5876                         rjt.ls_code.ls_code = LA_ELS_RJT;
5877 
5878                         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5879                             (uint8_t *)pkt->pkt_resp, sizeof (rjt),
5880                             DDI_DEV_AUTOINCR);
5881 
5882                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5883                         pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5884                         EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5885                 }
5886         }
5887 
5888         if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5889                 EL(ha, "failed, rval = %xh\n", rval);
5890         } else {
5891                 /*EMPTY*/
5892                 QL_PRINT_3(ha, "done\n");
5893         }
5894         return (rval);
5895 }
5896 
5897 /*
5898  * ql_els_prlo
5899  *      Issue an extended link service process logout request.
5900  *
5901  * Input:
5902  *      ha = adapter state pointer.
5903  *      pkt = pointer to fc_packet.
5904  *
5905  * Returns:
5906  *      FC_SUCCESS - the packet was accepted for transport.
5907  *      FC_TRANSPORT_ERROR - a transport error occurred.
5908  *
5909  * Context:
5910  *      Kernel context.
5911  */
5912 /* ARGSUSED */
5913 static int
5914 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5915 {
5916         la_els_prli_t   acc;
5917 
5918         QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5919 
5920         /* Build ACC. */
5921         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5922             (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5923 
5924         acc.ls_code = LA_ELS_ACC;
5925         acc.service_params[2] = 1;
5926 
5927         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5928             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5929 
5930         pkt->pkt_state = FC_PKT_SUCCESS;
5931 
5932         QL_PRINT_3(ha, "done\n");
5933 
5934         return (FC_SUCCESS);
5935 }
5936 
5937 /*
5938  * ql_els_adisc
5939  *      Issue an extended link service address discovery request.
5940  *
5941  * Input:
5942  *      ha = adapter state pointer.
5943  *      pkt = pointer to fc_packet.
5944  *
5945  * Returns:
5946  *      FC_SUCCESS - the packet was accepted for transport.
5947  *      FC_TRANSPORT_ERROR - a transport error occurred.
5948  *
5949  * Context:
5950  *      Kernel context.
5951  */
5952 static int
5953 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5954 {
5955         ql_dev_id_list_t        *list;
5956         uint32_t                list_size;
5957         ql_link_t               *link;
5958         ql_tgt_t                *tq;
5959         ql_lun_t                *lq;
5960         port_id_t               d_id;
5961         la_els_adisc_t          acc;
5962         uint16_t                index, loop_id;
5963         ql_mbx_data_t           mr;
5964 
5965         QL_PRINT_3(ha, "started\n");
5966 
5967         bzero(&acc, sizeof (acc));
5968         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5969 
5970         /*
5971          * MBC_GET_PORT_DATABASE causes ADISC to go out to
5972          * the device from the firmware
5973          */
5974         index = ql_alpa_to_index[d_id.b.al_pa];
5975         tq = NULL;
5976         for (link = ha->dev[index].first; link != NULL; link = link->next) {
5977                 tq = link->base_address;
5978                 if (tq->d_id.b24 == d_id.b24) {
5979                         break;
5980                 } else {
5981                         tq = NULL;
5982                 }
5983         }
5984 
5985         if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
 
6040                 bcopy((void *)&tq->port_name[0],
6041                     (void *)&acc.port_wwn.raw_wwn[0], 8);
6042                 bcopy((void *)&tq->node_name[0],
6043                     (void *)&acc.node_wwn.raw_wwn[0], 8);
6044 
6045                 acc.nport_id.port_id = tq->d_id.b24;
6046 
6047                 pkt->pkt_state = FC_PKT_SUCCESS;
6048         } else {
6049                 /* Build RJT. */
6050                 acc.ls_code.ls_code = LA_ELS_RJT;
6051 
6052                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6053                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6054                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6055         }
6056 
6057         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6058             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6059 
6060         QL_PRINT_3(ha, "done\n");
6061 
6062         return (FC_SUCCESS);
6063 }
6064 
6065 /*
6066  * ql_els_linit
6067  *      Issue an extended link service loop initialize request.
6068  *
6069  * Input:
6070  *      ha = adapter state pointer.
6071  *      pkt = pointer to fc_packet.
6072  *
6073  * Returns:
6074  *      FC_SUCCESS - the packet was accepted for transport.
6075  *      FC_TRANSPORT_ERROR - a transport error occurred.
6076  *
6077  * Context:
6078  *      Kernel context.
6079  */
6080 static int
6081 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
6082 {
6083         ddi_dma_cookie_t        *cp;
6084         uint32_t                cnt;
6085         conv_num_t              n;
6086         port_id_t               d_id;
6087 
6088         QL_PRINT_3(ha, "started\n");
6089 
6090         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6091         if (ha->topology & QL_FABRIC_CONNECTION) {
6092                 fc_linit_req_t els;
6093                 lfa_cmd_t lfa;
6094 
6095                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6096                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6097 
6098                 /* Setup LFA mailbox command data. */
6099                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
6100 
6101                 lfa.resp_buffer_length[0] = 4;
6102 
6103                 cp = pkt->pkt_resp_cookie;
6104                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
6105                         n.size64 = (uint64_t)cp->dmac_laddress;
6106                         LITTLE_ENDIAN_64(&n.size64);
6107                 } else {
6108                         n.size32[0] = LSD(cp->dmac_laddress);
6109                         LITTLE_ENDIAN_32(&n.size32[0]);
6110                         n.size32[1] = MSD(cp->dmac_laddress);
6111                         LITTLE_ENDIAN_32(&n.size32[1]);
 
6130                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
6131                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
6132                 } else {
6133                         pkt->pkt_state = FC_PKT_SUCCESS;
6134                 }
6135         } else {
6136                 fc_linit_resp_t rjt;
6137 
6138                 /* Build RJT. */
6139                 bzero(&rjt, sizeof (rjt));
6140                 rjt.ls_code.ls_code = LA_ELS_RJT;
6141 
6142                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
6143                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
6144 
6145                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6146                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6147                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6148         }
6149 
6150         QL_PRINT_3(ha, "done\n");
6151 
6152         return (FC_SUCCESS);
6153 }
6154 
6155 /*
6156  * ql_els_lpc
6157  *      Issue an extended link service loop control request.
6158  *
6159  * Input:
6160  *      ha = adapter state pointer.
6161  *      pkt = pointer to fc_packet.
6162  *
6163  * Returns:
6164  *      FC_SUCCESS - the packet was accepted for transport.
6165  *      FC_TRANSPORT_ERROR - a transport error occurred.
6166  *
6167  * Context:
6168  *      Kernel context.
6169  */
6170 static int
6171 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
6172 {
6173         ddi_dma_cookie_t        *cp;
6174         uint32_t                cnt;
6175         conv_num_t              n;
6176         port_id_t               d_id;
6177 
6178         QL_PRINT_3(ha, "started\n");
6179 
6180         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6181         if (ha->topology & QL_FABRIC_CONNECTION) {
6182                 ql_lpc_t els;
6183                 lfa_cmd_t lfa;
6184 
6185                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6186                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6187 
6188                 /* Setup LFA mailbox command data. */
6189                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
6190 
6191                 lfa.resp_buffer_length[0] = 4;
6192 
6193                 cp = pkt->pkt_resp_cookie;
6194                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
6195                         n.size64 = (uint64_t)(cp->dmac_laddress);
6196                         LITTLE_ENDIAN_64(&n.size64);
6197                 } else {
6198                         n.size32[0] = cp->dmac_address;
6199                         LITTLE_ENDIAN_32(&n.size32[0]);
6200                         n.size32[1] = 0;
6201                 }
6202 
6203                 /* Set buffer address. */
6204                 for (cnt = 0; cnt < 8; cnt++) {
6205                         lfa.resp_buffer_address[cnt] = n.size8[cnt];
6206                 }
6207 
6208                 lfa.subcommand_length[0] = 20;
6209                 n.size32[0] = d_id.b24;
6210                 LITTLE_ENDIAN_32(&n.size32[0]);
6211                 lfa.addr[0] = n.size8[0];
6212                 lfa.addr[1] = n.size8[1];
6213                 lfa.addr[2] = n.size8[2];
6214                 lfa.subcommand[1] = 0x71;
6215                 lfa.payload[4] = els.port_control;
6216                 bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 16);
6217 
6218                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
6219                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
6220                 } else {
6221                         pkt->pkt_state = FC_PKT_SUCCESS;
6222                 }
6223         } else {
6224                 ql_lpc_resp_t rjt;
6225 
6226                 /* Build RJT. */
6227                 bzero(&rjt, sizeof (rjt));
6228                 rjt.ls_code.ls_code = LA_ELS_RJT;
6229 
6230                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
6231                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
6232 
6233                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6234                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6235                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6236         }
6237 
6238         QL_PRINT_3(ha, "done\n");
6239 
6240         return (FC_SUCCESS);
6241 }
6242 
6243 /*
6244  * ql_els_lsts
6245  *      Issue an extended link service loop status request.
6246  *
6247  * Input:
6248  *      ha = adapter state pointer.
6249  *      pkt = pointer to fc_packet.
6250  *
6251  * Returns:
6252  *      FC_SUCCESS - the packet was accepted for transport.
6253  *      FC_TRANSPORT_ERROR - a transport error occurred.
6254  *
6255  * Context:
6256  *      Kernel context.
6257  */
6258 static int
6259 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
6260 {
6261         ddi_dma_cookie_t        *cp;
6262         uint32_t                cnt;
6263         conv_num_t              n;
6264         port_id_t               d_id;
6265 
6266         QL_PRINT_3(ha, "started\n");
6267 
6268         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6269         if (ha->topology & QL_FABRIC_CONNECTION) {
6270                 fc_lsts_req_t els;
6271                 lfa_cmd_t lfa;
6272 
6273                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6274                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6275 
6276                 /* Setup LFA mailbox command data. */
6277                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
6278 
6279                 lfa.resp_buffer_length[0] = 84;
6280 
6281                 cp = pkt->pkt_resp_cookie;
6282                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
6283                         n.size64 = cp->dmac_laddress;
6284                         LITTLE_ENDIAN_64(&n.size64);
6285                 } else {
6286                         n.size32[0] = cp->dmac_address;
6287                         LITTLE_ENDIAN_32(&n.size32[0]);
6288                         n.size32[1] = 0;
6289                 }
 
6304                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
6305                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
6306                 } else {
6307                         pkt->pkt_state = FC_PKT_SUCCESS;
6308                 }
6309         } else {
6310                 fc_lsts_resp_t rjt;
6311 
6312                 /* Build RJT. */
6313                 bzero(&rjt, sizeof (rjt));
6314                 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
6315 
6316                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
6317                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
6318 
6319                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6320                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6321                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6322         }
6323 
6324         QL_PRINT_3(ha, "done\n");
6325 
6326         return (FC_SUCCESS);
6327 }
6328 
6329 /*
6330  * ql_els_scr
6331  *      Issue an extended link service state change registration request.
6332  *
6333  * Input:
6334  *      ha = adapter state pointer.
6335  *      pkt = pointer to fc_packet.
6336  *
6337  * Returns:
6338  *      FC_SUCCESS - the packet was accepted for transport.
6339  *      FC_TRANSPORT_ERROR - a transport error occurred.
6340  *
6341  * Context:
6342  *      Kernel context.
6343  */
6344 static int
6345 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
6346 {
6347         fc_scr_resp_t   acc;
6348 
6349         QL_PRINT_3(ha, "started\n");
6350 
6351         bzero(&acc, sizeof (acc));
6352         if (ha->topology & QL_FABRIC_CONNECTION) {
6353                 fc_scr_req_t els;
6354 
6355                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6356                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6357 
6358                 if (ql_send_change_request(ha, els.scr_func) ==
6359                     QL_SUCCESS) {
6360                         /* Build ACC. */
6361                         acc.scr_acc = LA_ELS_ACC;
6362 
6363                         pkt->pkt_state = FC_PKT_SUCCESS;
6364                 } else {
6365                         /* Build RJT. */
6366                         acc.scr_acc = LA_ELS_RJT;
6367 
6368                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
6369                         pkt->pkt_reason = FC_REASON_HW_ERROR;
6370                         EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
6371                 }
6372         } else {
6373                 /* Build RJT. */
6374                 acc.scr_acc = LA_ELS_RJT;
6375 
6376                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6377                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6378                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6379         }
6380 
6381         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6382             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6383 
6384         QL_PRINT_3(ha, "done\n");
6385 
6386         return (FC_SUCCESS);
6387 }
6388 
6389 /*
6390  * ql_els_rscn
6391  *      Issue an extended link service register state
6392  *      change notification request.
6393  *
6394  * Input:
6395  *      ha = adapter state pointer.
6396  *      pkt = pointer to fc_packet.
6397  *
6398  * Returns:
6399  *      FC_SUCCESS - the packet was accepted for transport.
6400  *      FC_TRANSPORT_ERROR - a transport error occurred.
6401  *
6402  * Context:
6403  *      Kernel context.
6404  */
6405 static int
6406 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6407 {
6408         ql_rscn_resp_t  acc;
6409 
6410         QL_PRINT_3(ha, "started\n");
6411 
6412         bzero(&acc, sizeof (acc));
6413         if (ha->topology & QL_FABRIC_CONNECTION) {
6414                 /* Build ACC. */
6415                 acc.scr_acc = LA_ELS_ACC;
6416 
6417                 pkt->pkt_state = FC_PKT_SUCCESS;
6418         } else {
6419                 /* Build RJT. */
6420                 acc.scr_acc = LA_ELS_RJT;
6421 
6422                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6423                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6424                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6425         }
6426 
6427         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6428             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6429 
6430         QL_PRINT_3(ha, "done\n");
6431 
6432         return (FC_SUCCESS);
6433 }
6434 
6435 /*
6436  * ql_els_farp_req
6437  *      Issue an FC Address Resolution Protocol (FARP)
6438  *      extended link service request.
6439  *
6440  *      Note: not supported.
6441  *
6442  * Input:
6443  *      ha = adapter state pointer.
6444  *      pkt = pointer to fc_packet.
6445  *
6446  * Returns:
6447  *      FC_SUCCESS - the packet was accepted for transport.
6448  *      FC_TRANSPORT_ERROR - a transport error occurred.
6449  *
6450  * Context:
6451  *      Kernel context.
6452  */
6453 /* ARGSUSED */
6454 static int
6455 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6456 {
6457         ql_acc_rjt_t    acc;
6458 
6459         QL_PRINT_3(ha, "started\n");
6460 
6461         bzero(&acc, sizeof (acc));
6462 
6463         /* Build ACC. */
6464         acc.ls_code.ls_code = LA_ELS_ACC;
6465 
6466         pkt->pkt_state = FC_PKT_SUCCESS;
6467 
6468         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6469             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6470 
6471         QL_PRINT_3(ha, "done\n");
6472 
6473         return (FC_SUCCESS);
6474 }
6475 
6476 /*
6477  * ql_els_farp_reply
6478  *      Issue an FC Address Resolution Protocol (FARP)
6479  *      extended link service reply.
6480  *
6481  *      Note: not supported.
6482  *
6483  * Input:
6484  *      ha = adapter state pointer.
6485  *      pkt = pointer to fc_packet.
6486  *
6487  * Returns:
6488  *      FC_SUCCESS - the packet was accepted for transport.
6489  *      FC_TRANSPORT_ERROR - a transport error occurred.
6490  *
6491  * Context:
6492  *      Kernel context.
6493  */
6494 /* ARGSUSED */
6495 static int
6496 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6497 {
6498         ql_acc_rjt_t    acc;
6499 
6500         QL_PRINT_3(ha, "started\n");
6501 
6502         bzero(&acc, sizeof (acc));
6503 
6504         /* Build ACC. */
6505         acc.ls_code.ls_code = LA_ELS_ACC;
6506 
6507         pkt->pkt_state = FC_PKT_SUCCESS;
6508 
6509         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6510             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6511 
6512         QL_PRINT_3(ha, "done\n");
6513 
6514         return (FC_SUCCESS);
6515 }
6516 
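/*
 * ql_els_rnid
 *      Issue an extended link service request node identification data
 *      (RNID) request.
 *
 * Input:
 *      ha = adapter state pointer.
 *      pkt = pointer to fc_packet.
 *
 * Returns:
 *      FC_SUCCESS - the packet was accepted for transport.
 *      FC_FAILURE - the request could not be issued.
 *
 * Context:
 *      Kernel context.
 */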
6517 static int
6518 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
6519 {
6520         uchar_t                 *rnid_acc;
6521         port_id_t               d_id;
6522         ql_link_t               *link;
6523         ql_tgt_t                *tq;
6524         uint16_t                index;
6525         la_els_rnid_acc_t       acc;
6526         la_els_rnid_t           *req;
6527         size_t                  req_len;
6528 
6529         QL_PRINT_3(ha, "started\n");
6530 
6531         req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6532         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6533         index = ql_alpa_to_index[d_id.b.al_pa];
6534 
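        /* Locate the device queue for this destination ID, if one exists. */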
6535         tq = NULL;
6536         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6537                 tq = link->base_address;
6538                 if (tq->d_id.b24 == d_id.b24) {
6539                         break;
6540                 } else {
6541                         tq = NULL;
6542                 }
6543         }
6544 
6545         /* Allocate memory for rnid status block */
6546         rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
6547 
6548         bzero(&acc, sizeof (acc));
6549 
6550         req = (la_els_rnid_t *)pkt->pkt_cmd;
6551         if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6552             (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
6553             (caddr_t)rnid_acc) != QL_SUCCESS)) {
6554 
6555                 kmem_free(rnid_acc, req_len);
6556                 acc.ls_code.ls_code = LA_ELS_RJT;
6557 
6558                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6559                     (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6560 
6561                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6562                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6563                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6564 
6565                 return (FC_FAILURE);
6566         }
6567 
6568         acc.ls_code.ls_code = LA_ELS_ACC;
6569         bcopy(rnid_acc, &acc.hdr, sizeof (fc_rnid_hdr_t));
6570         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6571             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6572 
6573         kmem_free(rnid_acc, req_len);
6574         pkt->pkt_state = FC_PKT_SUCCESS;
6575 
6576         QL_PRINT_3(ha, "done\n");
6577 
6578         return (FC_SUCCESS);
6579 }
6580 
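/*
 * ql_els_rls
 *      Issue an extended link service read link error status block
 *      (RLS) request.
 *
 * Input:
 *      ha = adapter state pointer.
 *      pkt = pointer to fc_packet.
 *
 * Returns:
 *      FC_SUCCESS - the packet was accepted for transport.
 *
 * Context:
 *      Kernel context.
 */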
6581 static int
6582 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6583 {
6584         fc_rls_acc_t            *rls_acc;
6585         port_id_t               d_id;
6586         ql_link_t               *link;
6587         ql_tgt_t                *tq;
6588         uint16_t                index;
6589         la_els_rls_acc_t        acc;
6590 
6591         QL_PRINT_3(ha, "started\n");
6592 
6593         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6594         index = ql_alpa_to_index[d_id.b.al_pa];
6595 
6596         tq = NULL;
6597         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6598                 tq = link->base_address;
6599                 if (tq->d_id.b24 == d_id.b24) {
6600                         break;
6601                 } else {
6602                         tq = NULL;
6603                 }
6604         }
6605 
6606         /* Allocate memory for link error status block */
6607         rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6608 
6609         bzero(&acc, sizeof (la_els_rls_acc_t));
6610 
6611         if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
 
6626         }
6627 
6628         LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6629         LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6630         LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6631         LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6632         LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6633 
6634         acc.ls_code.ls_code = LA_ELS_ACC;
6635         acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6636         acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6637         acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
6638         acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6639         acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6640         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6641             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6642 
6643         kmem_free(rls_acc, sizeof (*rls_acc));
6644         pkt->pkt_state = FC_PKT_SUCCESS;
6645 
6646         QL_PRINT_3(ha, "done\n");
6647 
6648         return (FC_SUCCESS);
6649 }
6650 
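/*
 * ql_busy_plogi
 *      Determine whether a PLOGI must be returned busy because commands
 *      or unsolicited callbacks for the device are still outstanding.
 *
 * Input:
 *      ha = adapter state pointer.
 *      pkt = pointer to fc_packet.
 *      tq = target queue pointer.
 *
 * Returns:
 *      FC_SUCCESS or FC_TRAN_BUSY.
 *
 * Context:
 *      Kernel context.
 */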
6651 static int
6652 ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
6653 {
6654         port_id_t       d_id;
6655         ql_srb_t        *sp;
6656         fc_unsol_buf_t  *ubp;
6657         ql_link_t       *link, *next_link;
6658         int             rval = FC_SUCCESS;
6659         int             cnt = 5;
6660 
6661         QL_PRINT_3(ha, "started\n");
6662 
6663         /*
6664          * We need to ensure that tq->outcnt == 0; otherwise any
6665          * command completed with PKT_PORT_OFFLINE after the PLOGI
6666          * will confuse the ULPs.
6667          */
6668 
6669         DEVICE_QUEUE_LOCK(tq);
6670         do {
6671                 /*
6672                  * Wait for the commands to drain. If they do not,
6673                  * the transport will retry the PLOGI after a few
6674                  * seconds.
6675                  */
6676                 if (tq->outcnt != 0) {
6677                         rval = FC_TRAN_BUSY;
6678                         DEVICE_QUEUE_UNLOCK(tq);
6679                         ql_delay(ha, 10000);
6680                         DEVICE_QUEUE_LOCK(tq);
6681                         cnt--;
6682                         if (!cnt) {
6683                                 cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
6684                                     " for %xh outcount %xh", QL_NAME,
6685                                     ha->instance, tq->d_id.b24, tq->outcnt);
6686                         }
6687                 } else {
6688                         rval = FC_SUCCESS;
6689                         break;
6690                 }
6691         } while (cnt > 0);
6692         DEVICE_QUEUE_UNLOCK(tq);
6693 
6694         /*
6695          * Return if busy or if the PLOGI was asynchronous.
6696          */
6697         if ((rval != FC_SUCCESS) ||
6698             (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
6699             pkt->pkt_comp)) {
6700                 QL_PRINT_3(ha, "done, busy or async\n");
6701                 return (rval);
6702         }
6703 
6704         /*
6705          * Give the task daemon sufficient time so that, by the time
6706          * the transport retries the PLOGI, it will have flushed the
6707          * callback queue.
6708          */
6709         TASK_DAEMON_LOCK(ha);
6710         for (link = ha->unsol_callback_queue.first; link != NULL;
6711             link = next_link) {
6712                 next_link = link->next;
6713                 sp = link->base_address;
6714                 if (sp->flags & SRB_UB_CALLBACK) {
6715                         ubp = ha->ub_array[sp->handle];
6716                         d_id.b24 = ubp->ub_frame.s_id;
6717                 } else {
6718                         d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
6719                 }
6720                 if (tq->d_id.b24 == d_id.b24) {
6721                         cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
6722                             ha->instance, tq->d_id.b24);
6723                         rval = FC_TRAN_BUSY;
6724                         break;
6725                 }
6726         }
6727         TASK_DAEMON_UNLOCK(ha);
6728 
6729         QL_PRINT_3(ha, "done\n");
6730 
6731         return (rval);
6732 }
6733 
6734 /*
6735  * ql_login_port
6736  *      Logs in a device if not already logged in.
6737  *
6738  * Input:
6739  *      ha = adapter state pointer.
6740  *      d_id = 24 bit port ID.
6741  *      DEVICE_QUEUE_LOCK must be released.
6742  *
6743  * Returns:
6744  *      QL local function return status code.
6745  *
6746  * Context:
6747  *      Kernel context.
6748  */
6749 static int
6750 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6751 {
6752         ql_adapter_state_t      *vha;
6753         ql_link_t               *link;
6754         uint16_t                index;
6755         ql_tgt_t                *tq, *tq2;
6756         uint16_t                loop_id, first_loop_id, last_loop_id;
6757         int                     rval = QL_SUCCESS;
6758 
6759         QL_PRINT_3(ha, "started, d_id=%xh\n", d_id.b24);
6760 
6761         /* Do not login vports */
6762         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6763                 if (vha->d_id.b24 == d_id.b24) {
6764                         EL(ha, "failed=%xh, d_id=%xh vp_index=%xh\n",
6765                             QL_FUNCTION_FAILED, d_id.b24, vha->vp_index);
6766                         return (QL_FUNCTION_FAILED);
6767                 }
6768         }
6769 
6770         /* Get head queue index. */
6771         index = ql_alpa_to_index[d_id.b.al_pa];
6772 
6773         /* Check whether the device already has a queue. */
6774         tq = NULL;
6775         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6776                 tq = link->base_address;
6777                 if (tq->d_id.b24 == d_id.b24) {
6778                         loop_id = tq->loop_id;
6779                         break;
6780                 } else {
6781                         tq = NULL;
6782                 }
6783         }
6784 
6785         /* Stop issuing any I/O and unsolicited LOGOs. */
6786         if ((tq != NULL) && (!(ddi_in_panic()))) {
6787                 DEVICE_QUEUE_LOCK(tq);
6788                 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6789                 tq->flags &= ~TQF_RSCN_RCVD;
6790                 DEVICE_QUEUE_UNLOCK(tq);
6791         }
6792         if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6793             !(tq->flags & TQF_FABRIC_DEVICE)) {
6794                 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6795         }
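             /*
              * Illustrative note (hypothetical ID, not taken from this
              * code): a non-fabric device previously known at loop ID
              * 0x42 and then marked lost carries (0x42 | PORT_LOST_ID);
              * clearing the PORT_LOST_ID bit above lets the login retry
              * reuse its old loop ID instead of allocating a new one.
              */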
6796 
6797         /* Special case for Nameserver */
6798         if (d_id.b24 == FS_NAME_SERVER) {
6799                 if (!(ha->topology & QL_FABRIC_CONNECTION)) {
6800                         EL(ha, "failed=%xh, d_id=%xh no fabric\n",
6801                             QL_FUNCTION_FAILED, d_id.b24);
6802                         return (QL_FUNCTION_FAILED);
6803                 }
6804 
6805                 loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
6806                     SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6807                 if (tq == NULL) {
6808                         ADAPTER_STATE_LOCK(ha);
6809                         tq = ql_dev_init(ha, d_id, loop_id);
6810                         ADAPTER_STATE_UNLOCK(ha);
6811                         if (tq == NULL) {
6812                                 EL(ha, "failed=%xh, d_id=%xh\n",
6813                                     QL_FUNCTION_FAILED, d_id.b24);
6814                                 return (QL_FUNCTION_FAILED);
6815                         }
6816                 }
6817                 if (!(CFG_IST(ha, CFG_CTRL_82XX))) {
6818                         rval = ql_login_fabric_port(ha, tq, loop_id);
6819                         if (rval == QL_SUCCESS) {
6820                                 tq->loop_id = loop_id;
6821                                 tq->flags |= TQF_FABRIC_DEVICE;
6822                                 (void) ql_get_port_database(ha, tq, PDF_NONE);
6823                         }
6824                 }
6825         /* Check if the device is already logged in. */
6826         } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6827                 if (tq->flags & TQF_FABRIC_DEVICE) {
6828                         rval = ql_login_fabric_port(ha, tq, loop_id);
6829                         if (rval == QL_PORT_ID_USED) {
6830                                 rval = QL_SUCCESS;
6831                         }
6832                 } else if (LOCAL_LOOP_ID(loop_id)) {
6833                         rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6834                             (tq->flags & TQF_INITIATOR_DEVICE ?
6835                             LLF_NONE : LLF_PLOGI));
6836                         if (rval == QL_SUCCESS) {
6837                                 DEVICE_QUEUE_LOCK(tq);
6838                                 tq->loop_id = loop_id;
6839                                 DEVICE_QUEUE_UNLOCK(tq);
6840                         }
6841                 }
6842         } else if (ha->topology & QL_FABRIC_CONNECTION) {
6843                 /* Locate unused loop ID. */
6844                 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6845                         first_loop_id = 0;
6846                         last_loop_id = LAST_N_PORT_HDL;
6847                 } else if (ha->topology & QL_F_PORT) {
6848                         first_loop_id = 0;
6849                         last_loop_id = SNS_LAST_LOOP_ID;
6850                 } else {
6851                         first_loop_id = SNS_FIRST_LOOP_ID;
6852                         last_loop_id = SNS_LAST_LOOP_ID;
6853                 }
6854 
6855                 /* Acquire adapter state lock. */
6856                 ADAPTER_STATE_LOCK(ha);
6857 
6858                 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6859                 if (tq == NULL) {
6860                         EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6861                             d_id.b24);
6862 
6863                         ADAPTER_STATE_UNLOCK(ha);
6864 
 
6901                         case QL_PORT_ID_USED:
6902                                 /*
6903                                  * Use the firmware-assigned handle
6904                                  * and try to log in again.
6905                                  */
6906                                 ADAPTER_STATE_LOCK(ha);
6907                                 ha->pha->free_loop_id--;
6908                                 ADAPTER_STATE_UNLOCK(ha);
6909                                 loop_id = tq->loop_id;
6910                                 break;
6911 
6912                         case QL_SUCCESS:
6913                                 tq->flags |= TQF_FABRIC_DEVICE;
6914                                 (void) ql_get_port_database(ha,
6915                                     tq, PDF_NONE);
6916                                 index = 1;
6917                                 break;
6918 
6919                         case QL_LOOP_ID_USED:
6920                                 tq->loop_id = PORT_NO_LOOP_ID;
6921                                 ADAPTER_STATE_LOCK(ha);
6922                                 loop_id = ha->pha->free_loop_id++;
6923                                 ADAPTER_STATE_UNLOCK(ha);
6924                                 break;
6925 
6926                         case QL_ALL_IDS_IN_USE:
6927                                 tq->loop_id = PORT_NO_LOOP_ID;
6928                                 index = 1;
6929                                 break;
6930 
6931                         default:
6932                                 tq->loop_id = PORT_NO_LOOP_ID;
6933                                 index = 1;
6934                                 break;
6935                         }
6936 
6937                         ADAPTER_STATE_LOCK(ha);
6938                 }
6939 
6940                 ADAPTER_STATE_UNLOCK(ha);
6941         } else {
6942                 rval = QL_FUNCTION_FAILED;
6943         }
6944 
6945         if (rval != QL_SUCCESS) {
6946                 EL(ha, "failed, rval=%xh, d_id=%xh\n",
6947                     rval, d_id.b24);
6948         } else {
6949                 EL(ha, "d_id=%xh, loop_id=%xh, "
6950                     "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6951                     tq->loop_id, tq->port_name[0], tq->port_name[1],
6952                     tq->port_name[2], tq->port_name[3], tq->port_name[4],
6953                     tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6954         }
6955         return (rval);
6956 }
6957 
6958 /*
6959  * ql_login_fabric_port
6960  *      Issue login fabric port mailbox command.
6961  *
6962  * Input:
6963  *      ha:             adapter state pointer.
6964  *      tq:             target queue pointer.
6965  *      loop_id:        FC Loop ID.
6966  *
6967  * Returns:
6968  *      ql local function return status code.
6969  *
6970  * Context:
6971  *      Kernel context.
6972  */
6973 static int
6974 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6975 {
6976         int             rval;
6977         int             index;
6978         int             retry = 0;
6979         port_id_t       d_id;
6980         ql_tgt_t        *newq;
6981         ql_mbx_data_t   mr;
6982 
6983         QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
6984 
6985         /*
6986          * QL_PARAMETER_ERROR also means the firmware was unable to allocate
6987          * a PCB entry due to resource issues or a collision.
6988          */
6989         do {
6990                 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6991                 if ((rval == QL_PARAMETER_ERROR) ||
6992                     ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6993                     mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6994                         retry++;
6995                         drv_usecwait(ha->plogi_params->retry_dly_usec);
6996                 } else {
6997                         break;
6998                 }
6999         } while (retry < ha->plogi_params->retry_cnt);
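             /*
              * Sketch of the retry behavior with hypothetical tunables
              * (not values from this driver): with retry_cnt = 5 and
              * retry_dly_usec = 100000, a transient PLOGI failure is
              * attempted up to 5 times with a 100 ms busy-wait after
              * each failed attempt, i.e. roughly half a second of delay
              * before the error is passed on.
              */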
7000 
7001         switch (rval) {
7002         case QL_SUCCESS:
7003                 tq->loop_id = loop_id;
7004                 break;
7005 
7006         case QL_PORT_ID_USED:
7007                 /*
7008                  * This loop ID should NOT already be in use by the driver.
7009                  */
7010                 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
7011 
7012                 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
7013                         cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
7014                             "dup loop_id=%xh, d_id=%xh", ha->instance,
7015                             newq->loop_id, newq->d_id.b24);
7016                         ql_send_logo(ha, newq, NULL);
7017                 }
7018 
7019                 tq->loop_id = mr.mb[1];
7020                 break;
7021 
7022         case QL_LOOP_ID_USED:
7023                 d_id.b.al_pa = LSB(mr.mb[2]);
7024                 d_id.b.area = MSB(mr.mb[2]);
7025                 d_id.b.domain = LSB(mr.mb[1]);
7026 
7027                 newq = ql_d_id_to_queue(ha, d_id);
7028                 if (newq && (newq->loop_id != loop_id)) {
7029                         /*
7030                          * This should never happen, but this code
7031                          * is needed to bail out when the worst case
7032                          * occurs, as it used to before this check.
7033                          */
7034                         QL_PRINT_2(ha, "Loop ID is now "
7035                             "reassigned; old pairs: [%xh, %xh] and [%xh, %xh]; "
7036                             "new pairs: [%xh, unknown] and [%xh, %xh]\n",
7037                             tq->d_id.b24, loop_id,
7038                             newq->d_id.b24, newq->loop_id, tq->d_id.b24,
7039                             newq->d_id.b24, loop_id);
7040 
7041                         if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
7042                                 ADAPTER_STATE_LOCK(ha);
7043 
7044                                 index = ql_alpa_to_index[newq->d_id.b.al_pa];
7045                                 ql_add_link_b(&ha->dev[index], &newq->device);
7046 
7047                                 newq->d_id.b24 = d_id.b24;
7048 
7049                                 index = ql_alpa_to_index[d_id.b.al_pa];
7050                                 ql_add_link_b(&ha->dev[index], &newq->device);
7051 
7052                                 ADAPTER_STATE_UNLOCK(ha);
7053                         }
7054 
7055                         (void) ql_get_port_database(ha, newq, PDF_NONE);
7056 
7057                 }
 
7059                 /*
7060                  * Invalidate the loop ID so that
7061                  * we obtain a new one.
7062                  */
7063                 tq->loop_id = PORT_NO_LOOP_ID;
7064                 break;
7065 
7066         case QL_ALL_IDS_IN_USE:
7067                 rval = QL_FUNCTION_FAILED;
7068                 EL(ha, "no loop IDs available\n");
7069                 break;
7070 
7071         default:
7072                 if (rval == QL_COMMAND_ERROR) {
7073                         switch (mr.mb[1]) {
7074                         case 2:
7075                         case 3:
7076                                 rval = QL_MEMORY_ALLOC_FAILED;
7077                                 break;
7078 
7079                         case 0xd:
7080                         case 4:
7081                                 rval = QL_FUNCTION_TIMEOUT;
7082                                 break;
7083                         case 1:
7084                         case 5:
7085                         case 7:
7086                                 rval = QL_FABRIC_NOT_INITIALIZED;
7087                                 break;
7088                         default:
7089                                 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
7090                                 break;
7091                         }
7092                 } else {
7093                         cmn_err(CE_WARN, "%s(%d): login fabric port failed"
7094                             " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
7095                             ha->instance, tq->d_id.b24, rval, mr.mb[1]);
7096                 }
7097                 break;
7098         }
7099 
7100         if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
7101             rval != QL_LOOP_ID_USED) {
7102                 EL(ha, "failed=%xh\n", rval);
7103         } else {
7104                 /*EMPTY*/
7105                 QL_PRINT_3(ha, "done\n");
7106         }
7107         return (rval);
7108 }
7109 
7110 /*
7111  * ql_logout_port
7112  *      Logs out a device if possible.
7113  *
7114  * Input:
7115  *      ha:     adapter state pointer.
7116  *      d_id:   24 bit port ID.
7117  *
7118  * Returns:
7119  *      QL local function return status code.
7120  *
7121  * Context:
7122  *      Kernel context.
7123  */
7124 static int
7125 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
7126 {
7127         ql_link_t       *link;
7128         ql_tgt_t        *tq;
7129         uint16_t        index;
7130 
7131         QL_PRINT_3(ha, "started\n");
7132 
7133         /* Get head queue index. */
7134         index = ql_alpa_to_index[d_id.b.al_pa];
7135 
7136         /* Get device queue. */
7137         tq = NULL;
7138         for (link = ha->dev[index].first; link != NULL; link = link->next) {
7139                 tq = link->base_address;
7140                 if (tq->d_id.b24 == d_id.b24) {
7141                         break;
7142                 } else {
7143                         tq = NULL;
7144                 }
7145         }
7146 
7147         if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
7148                 (void) ql_logout_fabric_port(ha, tq);
7149                 tq->loop_id = PORT_NO_LOOP_ID;
7150         }
7151 
7152         QL_PRINT_3(ha, "done\n");
7153 
7154         return (QL_SUCCESS);
7155 }
7156 
7157 /*
7158  * ql_dev_init
7159  *      Initialize/allocate device queue.
7160  *
7161  * Input:
7162  *      ha:             adapter state pointer.
7163  *      d_id:           device destination ID
7164  *      loop_id:        device loop ID
7165  *      ADAPTER_STATE_LOCK must be already obtained.
7166  *
7167  * Returns:
7168  *      NULL = failure
7169  *
7170  * Context:
7171  *      Kernel context.
7172  */
7173 ql_tgt_t *
7174 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
7175 {
7176         ql_link_t       *link;
7177         uint16_t        index;
7178         ql_tgt_t        *tq;
7179 
7180         QL_PRINT_3(ha, "started, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
7181 
7182         index = ql_alpa_to_index[d_id.b.al_pa];
7183 
7184         /* If device queue exists, set proper loop ID. */
7185         for (link = ha->dev[index].first; link != NULL; link = link->next) {
7186                 tq = link->base_address;
7187                 if (tq->d_id.b24 == d_id.b24) {
7188                         tq->loop_id = loop_id;
7189 
7190                         /* Reset port down retry count. */
7191                         tq->port_down_retry_count = ha->port_down_retry_count;
7192                         tq->qfull_retry_count = ha->qfull_retry_count;
7193 
7194                         break;
7195                 }
7196         }
7197 
7198         /* If the device does not have a queue, create one. */
7199         if (link == NULL) {
7200                 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
7201                 if (tq != NULL) {
7202                         /*
7203                          * mutex to protect the device queue,
7204                          * does not block interrupts.
7205                          */
7206                         mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
7207                             ha->intr_pri);
7208 
7209                         tq->d_id.b24 = d_id.b24;
7210                         tq->loop_id = loop_id;
7211                         tq->device.base_address = tq;
7212                         tq->iidma_rate = IIDMA_RATE_INIT;
7213 
7214                         /* Reset port down retry count. */
7215                         tq->port_down_retry_count = ha->port_down_retry_count;
7216                         tq->qfull_retry_count = ha->qfull_retry_count;
7217 
7218                         /* Add device to device queue. */
7219                         ql_add_link_b(&ha->dev[index], &tq->device);
7220                 }
7221         }
7222 
7223         if (tq == NULL) {
7224                 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
7225         } else {
7226                 /*EMPTY*/
7227                 QL_PRINT_3(ha, "done\n");
7228         }
7229         return (tq);
7230 }
7231 
7232 /*
7233  * ql_dev_free
7234  *      Removes the queue from the device list and frees its resources.
7235  *
7236  * Input:
7237  *      ha:     adapter state pointer.
7238  *      tq:     target queue pointer.
7239  *      ADAPTER_STATE_LOCK must be already obtained.
7240  *
7241  * Context:
7242  *      Kernel context.
7243  */
7244 void
7245 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
7246 {
7247         ql_link_t       *link;
7248         uint16_t        index;
7249         ql_lun_t        *lq;
7250 
7251         QL_PRINT_3(ha, "started\n");
7252 
7253         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
7254                 lq = link->base_address;
7255                 if (lq->cmd.first != NULL) {
7256                         EL(ha, "cmd %ph pending in lq=%ph, lun=%xh\n",
7257                             lq->cmd.first, lq, lq->lun_no);
7258                         return;
7259                 }
7260         }
7261 
7262         if (tq->outcnt == 0) {
7263                 /* Get head queue index. */
7264                 index = ql_alpa_to_index[tq->d_id.b.al_pa];
7265                 for (link = ha->dev[index].first; link != NULL;
7266                     link = link->next) {
7267                         if (link->base_address == tq) {
7268                                 ql_remove_link(&ha->dev[index], link);
7269 
7270                                 link = tq->lun_queues.first;
7271                                 while (link != NULL) {
7272                                         lq = link->base_address;
7273                                         link = link->next;
7274 
7275                                         ql_remove_link(&tq->lun_queues,
7276                                             &lq->link);
7277                                         kmem_free(lq, sizeof (ql_lun_t));
7278                                 }
7279 
7280                                 mutex_destroy(&tq->mutex);
7281                                 kmem_free(tq, sizeof (ql_tgt_t));
7282                                 break;
7283                         }
7284                 }
7285         }
7286 
7287         QL_PRINT_3(ha, "done\n");
7288 }
7289 
7290 /*
7291  * ql_lun_queue
7292  *      Allocate a LUN queue if one does not exist.
7293  *
7294  * Input:
7295  *      ha:             adapter state pointer.
7296  *      tq:             target queue pointer.
7297  *      lun_addr:       64-bit FCP LUN address.
7298  *
7299  * Returns:
7300  *      NULL = failure
7301  *
7302  * Context:
7303  *      Kernel context.
7304  */
7305 static ql_lun_t *
7306 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint64_t lun_addr)
7307 {
7308         ql_lun_t        *lq;
7309         ql_link_t       *link;
7310         uint16_t        lun_no, lun_no_tmp;
7311         fcp_ent_addr_t  *fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
7312 
7313         QL_PRINT_3(ha, "started\n");
7314 
7315         /* Fast path. */
7316         if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_addr ==
7317             lun_addr) {
7318                 QL_PRINT_3(ha, "fast done\n");
7319                 return (tq->last_lun_queue);
7320         }
7321 
7322         /* If the LUN queue already exists, return it. */
7323         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
7324                 lq = link->base_address;
7325                 if (lq->lun_addr == lun_addr) {
7326                         QL_PRINT_3(ha, "found done\n");
7327                         tq->last_lun_queue = lq;
7328                         return (lq);
7329                 }
7330         }
7331 
7332         /* Check the LUN addressing levels. */
7333         if (fcp_ent_addr->ent_addr_1 != 0 || fcp_ent_addr->ent_addr_2 != 0 ||
7334             fcp_ent_addr->ent_addr_3 != 0) {
7335                 EL(ha, "Unsupported LUN Addressing level=0x%llxh", lun_addr);
7336         }
7337 
7338         lun_no_tmp = CHAR_TO_SHORT(lobyte(fcp_ent_addr->ent_addr_0),
7339             hibyte(fcp_ent_addr->ent_addr_0));
7340 
7341         lun_no = lun_no_tmp & ~(QL_LUN_AM_MASK << 8);
7342 
7343         if (lun_no_tmp & (QL_LUN_AM_LUN << 8)) {
7344                 EL(ha, "Unsupported first level LUN Addressing method=%xh, "
7345                     "lun=%d(%xh)\n", lun_no_tmp & (QL_LUN_AM_MASK << 8),
7346                     lun_no, lun_no_tmp);
7347         }
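             /*
              * Background note (SAM-style LUN addressing, stated here as
              * an assumption rather than taken from this file): only a
              * single-level LUN is handled, so for a plain
              * peripheral-device-addressed LUN such as LUN 5 only the
              * first 16-bit level is non-zero and lun_no resolves to 5;
              * the two high-order bits of that level carry the
              * addressing method that the QL_LUN_AM_* checks above
              * examine.
              */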
7348 
7349         /* Create and initialize LUN queue. */
7350         lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
7351         if (lq != NULL) {
7352                 lq->link.base_address = lq;
7353                 lq->target_queue = tq;
7354                 lq->lun_addr = lun_addr;
7355                 lq->lun_no = lun_no;
7356 
7357                 DEVICE_QUEUE_LOCK(tq);
7358                 ql_add_link_b(&tq->lun_queues, &lq->link);
7359                 DEVICE_QUEUE_UNLOCK(tq);
7360                 tq->last_lun_queue = lq;
7361         }
7362 
7363         QL_PRINT_3(ha, "done\n");
7364 
7365         return (lq);
7366 }
7367 
7368 /*
7369  * ql_fcp_scsi_cmd
7370  *      Process fibre channel (FCP) SCSI protocol commands.
7371  *
7372  * Input:
7373  *      ha = adapter state pointer.
7374  *      pkt = pointer to fc_packet.
7375  *      sp = srb pointer.
7376  *
7377  * Returns:
7378  *      FC_SUCCESS - the packet was accepted for transport.
7379  *      FC_TRANSPORT_ERROR - a transport error occurred.
7380  *
7381  * Context:
7382  *      Kernel context.
7383  */
7384 static int
7385 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7386 {
7387         port_id_t       d_id;
7388         ql_tgt_t        *tq;
7389         uint64_t        *ptr;
7390         uint64_t        fcp_ent_addr = 0;
7391 
7392         QL_PRINT_3(ha, "started\n");
7393 
7394         tq = (ql_tgt_t *)pkt->pkt_fca_device;
7395         if (tq == NULL) {
7396                 d_id.r.rsvd_1 = 0;
7397                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7398                 tq = ql_d_id_to_queue(ha, d_id);
7399         }
7400 
7401         sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
7402         fcp_ent_addr = *(uint64_t *)(&sp->fcp->fcp_ent_addr);
7403         if (tq != NULL &&
7404             (sp->lun_queue = ql_lun_queue(ha, tq, fcp_ent_addr)) != NULL) {
7405 
7406                 /*
7407                  * zero out FCP response; 24 Bytes
7408                  */
7409                 ptr = (uint64_t *)pkt->pkt_resp;
7410                 *ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
7411 
7412                 /* Handle task management function. */
7413                 if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
7414                     sp->fcp->fcp_cntl.cntl_clr_aca |
7415                     sp->fcp->fcp_cntl.cntl_reset_tgt |
7416                     sp->fcp->fcp_cntl.cntl_reset_lun |
7417                     sp->fcp->fcp_cntl.cntl_clr_tsk |
7418                     sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
7419                         ql_task_mgmt(ha, tq, pkt, sp);
7420                 } else {
7421                         ha->pha->xioctl->IosRequested++;
7422                         ha->pha->xioctl->BytesRequested += (uint32_t)
7423                             sp->fcp->fcp_data_len;
7424 
7425                         /*
7426                          * Setup for commands with data transfer
7427                          */
7428                         sp->iocb = ha->fcp_cmd;
7429                         sp->req_cnt = 1;
7430                         if (sp->fcp->fcp_data_len != 0) {
7431                                 /*
7432                                  * FCP data is bound to pkt_data_dma
7433                                  */
7434                                 if (sp->fcp->fcp_cntl.cntl_write_data) {
7435                                         (void) ddi_dma_sync(pkt->pkt_data_dma,
7436                                             0, 0, DDI_DMA_SYNC_FORDEV);
7437                                 }
7438 
7439                                 /* Setup IOCB count. */
7440                                 if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
7441                                     (!CFG_IST(ha, CFG_CTRL_82XX) ||
7442                                     sp->sg_dma.dma_handle == NULL)) {
7443                                         uint32_t        cnt;
7444 
7445                                         cnt = pkt->pkt_data_cookie_cnt -
7446                                             ha->cmd_segs;
7447                                         sp->req_cnt = (uint16_t)
7448                                             (cnt / ha->cmd_cont_segs);
7449                                         if (cnt % ha->cmd_cont_segs) {
7450                                                 sp->req_cnt = (uint16_t)
7451                                                     (sp->req_cnt + 2);
7452                                         } else {
7453                                                 sp->req_cnt++;
7454                                         }
7455                                 }
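                                     /*
                                      * Worked example (hypothetical values,
                                      * not from this configuration): with
                                      * cmd_segs = 2, cmd_cont_segs = 5 and
                                      * 12 data cookies, cnt = 10 and
                                      * req_cnt = 10 / 5 + 1 = 3, i.e. one
                                      * command IOCB plus two continuation
                                      * IOCBs carry all 12 segments.
                                      */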
7456                         }
7457                         QL_PRINT_3(ha, "done\n");
7458 
7459                         return (ql_start_cmd(ha, tq, pkt, sp));
7460                 }
7461         } else {
7462                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7463                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7464 
7465                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7466                         ql_io_comp(sp);
7467                 }
7468         }
7469 
7470         QL_PRINT_3(ha, "done\n");
7471 
7472         return (FC_SUCCESS);
7473 }
7474 
7475 /*
7476  * ql_task_mgmt
7477  *      Task management function processor.
7478  *
7479  * Input:
7480  *      ha:     adapter state pointer.
7481  *      tq:     target queue pointer.
7482  *      pkt:    pointer to fc_packet.
7483  *      sp:     SRB pointer.
7484  *
7485  * Context:
7486  *      Kernel context.
7487  */
7488 static void
7489 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7490     ql_srb_t *sp)
7491 {
7492         fcp_rsp_t               *fcpr;
7493         struct fcp_rsp_info     *rsp;
7494         ql_lun_t                *lq = sp->lun_queue;
7495 
7496         QL_PRINT_3(ha, "started\n");
7497 
7498         fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7499         rsp = (struct fcp_rsp_info *)(pkt->pkt_resp + sizeof (fcp_rsp_t));
7500 
7501         bzero(fcpr, pkt->pkt_rsplen);
7502 
7503         fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7504         fcpr->fcp_response_len = 8;
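             /*
              * Descriptive note: the 8-byte FCP_RSP_INFO area that
              * follows the FCP response header (rsp above) is where the
              * task management response code (rsp->rsp_code) set below
              * is reported back to the FCP layer.
              */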
7505 
7506         if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7507                 if (ql_clear_aca(ha, tq, lq) != QL_SUCCESS) {
7508                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7509                 }
7510         } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7511                 if (ql_lun_reset(ha, tq, lq) != QL_SUCCESS) {
7512                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7513                 }
7514         } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7515                 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7516                     QL_SUCCESS) {
7517                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7518                 }
7519         } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7520                 if (ql_clear_task_set(ha, tq, lq) != QL_SUCCESS) {
7521                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7522                 }
7523         } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7524                 if (ql_abort_task_set(ha, tq, lq) != QL_SUCCESS) {
7525                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7526                 }
7527         } else {
7528                 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7529         }
7530 
7531         pkt->pkt_state = FC_PKT_SUCCESS;
7532 
7533         /* Do command callback. */
7534         if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7535                 ql_io_comp(sp);
7536         }
7537 
7538         QL_PRINT_3(ha, "done\n");
7539 }
7540 
7541 /*
7542  * ql_fcp_ip_cmd
7543  *      Process fibre channel Internet Protocol (IP) commands.
7544  *
7545  * Input:
7546  *      ha:     adapter state pointer.
7547  *      pkt:    pointer to fc_packet.
7548  *      sp:     SRB pointer.
7549  *
7550  * Returns:
7551  *      FC_SUCCESS - the packet was accepted for transport.
7552  *      FC_TRANSPORT_ERROR - a transport error occurred.
7553  *
7554  * Context:
7555  *      Kernel context.
7556  */
7557 static int
7558 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7559 {
7560         port_id_t       d_id;
7561         ql_tgt_t        *tq;
7562 
7563         QL_PRINT_3(ha, "started\n");
7564 
7565         tq = (ql_tgt_t *)pkt->pkt_fca_device;
7566         if (tq == NULL) {
7567                 d_id.r.rsvd_1 = 0;
7568                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7569                 tq = ql_d_id_to_queue(ha, d_id);
7570         }
7571 
7572         if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7573                 /*
7574                  * IP data is bound to pkt_cmd_dma
7575                  */
7576                 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7577                     0, 0, DDI_DMA_SYNC_FORDEV);
7578 
7579                 /* Setup IOCB count. */
7580                 sp->iocb = ha->ip_cmd;
7581                 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7582                         uint32_t        cnt;
7583 
7584                         cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7585                         sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7586                         if (cnt % ha->cmd_cont_segs) {
7587                                 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7588                         } else {
7589                                 sp->req_cnt++;
7590                         }
7591                 } else {
7592                         sp->req_cnt = 1;
7593                 }
7594                 QL_PRINT_3(ha, "done\n");
7595 
7596                 return (ql_start_cmd(ha, tq, pkt, sp));
7597         } else {
7598                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7599                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7600 
7601                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7602                         ql_io_comp(sp);
7603         }
7604 
7605         QL_PRINT_3(ha, "done\n");
7606 
7607         return (FC_SUCCESS);
7608 }
7609 
7610 /*
7611  * ql_fc_services
7612  *      Process fibre channel services (name server).
7613  *
7614  * Input:
7615  *      ha:     adapter state pointer.
7616  *      pkt:    pointer to fc_packet.
7617  *
7618  * Returns:
7619  *      FC_SUCCESS - the packet was accepted for transport.
7620  *      FC_TRANSPORT_ERROR - a transport error occurred.
7621  *
7622  * Context:
7623  *      Kernel context.
7624  */
7625 static int
7626 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7627 {
7628         uint32_t        cnt;
7629         fc_ct_header_t  hdr;
7630         la_els_rjt_t    rjt;
7631         port_id_t       d_id;
7632         ql_tgt_t        *tq;
7633         ql_srb_t        *sp;
7634         int             rval;
7635 
7636         QL_PRINT_3(ha, "started\n");
7637 
7638         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7639             (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7640 
7641         bzero(&rjt, sizeof (rjt));
7642 
7643         /* Do some sanity checks */
7644         cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7645             sizeof (fc_ct_header_t));
7646         if (cnt > (uint32_t)pkt->pkt_rsplen) {
7647                 EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7648                     pkt->pkt_rsplen);
7649                 return (FC_ELS_MALFORMED);
7650         }
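             /*
              * Sketch of the check above with hypothetical sizes:
              * ct_aiusize is counted in 4-byte words, so a 64-word
              * payload is 256 bytes; adding sizeof (fc_ct_header_t),
              * the request is rejected as FC_ELS_MALFORMED whenever
              * that total exceeds pkt_rsplen.
              */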
7651 
7652         switch (hdr.ct_fcstype) {
7653         case FCSTYPE_DIRECTORY:
7654         case FCSTYPE_MGMTSERVICE:
7655 
7656                 /* An FCA must make sure that the header is in big-endian order */
7657                 ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7658 
7659                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7660                 tq = ql_d_id_to_queue(ha, d_id);
7661                 sp = (ql_srb_t *)pkt->pkt_fca_private;
7662 
7663                 if (tq == NULL ||
7664                     (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7665                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
7666                         pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7667                         rval = QL_SUCCESS;
7668                         break;
7669                 }
7670 
7671                 if (tq->flags & TQF_LOGIN_NEEDED) {
7672                         DEVICE_QUEUE_LOCK(tq);
7673                         tq->flags &= ~TQF_LOGIN_NEEDED;
7674                         DEVICE_QUEUE_UNLOCK(tq);
7675                         (void) ql_login_fport(ha, tq, tq->loop_id, LFF_NONE,
7676                             NULL);
7677                 }
7678                 /*
7679                  * Services data is bound to pkt_cmd_dma
7680                  */
7681                 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7682                     DDI_DMA_SYNC_FORDEV);
7683 
7684                 sp->flags |= SRB_MS_PKT;
7685                 sp->retry_count = 32;
7686 
7687                 /* Setup IOCB count. */
7688                 sp->iocb = ha->ms_cmd;
7689                 if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7690                         cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7691                         sp->req_cnt =
7692                             (uint16_t)(cnt / ha->cmd_cont_segs);
7693                         if (cnt % ha->cmd_cont_segs) {
7694                                 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7695                         } else {
7696                                 sp->req_cnt++;
7697                         }
7698                 } else {
7699                         sp->req_cnt = 1;
7700                 }
7701                 rval = ql_start_cmd(ha, tq, pkt, sp);
7702 
7703                 QL_PRINT_3(ha, "done, ql_start_cmd=%xh\n", rval);
7704 
7705                 return (rval);
7706 
7707         default:
7708                 EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7709                 rval = QL_FUNCTION_PARAMETER_ERROR;
7710                 break;
7711         }
7712 
7713         if (rval != QL_SUCCESS) {
7714                 /* Build RJT. */
7715                 rjt.ls_code.ls_code = LA_ELS_RJT;
7716                 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7717 
7718                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7719                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7720 
7721                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7722                 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7723                 EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7724         }
7725 
7726         /* Do command callback. */
7727         if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7728                 ql_io_comp((ql_srb_t *)pkt->pkt_fca_private);
7729         }
7730 
7731         QL_PRINT_3(ha, "done\n");
7732 
7733         return (FC_SUCCESS);
7734 }
7735 
7736 /*
7737  * ql_cthdr_endian
7738  *      Change the endianness of the CT passthrough header and payload.
7739  *
7740  * Input:
7741  *      acc_handle:     DMA buffer access handle.
7742  *      ct_hdr:         Pointer to header.
7743  *      restore:        Restore first flag.
7744  *
7745  * Context:
7746  *      Interrupt or Kernel context, no mailbox commands allowed.
7747  */
7748 void
7749 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7750     boolean_t restore)
7751 {
 
7821  * ql_start_cmd
7822  *      Finishes starting a fibre channel protocol (FCP) command.
7823  *
7824  * Input:
7825  *      ha:     adapter state pointer.
7826  *      tq:     target queue pointer.
7827  *      pkt:    pointer to fc_packet.
7828  *      sp:     SRB pointer.
7829  *
7830  * Context:
7831  *      Kernel context.
7832  */
7833 static int
7834 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7835     ql_srb_t *sp)
7836 {
7837         int             rval = FC_SUCCESS;
7838         time_t          poll_wait = 0;
7839         ql_lun_t        *lq = sp->lun_queue;
7840 
7841         QL_PRINT_3(ha, "started\n");
7842 
7843         sp->handle = 0;
7844 
7845         /* Set poll for finish. */
7846         if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7847                 sp->flags |= SRB_POLL;
7848                 if (pkt->pkt_timeout == 0) {
7849                         pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7850                 }
7851         }
7852 
7853         /* Acquire device queue lock. */
7854         DEVICE_QUEUE_LOCK(tq);
7855 
7856         /*
7857          * If we need authentication, report the device busy to the
7858          * upper layers so they retry later.
7859          */
7860         if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7861                 DEVICE_QUEUE_UNLOCK(tq);
 
7877                 sp->isp_timeout = 0;
7878         }
7879 
7880         /* If a polling command, set up the wait time. */
7881         if (sp->flags & SRB_POLL) {
7882                 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7883                         poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7884                 } else {
7885                         poll_wait = pkt->pkt_timeout;
7886                 }
7887         }
7888 
7889         if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7890             (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7891                 /* Set ending status. */
7892                 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7893 
7894                 /* Call done routine to handle completions. */
7895                 sp->cmd.next = NULL;
7896                 DEVICE_QUEUE_UNLOCK(tq);
7897                 ql_done(&sp->cmd, B_FALSE);
7898         } else {
7899                 if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7900                         int do_lip = 0;
7901 
7902                         DEVICE_QUEUE_UNLOCK(tq);
7903 
7904                         ADAPTER_STATE_LOCK(ha);
7905                         if ((do_lip = ha->pha->lip_on_panic) == 0) {
7906                                 ha->pha->lip_on_panic++;
7907                         }
7908                         ADAPTER_STATE_UNLOCK(ha);
7909 
7910                         if (!do_lip) {
7911 
7912                                 /*
7913                                  * The QLogic firmware performing PLOGI, PRLI,
7914                                  * etc. is helpful here. If a PLOGI fails for
7915                                  * some reason, we would get CS_PORT_LOGGED_OUT
7916                                  * or a similar error, and a careful polled
7917                                  * mode login should get kicked off inside
 
7925                         }
7926 
7927                         ql_start_iocb(ha, sp);
7928                 } else {
7929                         /* Add the command to the device queue */
7930                         if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7931                                 ql_add_link_t(&lq->cmd, &sp->cmd);
7932                         } else {
7933                                 ql_add_link_b(&lq->cmd, &sp->cmd);
7934                         }
7935 
7936                         sp->flags |= SRB_IN_DEVICE_QUEUE;
7937 
7938                         /* Check whether next message can be processed */
7939                         ql_next(ha, lq);
7940                 }
7941         }
7942 
7943         /* If polling, wait for finish. */
7944         if (poll_wait) {
7945                 if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS &&
7946                     pkt->pkt_state == FC_PKT_SUCCESS) {
7947                         pkt->pkt_state = FC_PKT_TIMEOUT;
7948                         pkt->pkt_reason = FC_REASON_HW_ERROR;
7949                 }
7950 
7951                 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7952                         EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7953                         rval = FC_TRANSPORT_ERROR;
7954                 }
7955 
7956                 if (ddi_in_panic()) {
7957                         if (pkt->pkt_state != FC_PKT_SUCCESS) {
7958                                 port_id_t d_id;
7959 
7960                                 /*
7961                                  * A successful LOGIN implies by design
7962                                  * that PRLI also succeeded for disks.
7963                                  * Note also that there is no special
7964                                  * mailbox command to send PRLI.
7965                                  */
7966                                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7967                                 (void) ql_login_port(ha, d_id);
7968                         }
7969                 }
7970 
7971                 (void) qlc_fm_check_pkt_dma_handle(ha, sp);
7972                 /*
7973                  * This should only happen during CPR dumping
7974                  */
7975                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7976                     pkt->pkt_comp) {
7977                         sp->flags &= ~SRB_POLL;
7978                         (*pkt->pkt_comp)(pkt);
7979                 }
7980         }
7981 
7982         QL_PRINT_3(ha, "done\n");
7983 
7984         return (rval);
7985 }
7986 
7987 /*
7988  * ql_poll_cmd
7989  *      Polls commands for completion.
7990  *
7991  * Input:
7992  *      ha = adapter state pointer.
7993  *      sp = SRB command pointer.
7994  *      poll_wait = poll wait time in seconds.
7995  *
7996  * Returns:
7997  *      QL local function return status code.
7998  *
7999  * Context:
8000  *      Kernel context.
8001  */
8002 static int
8003 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
8004 {
8005         uint32_t                index;
8006         int                     rval = QL_SUCCESS;
8007         time_t                  msecs_left = poll_wait * 100;   /* 10ms inc */
8008         ql_adapter_state_t      *ha = vha->pha;
8009 
8010         QL_PRINT_3(ha, "started\n");
8011 
8012         while (sp->flags & SRB_POLL) {
8013 
8014                 if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
8015                     ha->idle_timer >= 15 || ddi_in_panic() ||
8016                     curthread->t_flag & T_INTR_THREAD) {
8017 
8018                         /* If waiting for restart, do it now. */
8019                         if (ha->port_retry_timer != 0) {
8020                                 ADAPTER_STATE_LOCK(ha);
8021                                 ha->port_retry_timer = 0;
8022                                 ADAPTER_STATE_UNLOCK(ha);
8023 
8024                                 TASK_DAEMON_LOCK(ha);
8025                                 ha->task_daemon_flags |= PORT_RETRY_NEEDED;
8026                                 TASK_DAEMON_UNLOCK(ha);
8027                         }
8028 
8029                         ADAPTER_STATE_LOCK(ha);
8030                         ha->flags |= POLL_INTR;
8031                         ADAPTER_STATE_UNLOCK(ha);
8032 
8033                         if (INTERRUPT_PENDING(ha)) {
8034                                 (void) ql_isr_aif((caddr_t)ha, 0);
8035                                 INTR_LOCK(ha);
8036                                 ha->intr_claimed = TRUE;
8037                                 INTR_UNLOCK(ha);
8038                         }
8039                         if (ha->flags & NO_INTR_HANDSHAKE) {
8040                                 for (index = 0; index < ha->rsp_queues_cnt;
8041                                     index++) {
8042                                         (void) ql_isr_aif((caddr_t)ha,
8043                                             (caddr_t)((uintptr_t)(index + 1)));
8044                                 }
8045                         }
8046 
8047                         ADAPTER_STATE_LOCK(ha);
8048                         ha->flags &= ~POLL_INTR;
8049                         ADAPTER_STATE_UNLOCK(ha);
8050 
8051                         /*
8052                          * Call task thread function in case the
8053                          * daemon is not running.
8054                          */
8055                         TASK_DAEMON_LOCK(ha);
8056 
8057                         if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
8058                             QL_TASK_PENDING(ha)) {
8059                                 ql_task_thread(ha);
8060                         }
8061 
8062                         TASK_DAEMON_UNLOCK(ha);
8063                 }
8064 
8065                 if (msecs_left == 0) {
8066                         if (rval == QL_SUCCESS) {
8067                                 EL(ha, "timeout\n");
8068                                 rval = QL_FUNCTION_TIMEOUT;
8069                                 if (ql_abort_io(ha, sp) == QL_SUCCESS) {
8070                                         sp->pkt->pkt_reason = CS_ABORTED;
8071                                         sp->cmd.next = NULL;
8072                                         ql_done(&sp->cmd, B_FALSE);
8073                                         break;
8074                                 }
8075                                 sp->flags |= SRB_COMMAND_TIMEOUT;
8076                                 EL(ha, "abort failed, isp_abort_needed\n");
8077                                 ql_awaken_task_daemon(ha, NULL,
8078                                     ISP_ABORT_NEEDED, 0);
8079                                 msecs_left = 30 * 100;
8080                         } else {
8081                                 break;
8082                         }
8083                 }
8084 
8085                 /*
8086                  * The polling interval is 10 milliseconds. Increasing
8087                  * it to seconds is tempting, since disk I/O timeout
8088                  * values are ~60 seconds, but doing so increases the
8089                  * CPR dump time, and so will the crash dump time.
8090                  * Don't change these settings without due
8091                  * consideration for all the scenarios that will be
8092                  * impacted.
8093                  */
8094                 ql_delay(ha, 10000);
8095                 msecs_left -= 10;
8096         }
8097 
8098         QL_PRINT_3(ha, "done\n");
8099 
8100         return (rval);
8101 }
8102 
8103 /*
8104  * ql_next
8105  *      Retrieve and process next job in the device queue.
8106  *
8107  * Input:
8108  *      ha:     adapter state pointer.
8109  *      lq:     LUN queue pointer.
8110  *      DEVICE_QUEUE_LOCK must be already obtained.
8111  *
8112  * Output:
8113  *      Releases DEVICE_QUEUE_LOCK upon exit.
8114  *
8115  * Context:
8116  *      Interrupt or Kernel context, no mailbox commands allowed.
8117  */
8118 void
8119 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
8120 {
8121         ql_srb_t                *sp;
8122         ql_link_t               *link;
8123         ql_tgt_t                *tq = lq->target_queue;
8124         ql_adapter_state_t      *ha = vha->pha;
8125 
8126         QL_PRINT_3(ha, "started\n");
8127 
8128         if (ddi_in_panic()) {
8129                 DEVICE_QUEUE_UNLOCK(tq);
8130                 QL_PRINT_3(ha, "panic/active exit\n");
8131                 return;
8132         }
8133 
8134         while ((link = lq->cmd.first) != NULL) {
8135                 sp = link->base_address;
8136 
8137                 /* Exit if commands cannot be started. */
8138                 if (DRIVER_SUSPENDED(ha) ||
8139                     (ha->flags & ONLINE) == 0 ||
8140                     !VALID_DEVICE_ID(ha, tq->loop_id) ||
8141                     tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
8142                     TQF_QUEUE_SUSPENDED)) {
8143                         EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
8144                             "haf=%xh, loop_id=%xh sp=%ph\n", tq->d_id.b24,
8145                             ha->task_daemon_flags, tq->flags, sp->flags,
8146                             ha->flags, tq->loop_id, sp);
8147                         break;
8148                 }
8149 
8150                 /*
8151                  * Determine the LUN for untagged command use.
8152                  * If an untagged command is pending for the LUN,
8153                  * or the LUN execution throttle has been reached,
8154                  * do not submit another command.
8155                  */
8156                 if (sp->flags & SRB_FCP_CMD_PKT) {
8157                         if (lq->flags & LQF_UNTAGGED_PENDING ||
8158                             lq->lun_outcnt >= ha->execution_throttle) {
8159                                 QL_PRINT_8(ha, "break, d_id=%xh, "
8160                                     "lf=%xh, lun_outcnt=%xh\n",
8161                                     tq->d_id.b24, lq->flags, lq->lun_outcnt);
8162                                 break;
8163                         }
8164                         if (sp->fcp->fcp_cntl.cntl_qtype ==
8165                             FCP_QTYPE_UNTAGGED) {
8166                                 /*
8167                                  * Set the untagged-flag for the LUN
8168                                  * so that no more untagged commands
8169                                  * can be submitted for this LUN.
8170                                  */
8171                                 lq->flags |= LQF_UNTAGGED_PENDING;
8172                         }
8173 
8174                         /* Count command as sent. */
8175                         lq->lun_outcnt++;
8176                 }
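                     /*
                      * Illustration (hypothetical throttle value): with
                      * execution_throttle = 16, a 17th concurrent command
                      * for the LUN stays queued until one completes;
                      * likewise a pending untagged command holds back
                      * further commands for that LUN, per the checks
                      * above.
                      */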
8177 
8178                 /* Remove srb from device queue. */
8179                 ql_remove_link(&lq->cmd, &sp->cmd);
8180                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8181 
8182                 tq->outcnt++;
8183 
8184                 ql_start_iocb(vha, sp);
8185         }
8186 
8187         /* Release device queue lock. */
8188         DEVICE_QUEUE_UNLOCK(tq);
8189 
8190         QL_PRINT_3(ha, "done\n");
8191 }
8192 
8193 /*
8194  * ql_done
8195  *      Process completed commands.
8196  *
8197  * Input:
8198  *      link:   first command link in chain.
8199  *      cmplt:  do command complete call back.
8200  *
8201  * Context:
8202  *      Interrupt or Kernel context, no mailbox commands allowed.
8203  */
8204 void
8205 ql_done(ql_link_t *link, boolean_t cmplt)
8206 {
8207         ql_adapter_state_t      *ha;
8208         ql_link_t               *next_link;
8209         ql_srb_t                *sp;
8210         ql_tgt_t                *tq;
8211         ql_lun_t                *lq;
8212         uint64_t                set_flags;
8213 
8214         QL_PRINT_3(NULL, "started\n");
8215 
8216         for (; link != NULL; link = next_link) {
8217                 next_link = link->next;
8218                 sp = link->base_address;
8219                 link->prev = link->next = NULL;
8220                 link->head = NULL;
8221                 ha = sp->ha;
8222                 set_flags = 0;
8223 
8224                 if (sp->flags & SRB_UB_CALLBACK) {
8225                         QL_UB_LOCK(ha);
8226                         if (sp->flags & SRB_UB_IN_ISP) {
8227                                 if (ha->ub_outcnt != 0) {
8228                                         ha->ub_outcnt--;
8229                                 }
8230                                 if (ha->flags & IP_ENABLED) {
8231                                         set_flags |= NEED_UNSOLICITED_BUFFERS;
8232                                 }
8233                         }
8234                         QL_UB_UNLOCK(ha);
8235                         ql_awaken_task_daemon(ha, sp, set_flags, 0);
8236                 } else {
8237                         /* Free outstanding command slot. */
8238                         INTR_LOCK(ha);
8239                         if (sp->handle != 0) {
8240                                 EL(ha, "free sp=%ph, sp->hdl=%xh\n",
8241                                     (void *)sp, sp->handle);
8242                                 ha->pha->outstanding_cmds[
8243                                     sp->handle & OSC_INDEX_MASK] = NULL;
8244                                 sp->handle = 0;
8245                                 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
8246                         }
8247                         INTR_UNLOCK(ha);
8248 
8249                         /* Acquire device queue lock. */
8250                         lq = sp->lun_queue;
8251                         tq = lq->target_queue;
8252                         DEVICE_QUEUE_LOCK(tq);
8253 
8254                         /* Decrement outstanding commands on device. */
8255                         if (tq->outcnt != 0) {
8256                                 tq->outcnt--;
8257                         }
8258 
8259                         if (sp->flags & SRB_FCP_CMD_PKT) {
8260                                 if (sp->fcp->fcp_cntl.cntl_qtype ==
8261                                     FCP_QTYPE_UNTAGGED) {
8262                                         /*
8263                                          * Clear the flag for this LUN so that
8264                                          * untagged commands can be submitted
8265                                          * for it.
8266                                          */
8267                                         lq->flags &= ~LQF_UNTAGGED_PENDING;
 
8290                                 EL(ha, "fast abort modify change\n");
8291                                 sp->flags &= ~(SRB_RETRY);
8292                                 sp->pkt->pkt_reason = CS_TIMEOUT;
8293                         }
8294 
8295                         /* Place request back on top of target command queue */
8296                         if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
8297                             !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
8298                             sp->flags & SRB_RETRY &&
8299                             (sp->flags & SRB_WATCHDOG_ENABLED &&
8300                             sp->wdg_q_time > 1)) {
8301                                 sp->flags &= ~(SRB_ISP_STARTED |
8302                                     SRB_ISP_COMPLETED | SRB_RETRY);
8303 
8304                                 /* Reset watchdog timer */
8305                                 sp->wdg_q_time = sp->init_wdg_q_time;
8306 
8307                                 /* Issue marker command on reset status. */
8308                                 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
8309                                     (sp->pkt->pkt_reason == CS_RESET ||
8310                                     (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
8311                                     sp->pkt->pkt_reason == CS_ABORTED))) {
8312                                         (void) ql_marker(ha, tq->loop_id, 0,
8313                                             MK_SYNC_ID);
8314                                 }
8315 
8316                                 ql_add_link_t(&lq->cmd, &sp->cmd);
8317                                 sp->flags |= SRB_IN_DEVICE_QUEUE;
8318                                 ql_next(ha, lq);
8319                         } else {
8320                                 /* Remove command from watchdog queue. */
8321                                 if (sp->flags & SRB_WATCHDOG_ENABLED) {
8322                                         ql_remove_link(&tq->wdg, &sp->wdg);
8323                                         sp->flags &= ~SRB_WATCHDOG_ENABLED;
8324                                 }
8325 
8326                                 if (lq->cmd.first != NULL) {
8327                                         ql_next(ha, lq);
8328                                 } else {
8329                                         /* Release LU queue specific lock. */
8330                                         DEVICE_QUEUE_UNLOCK(tq);
 
8331                                         if (ha->pha->pending_cmds.first !=
8332                                             NULL) {
8333                                                 ql_start_iocb(ha, NULL);
8334                                         }
8335                                 }
8336 
8337                                 /* Sync buffers if required.  */
8338                                 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
8339                                         (void) ddi_dma_sync(
8340                                             sp->pkt->pkt_resp_dma,
8341                                             0, 0, DDI_DMA_SYNC_FORCPU);
8342                                 }
8343 
8344                                 /* Map ISP completion codes. */
8345                                 sp->pkt->pkt_expln = FC_EXPLN_NONE;
8346                                 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
8347                                 switch (sp->pkt->pkt_reason) {
8348                                 case CS_COMPLETE:
8349                                         sp->pkt->pkt_state = FC_PKT_SUCCESS;
8350                                         break;
8351                                 case CS_RESET:
8352                                         sp->pkt->pkt_state =
8353                                             FC_PKT_PORT_OFFLINE;
8354                                         sp->pkt->pkt_reason =
8355                                             FC_REASON_ABORTED;
8356                                         break;
8357                                 case CS_RESOUCE_UNAVAILABLE:
8358                                         sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
8359                                         sp->pkt->pkt_reason =
8360                                             FC_REASON_PKT_BUSY;
8361                                         break;
8362 
8363                                 case CS_TIMEOUT:
8364                                         sp->pkt->pkt_state = FC_PKT_TIMEOUT;
8365                                         sp->pkt->pkt_reason =
8366                                             FC_REASON_HW_ERROR;
8367                                         break;
8368                                 case CS_DATA_OVERRUN:
8369                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8370                                         sp->pkt->pkt_reason =
8371                                             FC_REASON_OVERRUN;
 
8390                                         break;
8391 
8392                                 case CS_ABORTED:
8393                                         DEVICE_QUEUE_LOCK(tq);
8394                                         if (tq->flags & (TQF_RSCN_RCVD |
8395                                             TQF_NEED_AUTHENTICATION)) {
8396                                                 sp->pkt->pkt_state =
8397                                                     FC_PKT_PORT_OFFLINE;
8398                                                 sp->pkt->pkt_reason =
8399                                                     FC_REASON_LOGIN_REQUIRED;
8400                                         } else {
8401                                                 sp->pkt->pkt_state =
8402                                                     FC_PKT_LOCAL_RJT;
8403                                                 sp->pkt->pkt_reason =
8404                                                     FC_REASON_ABORTED;
8405                                         }
8406                                         DEVICE_QUEUE_UNLOCK(tq);
8407                                         break;
8408 
8409                                 case CS_TRANSPORT:
8410                                 case CS_DEV_NOT_READY:
8411                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8412                                         sp->pkt->pkt_reason =
8413                                             FC_PKT_TRAN_ERROR;
8414                                         break;
8415 
8416                                 case CS_DATA_UNDERRUN:
8417                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8418                                         sp->pkt->pkt_reason =
8419                                             FC_REASON_UNDERRUN;
8420                                         break;
8421                                 case CS_DMA_ERROR:
8422                                 case CS_BAD_PAYLOAD:
8423                                 case CS_UNKNOWN:
8424                                 case CS_CMD_FAILED:
8425                                 default:
8426                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8427                                         sp->pkt->pkt_reason =
8428                                             FC_REASON_HW_ERROR;
8429                                         break;
8430                                 }
8431 
8432                                 (void) qlc_fm_check_pkt_dma_handle(ha, sp);
8433 
8434                                 /* Now call the pkt completion callback */
8435                                 if (sp->flags & SRB_POLL) {
8436                                         sp->flags &= ~SRB_POLL;
8437                                 } else if (cmplt == B_TRUE &&
8438                                     sp->pkt->pkt_comp) {
8439                                         (sp->pkt->pkt_comp)(sp->pkt);
8440                                 } else {
8441                                         ql_io_comp(sp);
8442                                 }
8443                         }
8444                 }
8445         }
8446 
8447         QL_PRINT_3(ha, "done\n");
8448 }
8449 
8450 /*
8451  * ql_awaken_task_daemon
8452  *      Adds command completion callback to callback queue and/or
8453  *      awakens task daemon thread.
8454  *
8455  * Input:
8456  *      ha:             adapter state pointer.
8457  *      sp:             srb pointer.
8458  *      set_flags:      task daemon flags to set.
8459  *      reset_flags:    task daemon flags to reset.
8460  *
8461  * Context:
8462  *      Interrupt or Kernel context, no mailbox commands allowed.
8463  */
8464 void
8465 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8466     uint64_t set_flags, uint64_t reset_flags)
8467 {
8468         ql_adapter_state_t      *ha = vha->pha;
8469 
8470         QL_PRINT_3(ha, "started, sp=%p set_flags=%llx reset_flags=%llx\n",
8471             sp, set_flags, reset_flags);
8472 
8473         /* Acquire task daemon lock. */
8474         TASK_DAEMON_LOCK(ha);
8475 
8476         if (set_flags) {
8477                 ha->task_daemon_flags |= set_flags;
8478         }
8479         if (reset_flags) {
8480                 ha->task_daemon_flags &= ~reset_flags;
8481         }
8482 
8483         if (!(ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG)) {
8484                 EL(ha, "done, not alive dtf=%xh\n", ha->task_daemon_flags);
8485                 TASK_DAEMON_UNLOCK(ha);
8486                 return;
8487         }
8488 
8489         if (sp != NULL) {
8490                 if (sp->flags & SRB_UB_CALLBACK) {
8491                         ql_add_link_b(&ha->unsol_callback_queue, &sp->cmd);
8492                 } else {
8493                         EL(ha, "sp=%p, spf=%xh is not SRB_UB_CALLBACK\n",
8494                             (void *)sp, sp->flags);
8495                 }
8496         }
8497 
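             /* Only signal the daemon when it is not already awake and processing. */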
8498         if (!ha->driver_thread_awake) {
8499                 QL_PRINT_3(ha, "waking task daemon\n");
8500                 cv_broadcast(&ha->cv_task_daemon);
8501         }
8502 
8503         TASK_DAEMON_UNLOCK(ha);
8504 
8505         QL_PRINT_3(ha, "done\n");
8506 }
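
/*
 * Usage sketch (illustrative only): completion paths in this file queue an
 * unsolicited-buffer callback and wake the daemon in a single call, e.g.
 *
 *	ql_awaken_task_daemon(ha, sp, 0, 0);
 *
 * while flag-only callers would pass a NULL srb, e.g. (hypothetical call)
 *
 *	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
 */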
8507 
8508 /*
8509  * ql_task_daemon
8510  *      Thread that is awakened by the driver when background
8511  *      work needs to be done.
8512  *
8513  * Input:
8514  *      arg = adapter state pointer.
8515  *
8516  * Context:
8517  *      Kernel context.
8518  */
8519 static void
8520 ql_task_daemon(void *arg)
8521 {
8522         ql_adapter_state_t      *ha = (void *)arg;
8523 
8524         QL_PRINT_3(ha, "started\n");
8525 
8526         /* Acquire task daemon lock. */
8527         TASK_DAEMON_LOCK(ha);
8528 
8529         while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8530                 ql_task_thread(ha);
8531 
8532                 /*
8533                  * Before we wait on the condition variable, check
8534                  * whether STOP_FLG is set so we know to terminate.
8535                  */
8536                 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8537                         break;
8538                 }
8539 
8540                 QL_PRINT_3(ha, "Going to sleep\n");
8541                 ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8542 
8543                 /* If killed, stop task daemon */
8544                 if (cv_wait_sig(&ha->cv_task_daemon,
8545                     &ha->task_daemon_mutex) == 0) {
8546                         QL_PRINT_10(ha, "killed\n");
8547                         break;
8548                 }
8549 
8550                 QL_PRINT_3(ha, "Awakened\n");
8551                 ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8552         }
8553 
8554         ha->task_daemon_flags &= ~(TASK_DAEMON_SLEEPING_FLG |
8555             TASK_DAEMON_ALIVE_FLG);
8556 
8557         TASK_DAEMON_UNLOCK(ha);
8558 
8559         QL_PRINT_3(ha, "done\n");
8560 }
8561 
8562 /*
8563  * ql_task_thread
8564  *      Thread run by daemon.
8565  *
8566  * Input:
8567  *      ha = adapter state pointer.
8568  *      TASK_DAEMON_LOCK must be acquired prior to call.
8569  *
8570  * Context:
8571  *      Kernel context.
8572  */
8573 static void
8574 ql_task_thread(ql_adapter_state_t *ha)
8575 {
8576         boolean_t               loop_again;
8577         ql_srb_t                *sp;
8578         ql_link_t               *link;
8579         caddr_t                 msg;
8580         ql_adapter_state_t      *vha;
8581 
8582         ha->driver_thread_awake++;
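             /*
              * Most handlers below drop the task daemon lock around their
              * work; each sets loop_again so all flags are rescanned after
              * the lock is reacquired.
              */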
8583         do {
8584                 loop_again = B_FALSE;
8585 
8586                 if (ha->sf != ha->flags ||
8587                     (ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS) != ha->df ||
8588                     ha->cf != ha->cfg_flags) {
8589                         ha->sf = ha->flags;
8590                         ha->df = ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS;
8591                         ha->cf = ha->cfg_flags;
8592                         EL(ha, "df=%xh, sf=%xh, cf=%xh\n",
8593                             ha->df, ha->sf, ha->cf);
8594                 }
8595 
8596                 QL_PM_LOCK(ha);
8597                 if (ha->power_level != PM_LEVEL_D0) {
8598                         QL_PM_UNLOCK(ha);
8599                         ha->task_daemon_flags |= DRIVER_STALL |
8600                             TASK_DAEMON_STALLED_FLG;
8601                         break;
8602                 }
8603                 QL_PM_UNLOCK(ha);
8604 
8605                 if (ha->flags & ADAPTER_SUSPENDED) {
8606                         ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8607                         break;
8608                 }
8609 
8610                 /* Handle FW IDC events. */
8611                 while (ha->flags & (IDC_STALL_NEEDED | IDC_RESTART_NEEDED |
8612                     IDC_ACK_NEEDED)) {
8613                         TASK_DAEMON_UNLOCK(ha);
8614                         ql_idc(ha);
8615                         TASK_DAEMON_LOCK(ha);
8616                         loop_again = B_TRUE;
8617                 }
8618 
8619                 if (ha->task_daemon_flags &
8620                     (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8621                     !(ha->flags & ONLINE)) {
8622                         ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8623                         break;
8624                 }
8625                 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8626 
8627                 /* Store error log. */
8628                 if (ha->errlog[0] != 0 &&
8629                     !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
8630                         TASK_DAEMON_UNLOCK(ha);
8631                         (void) ql_flash_errlog(ha, ha->errlog[0],
8632                             ha->errlog[1], ha->errlog[2], ha->errlog[3]);
8633                         ha->errlog[0] = 0;
8634                         TASK_DAEMON_LOCK(ha);
8635                         loop_again = B_TRUE;
8636                 }
8637 
8638                 /* Idle Check. */
8639                 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8640                         ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8641                         if (!DRIVER_SUSPENDED(ha)) {
8642                                 TASK_DAEMON_UNLOCK(ha);
8643                                 ql_idle_check(ha);
8644                                 TASK_DAEMON_LOCK(ha);
8645                                 loop_again = B_TRUE;
8646                         }
8647                 }
8648 
8649                 /* Crystal+ port#0 bypass transition */
8650                 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8651                         ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8652                         TASK_DAEMON_UNLOCK(ha);
8653                         (void) ql_initiate_lip(ha);
8654                         TASK_DAEMON_LOCK(ha);
8655                         loop_again = B_TRUE;
8656                 }
8657 
8658                 /* Abort queues needed. */
8659                 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8660                         ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8661                         if (ha->flags & ABORT_CMDS_LOOP_DOWN_TMO) {
8662                                 TASK_DAEMON_UNLOCK(ha);
8663                                 ql_abort_queues(ha);
8664                                 TASK_DAEMON_LOCK(ha);
8665                                 loop_again = B_TRUE;
8666                         }
8667                 }
8668 
8669                 /* Not suspended, awaken waiting routines. */
8670                 if (!DRIVER_SUSPENDED(ha) &&
8671                     ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8672                         ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8673                         cv_broadcast(&ha->cv_dr_suspended);
8674                         loop_again = B_TRUE;
8675                 }
8676 
8677                 /* Handle RSCN changes. */
8678                 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8679                         if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8680                                 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8681                                 TASK_DAEMON_UNLOCK(ha);
8682                                 (void) ql_handle_rscn_update(vha);
8683                                 TASK_DAEMON_LOCK(ha);
8684                                 loop_again = B_TRUE;
8685                         }
8686                 }
8687 
8688                 /* Handle state changes. */
8689                 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8690                         if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8691                             !(ha->task_daemon_flags &
8692                             TASK_DAEMON_POWERING_DOWN)) {
8693                                 /* Report state change. */
8694                                 EL(vha, "state change = %xh\n", vha->state);
8695                                 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8696 
8697                                 if (vha->task_daemon_flags &
8698                                     COMMAND_WAIT_NEEDED) {
8699                                         vha->task_daemon_flags &=
8700                                             ~COMMAND_WAIT_NEEDED;
8701                                         if (!(ha->task_daemon_flags &
8702                                             COMMAND_WAIT_ACTIVE)) {
8703                                                 ha->task_daemon_flags |=
8704                                                     COMMAND_WAIT_ACTIVE;
8705                                                 TASK_DAEMON_UNLOCK(ha);
8706                                                 ql_cmd_wait(ha);
8707                                                 TASK_DAEMON_LOCK(ha);
8708                                                 ha->task_daemon_flags &=
8709                                                     ~COMMAND_WAIT_ACTIVE;
8710                                                 loop_again = B_TRUE;
8711                                         }
8712                                 }
8713 
8714                                 msg = NULL;
8715                                 if (FC_PORT_STATE_MASK(vha->state) ==
8716                                     FC_STATE_OFFLINE) {
8717                                         if (vha->task_daemon_flags &
8718                                             STATE_ONLINE) {
8719                                                 if (ha->topology &
8720                                                     QL_LOOP_CONNECTION) {
8721                                                         msg = "Loop OFFLINE";
8722                                                 } else {
8723                                                         msg = "Link OFFLINE";
8724                                                 }
8725                                         }
8726                                         vha->task_daemon_flags &=
8727                                             ~STATE_ONLINE;
8728                                 } else if (FC_PORT_STATE_MASK(vha->state) ==
8729                                     FC_STATE_LOOP) {
8730                                         if (!(vha->task_daemon_flags &
8731                                             STATE_ONLINE)) {
8732                                                 msg = "Loop ONLINE";
8733                                         }
8734                                         vha->task_daemon_flags |= STATE_ONLINE;
8735                                 } else if (FC_PORT_STATE_MASK(vha->state) ==
8736                                     FC_STATE_ONLINE) {
8737                                         if (!(vha->task_daemon_flags &
8738                                             STATE_ONLINE)) {
8739                                                 msg = "Link ONLINE";
8740                                         }
8741                                         vha->task_daemon_flags |= STATE_ONLINE;
8742                                 } else {
8743                                         msg = "Unknown Link state";
8744                                 }
8745 
8746                                 if (msg != NULL) {
8747                                         cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8748                                             "%s", QL_NAME, ha->instance,
8749                                             vha->vp_index, msg);
8750                                 }
8751 
8752                                 if (vha->flags & FCA_BOUND) {
8753                                         QL_PRINT_10(vha, "statec_"
8754                                             "cb state=%xh\n",
8755                                             vha->state);
8756                                         TASK_DAEMON_UNLOCK(ha);
8757                                         (vha->bind_info.port_statec_cb)
8758                                             (vha->bind_info.port_handle,
8759                                             vha->state);
8760                                         TASK_DAEMON_LOCK(ha);
8761                                         loop_again = B_TRUE;
8762                                 }
8763                         }
8764                 }
8765 
8766                 if (ha->task_daemon_flags & NEED_UNSOLICITED_BUFFERS &&
8767                     ha->task_daemon_flags & FIRMWARE_UP) {
8768                         /*
8769                          * The firmware needs more unsolicited
8770                          * buffers. We cannot allocate any new
8771                          * buffers unless the ULP module requests
8772                          * them. All we can do here is hand back
8773                          * receive buffers from the pool that is
8774                          * already allocated.
8775                          */
8776                         ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8777                         TASK_DAEMON_UNLOCK(ha);
8778                         ql_isp_rcvbuf(ha);
8779                         TASK_DAEMON_LOCK(ha);
8780                         loop_again = B_TRUE;
8781                 }
8782 
8783                 if (ha->task_daemon_flags & WATCHDOG_NEEDED) {
8784                         ha->task_daemon_flags &= ~WATCHDOG_NEEDED;
8785                         TASK_DAEMON_UNLOCK(ha);
8786                         ql_watchdog(ha);
8787                         TASK_DAEMON_LOCK(ha);
8788                         loop_again = B_TRUE;
8789                 }
8790 
8791                 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8792                         TASK_DAEMON_UNLOCK(ha);
8793                         (void) ql_abort_isp(ha);
8794                         TASK_DAEMON_LOCK(ha);
8795                         loop_again = B_TRUE;
8796                 }
8797 
8798                 if (!(ha->task_daemon_flags & (COMMAND_WAIT_NEEDED |
8799                     ABORT_QUEUES_NEEDED | ISP_ABORT_NEEDED | LOOP_DOWN)) &&
8800                     ha->task_daemon_flags & FIRMWARE_UP) {
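                              /*
                               * With the firmware up and no abort or wait work pending,
                               * issue any needed SYNC-ALL marker on every virtual port
                               * (MARKER_ACTIVE guards against re-entry), then restart
                               * the queues.
                               */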
8801                         if (ha->task_daemon_flags & MARKER_NEEDED) {
8802                                 if (!(ha->task_daemon_flags & MARKER_ACTIVE)) {
8803                                         ha->task_daemon_flags |= MARKER_ACTIVE;
8804                                         ha->task_daemon_flags &= ~MARKER_NEEDED;
8805                                         TASK_DAEMON_UNLOCK(ha);
8806                                         for (vha = ha; vha != NULL;
8807                                             vha = vha->vp_next) {
8808                                                 (void) ql_marker(vha, 0, 0,
8809                                                     MK_SYNC_ALL);
8810                                         }
8811                                         TASK_DAEMON_LOCK(ha);
8812                                         ha->task_daemon_flags &= ~MARKER_ACTIVE;
8813                                         TASK_DAEMON_UNLOCK(ha);
8814                                         ql_restart_queues(ha);
8815                                         TASK_DAEMON_LOCK(ha);
8816                                         loop_again = B_TRUE;
8817                                 } else {
8818                                         ha->task_daemon_flags &= ~MARKER_NEEDED;
8819                                 }
8820                         }
8821 
8822                         if (ha->task_daemon_flags & LOOP_RESYNC_NEEDED) {
8823                                 if (!(ha->task_daemon_flags &
8824                                     LOOP_RESYNC_ACTIVE)) {
8825                                         ha->task_daemon_flags |=
8826                                             LOOP_RESYNC_ACTIVE;
8827                                         TASK_DAEMON_UNLOCK(ha);
8828                                         ql_loop_resync(ha);
8829                                         TASK_DAEMON_LOCK(ha);
8830                                         loop_again = B_TRUE;
8831                                 }
8832                         }
8833                 }
8834 
8835                 /* Port retry needed. */
8836                 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8837                         ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8838                         ADAPTER_STATE_LOCK(ha);
8839                         ha->port_retry_timer = 0;
8840                         ADAPTER_STATE_UNLOCK(ha);
8841 
8842                         TASK_DAEMON_UNLOCK(ha);
8843                         ql_restart_queues(ha);
8844                         TASK_DAEMON_LOCK(ha);
8845                         loop_again = B_TRUE;
8846                 }
8847 
8848                 /* iiDMA setting needed? */
8849                 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8850                         ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8851                         TASK_DAEMON_UNLOCK(ha);
8852                         ql_iidma(ha);
8853                         TASK_DAEMON_LOCK(ha);
8854                         loop_again = B_TRUE;
8855                 }
8856 
8857                 if (ha->task_daemon_flags & SEND_PLOGI) {
8858                         ha->task_daemon_flags &= ~SEND_PLOGI;
8859                         TASK_DAEMON_UNLOCK(ha);
8860                         (void) ql_n_port_plogi(ha);
8861                         TASK_DAEMON_LOCK(ha);
8862                         loop_again = B_TRUE;
8863                 }
8864 
8865                 if (ha->unsol_callback_queue.first != NULL) {
8866                         sp = (ha->unsol_callback_queue.first)->base_address;
8867                         link = &sp->cmd;
8868                         ql_remove_link(&ha->unsol_callback_queue, link);
8869                         TASK_DAEMON_UNLOCK(ha);
8870                         ql_unsol_callback(sp);
8871                         TASK_DAEMON_LOCK(ha);
8872                         loop_again = B_TRUE;
8873                 }
8874 
8875                 if (ha->task_daemon_flags & IDC_POLL_NEEDED) {
8876                         ha->task_daemon_flags &= ~IDC_POLL_NEEDED;
8877                         TASK_DAEMON_UNLOCK(ha);
8878                         ql_8021_idc_poll(ha);
8879                         TASK_DAEMON_LOCK(ha);
8880                         loop_again = B_TRUE;
8881                 }
8882 
8883                 if (ha->task_daemon_flags & LED_BLINK) {
8884                         ha->task_daemon_flags &= ~LED_BLINK;
8885                         TASK_DAEMON_UNLOCK(ha);
8886                         ql_blink_led(ha);
8887                         TASK_DAEMON_LOCK(ha);
8888                         loop_again = B_TRUE;
8889                 }
8890 
8891         } while (loop_again == B_TRUE);
8892 
8893         if (ha->driver_thread_awake) {
8894                 ha->driver_thread_awake--;
8895         }
8896         QL_PRINT_3(ha, "done\n");
8897 }
8898 
8899 /*
8900  * ql_idle_check
8901  *      Tests whether the adapter is alive and well.
8902  *
8903  * Input:
8904  *      ha:     adapter state pointer.
8905  *
8906  * Context:
8907  *      Kernel context.
8908  */
8909 static void
8910 ql_idle_check(ql_adapter_state_t *ha)
8911 {
8912         int             rval;
8913         ql_mbx_data_t   mr;
8914 
8915         QL_PRINT_3(ha, "started\n");
8916 
8917         /* Firmware Ready Test. */
8918         rval = ql_get_firmware_state(ha, &mr);
8919         if (!DRIVER_SUSPENDED(ha) &&
8920             (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8921                 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8922                 TASK_DAEMON_LOCK(ha);
8923                 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8924                         EL(ha, "fstate_ready, isp_abort_needed\n");
8925                         ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8926                 }
8927                 TASK_DAEMON_UNLOCK(ha);
8928         }
8929 
8930         QL_PRINT_3(ha, "done\n");
8931 }
8932 
8933 /*
8934  * ql_unsol_callback
8935  *      Handle unsolicited buffer callbacks.
8936  *
8937  * Input:
8938  *      sp = srb pointer; the adapter state is derived
8939  *           from sp->ha.
8940  *
8941  * Context:
8942  *      Kernel context.
8943  */
8944 static void
8945 ql_unsol_callback(ql_srb_t *sp)
8946 {
8947         fc_affected_id_t        *af;
8948         fc_unsol_buf_t          *ubp;
8949         uchar_t                 r_ctl;
8950         uchar_t                 ls_code;
8951         ql_tgt_t                *tq;
8952         ql_adapter_state_t      *ha = sp->ha, *pha = sp->ha->pha;
8953 
8954         QL_PRINT_3(ha, "started\n");
8955 
8956         ubp = ha->ub_array[sp->handle];
8957         r_ctl = ubp->ub_frame.r_ctl;
8958         ls_code = ubp->ub_buffer[0];
8959 
8960         if (sp->lun_queue == NULL) {
8961                 tq = NULL;
8962         } else {
8963                 tq = sp->lun_queue->target_queue;
8964         }
8965 
8966         QL_UB_LOCK(ha);
8967         if (sp->flags & SRB_UB_FREE_REQUESTED ||
8968             pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8969                 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8970                     SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8971                 sp->flags |= SRB_UB_IN_FCA;
8972                 QL_UB_UNLOCK(ha);
8973                 return;
8974         }
8975 
8976         /* Process RSCN */
8977         if (sp->flags & SRB_UB_RSCN) {
8978                 int sendup;
8979 
8980                 /*
8981                  * Defer RSCN posting until commands return
8982                  */
8983                 QL_UB_UNLOCK(ha);
8984 
8985                 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8986 
8987                 /* Abort outstanding commands */
8988                 sendup = ql_process_rscn(ha, af);
8989                 if (sendup == 0) {
8990 
8991                         TASK_DAEMON_LOCK(ha);
8992                         ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
8993                         TASK_DAEMON_UNLOCK(ha);
8994 
8995                         /*
8996                          * Wait for commands to drain in F/W (doesn't take
8997                          * more than a few milliseconds)
8998                          */
8999                         ql_delay(ha, 10000);
9000 
9001                         QL_PRINT_2(ha, "done rscn_sendup=0, "
9002                             "fmt=%xh, d_id=%xh\n",
9003                             af->aff_format, af->aff_d_id);
9004                         return;
9005                 }
9006 
9007                 QL_UB_LOCK(ha);
9008 
9009                 EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
9010                     af->aff_format, af->aff_d_id);
9011         }
9012 
9013         /* Process UNSOL LOGO */
9014         if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
9015                 QL_UB_UNLOCK(ha);
9016 
9017                 if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
9018                         TASK_DAEMON_LOCK(ha);
9019                         ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
9020                         TASK_DAEMON_UNLOCK(ha);
9021                         QL_PRINT_2(ha, "logo_sendup=0, d_id=%xh"
9022                             "\n", tq->d_id.b24);
9023                         return;
9024                 }
9025 
9026                 QL_UB_LOCK(ha);
9027                 EL(ha, "sending unsol logout for %xh to transport\n",
9028                     ubp->ub_frame.s_id);
9029         }
9030 
9031         if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_PLOGI)) {
9032                 EL(ha, "sending unsol plogi for %xh to transport\n",
9033                     ubp->ub_frame.s_id);
9034         }
9035 
9036         sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
9037             SRB_UB_FCP);
9038 
9039         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9040                 (void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
9041                     ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
9042         }
9043         QL_UB_UNLOCK(ha);
9044 
9045         (ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
9046             ubp, sp->ub_type);
9047 
9048         QL_PRINT_3(ha, "done\n");
9049 }
9050 
9051 /*
9052  * ql_send_logo
9053  *
9054  * Input:
9055  *      ha:     adapter state pointer.
9056  *      tq:     target queue pointer.
9057  *      done_q: done queue pointer.
9058  *
9059  * Context:
9060  *      Interrupt or Kernel context, no mailbox commands allowed.
9061  */
9062 void
9063 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
9064 {
9065         fc_unsol_buf_t          *ubp;
9066         ql_srb_t                *sp;
9067         la_els_logo_t           *payload;
9068         ql_adapter_state_t      *ha = vha->pha;
9069 
9070         QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
9071 
9072         if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == FS_BROADCAST)) {
9073                 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
9074                 return;
9075         }
9076 
9077         if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
9078             tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
9079 
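                     /*
                      * Fabricate an unsolicited LOGO frame so the ULP tears
                      * down its login state for this remote port.
                      */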
9080                 /* Locate a buffer to use. */
9081                 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
9082                 if (ubp == NULL) {
9083                         EL(vha, "Failed, get_unsolicited_buffer\n");
9084                         return;
9085                 }
9086 
9087                 DEVICE_QUEUE_LOCK(tq);
9088                 tq->flags |= TQF_NEED_AUTHENTICATION;
9089                 tq->logout_sent++;
9090                 DEVICE_QUEUE_UNLOCK(tq);
9091 
9092                 sp = ubp->ub_fca_private;
9093 
9094                 /* Set header. */
9095                 ubp->ub_frame.d_id = vha->d_id.b24;
9096                 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9097                 ubp->ub_frame.s_id = tq->d_id.b24;
9098                 ubp->ub_frame.rsvd = 0;
9099                 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9100                     F_CTL_SEQ_INITIATIVE;
9101                 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9102                 ubp->ub_frame.seq_cnt = 0;
9103                 ubp->ub_frame.df_ctl = 0;
9104                 ubp->ub_frame.seq_id = 0;
9105                 ubp->ub_frame.rx_id = 0xffff;
9106                 ubp->ub_frame.ox_id = 0xffff;
9107 
9108                 /* set payload. */
9109                 payload = (la_els_logo_t *)ubp->ub_buffer;
9110                 bzero(payload, sizeof (la_els_logo_t));
9111                 /* Make sure ls_code in payload is always big endian */
9112                 ubp->ub_buffer[0] = LA_ELS_LOGO;
9113                 ubp->ub_buffer[1] = 0;
9114                 ubp->ub_buffer[2] = 0;
9115                 ubp->ub_buffer[3] = 0;
9116                 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
9117                     &payload->nport_ww_name.raw_wwn[0], 8);
9118                 payload->nport_id.port_id = tq->d_id.b24;
9119 
9120                 QL_UB_LOCK(ha);
9121                 sp->flags |= SRB_UB_CALLBACK;
9122                 QL_UB_UNLOCK(ha);
9123                 if (tq->lun_queues.first != NULL) {
9124                         sp->lun_queue = (tq->lun_queues.first)->base_address;
9125                 } else {
9126                         sp->lun_queue = ql_lun_queue(vha, tq, 0);
9127                 }
9128                 if (done_q) {
9129                         ql_add_link_b(done_q, &sp->cmd);
9130                 } else {
9131                         ql_awaken_task_daemon(ha, sp, 0, 0);
9132                 }
9133         }
9134 
9135         QL_PRINT_3(ha, "done\n");
9136 }
9137 
9138 static int
9139 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9140 {
9141         port_id_t       d_id;
9142         ql_srb_t        *sp;
9143         ql_link_t       *link;
9144         int             sendup = 1;
9145 
9146         QL_PRINT_3(ha, "started\n");
9147 
9148         DEVICE_QUEUE_LOCK(tq);
9149         if (tq->outcnt) {
9150                 DEVICE_QUEUE_UNLOCK(tq);
9151                 sendup = 0;
9152                 (void) ql_abort_device(ha, tq, 1);
9153                 ql_delay(ha, 10000);
9154         } else {
9155                 DEVICE_QUEUE_UNLOCK(tq);
9156                 TASK_DAEMON_LOCK(ha);
9157 
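                     /*
                      * Hold off sending the LOGO up while a command on the
                      * unsolicited callback queue is still addressed to this
                      * d_id.
                      */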
9158                 for (link = ha->pha->unsol_callback_queue.first; link != NULL;
9159                     link = link->next) {
9160                         sp = link->base_address;
9161                         if (sp->flags & SRB_UB_CALLBACK) {
9162                                 continue;
9163                         }
9164                         d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
9165 
9166                         if (tq->d_id.b24 == d_id.b24) {
9167                                 sendup = 0;
9168                                 break;
9169                         }
9170                 }
9171 
9172                 TASK_DAEMON_UNLOCK(ha);
9173         }
9174 
9175         QL_PRINT_3(ha, "done\n");
9176 
9177         return (sendup);
9178 }
9179 
9180 static int
9181 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
9182 {
9183         fc_unsol_buf_t          *ubp;
9184         ql_srb_t                *sp;
9185         la_els_logi_t           *payload;
9186         class_svc_param_t       *class3_param;
9187 
9188         QL_PRINT_3(ha, "started\n");
9189 
9190         if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
9191             LOOP_DOWN)) {
9192                 EL(ha, "Failed, tqf=%xh\n", tq->flags);
9193                 return (QL_FUNCTION_FAILED);
9194         }
9195 
9196         /* Locate a buffer to use. */
9197         ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
9198         if (ubp == NULL) {
9199                 EL(ha, "Failed\n");
9200                 return (QL_FUNCTION_FAILED);
9201         }
9202 
9203         QL_PRINT_3(ha, "Received LOGO from = %xh\n", tq->d_id.b24);
9204 
9205         EL(ha, "Emulate PLOGI from = %xh tq = %ph\n", tq->d_id.b24, (void *)tq);
9206 
9207         sp = ubp->ub_fca_private;
9208 
9209         /* Set header. */
9210         ubp->ub_frame.d_id = ha->d_id.b24;
9211         ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9212         ubp->ub_frame.s_id = tq->d_id.b24;
9213         ubp->ub_frame.rsvd = 0;
9214         ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9215             F_CTL_SEQ_INITIATIVE;
9216         ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9217         ubp->ub_frame.seq_cnt = 0;
9218         ubp->ub_frame.df_ctl = 0;
9219         ubp->ub_frame.seq_id = 0;
9220         ubp->ub_frame.rx_id = 0xffff;
9221         ubp->ub_frame.ox_id = 0xffff;
9222 
9223         /* set payload. */
9224         payload = (la_els_logi_t *)ubp->ub_buffer;
9225         bzero(payload, sizeof (la_els_logi_t));
9226 
9227         payload->ls_code.ls_code = LA_ELS_PLOGI;
9228         payload->common_service.fcph_version = 0x2006;
9229         payload->common_service.cmn_features =
9230             ha->topology & QL_N_PORT ? 0x8000 : 0x8800;
9231         payload->common_service.rx_bufsize =
9232             ha->loginparams.common_service.rx_bufsize;
9233         payload->common_service.conc_sequences = 0xff;
9234         payload->common_service.relative_offset = 0x03;
9235         payload->common_service.e_d_tov = 0x7d0;
9236 
9237         bcopy((void *)&tq->port_name[0],
9238             (void *)&payload->nport_ww_name.raw_wwn[0], 8);
9239 
9240         bcopy((void *)&tq->node_name[0],
9241             (void *)&payload->node_ww_name.raw_wwn[0], 8);
9242 
9243         class3_param = (class_svc_param_t *)&payload->class_3;
9244         class3_param->class_valid_svc_opt = 0x8000;
9245         class3_param->recipient_ctl = tq->class3_recipient_ctl;
9246         class3_param->rcv_data_size = tq->class3_rcv_data_size;
9247         class3_param->conc_sequences = tq->class3_conc_sequences;
9248         class3_param->open_sequences_per_exch =
9249             tq->class3_open_sequences_per_exch;
9250 
9251         QL_UB_LOCK(ha);
9252         sp->flags |= SRB_UB_CALLBACK;
9253         QL_UB_UNLOCK(ha);
9254 
9255         if (done_q) {
9256                 ql_add_link_b(done_q, &sp->cmd);
9257         } else {
9258                 ql_awaken_task_daemon(ha, sp, 0, 0);
9259         }
9260 
9261         QL_PRINT_3(ha, "done\n");
9262 
9263         return (QL_SUCCESS);
9264 }
9265 
9266 /*
9267  * Abort outstanding commands in the firmware, clear internally
9268  * queued commands in the driver, and synchronize the target with
9269  * the firmware.
9270  */
9271 int
9272 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
9273 {
9274         ql_link_t       *link, *link2;
9275         ql_lun_t        *lq;
9276         int             rval = QL_SUCCESS;
9277         ql_srb_t        *sp;
9278         ql_head_t       done_q = { NULL, NULL };
9279 
9280         QL_PRINT_10(ha, "started\n");
9281 
9282         /*
9283          * First, clear internally queued commands.
9284          */
9285         DEVICE_QUEUE_LOCK(tq);
9286         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
9287                 lq = link->base_address;
9288 
9289                 link2 = lq->cmd.first;
9290                 while (link2 != NULL) {
9291                         sp = link2->base_address;
9292                         link2 = link2->next;
9293 
9294                         /* Remove srb from device command queue. */
9295                         ql_remove_link(&lq->cmd, &sp->cmd);
9296                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9297 
9298                         /* Set ending status. */
9299                         sp->pkt->pkt_reason = CS_ABORTED;
9300 
9301                         /* Call done routine to handle completions. */
9302                         ql_add_link_b(&done_q, &sp->cmd);
9303                 }
9304         }
9305         DEVICE_QUEUE_UNLOCK(tq);
9306 
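             /* Complete the aborted commands outside the device queue lock. */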
9307         if (done_q.first != NULL) {
9308                 ql_done(done_q.first, B_FALSE);
9309         }
9310 
9311         if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
9312                 rval = ql_abort_target(ha, tq, 0);
9313         }
9314 
9315         if (rval != QL_SUCCESS) {
9316                 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
9317         } else {
9318                 /*EMPTY*/
9319                 QL_PRINT_10(ha, "done\n");
9320         }
9321 
9322         return (rval);
9323 }
9324 
9325 /*
9326  * ql_rcv_rscn_els
9327  *      Processes received RSCN extended link service.
9328  *
9329  * Input:
9330  *      ha:     adapter state pointer.
9331  *      mb:     array containing input mailbox registers.
9332  *      done_q: done queue pointer.
9333  *
9334  * Context:
9335  *      Interrupt or Kernel context, no mailbox commands allowed.
9336  */
9337 void
9338 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
9339 {
9340         fc_unsol_buf_t          *ubp;
9341         ql_srb_t                *sp;
9342         fc_rscn_t               *rn;
9343         fc_affected_id_t        *af;
9344         port_id_t               d_id;
9345 
9346         QL_PRINT_3(ha, "started\n");
9347 
9348         /* Locate a buffer to use. */
9349         ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
9350         if (ubp != NULL) {
9351                 sp = ubp->ub_fca_private;
9352 
9353                 /* Set header. */
9354                 ubp->ub_frame.d_id = ha->d_id.b24;
9355                 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9356                 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
9357                 ubp->ub_frame.rsvd = 0;
9358                 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9359                     F_CTL_SEQ_INITIATIVE;
9360                 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9361                 ubp->ub_frame.seq_cnt = 0;
9362                 ubp->ub_frame.df_ctl = 0;
9363                 ubp->ub_frame.seq_id = 0;
9364                 ubp->ub_frame.rx_id = 0xffff;
9365                 ubp->ub_frame.ox_id = 0xffff;
9366 
 
9375                 d_id.b.area = MSB(mb[2]);
9376                 d_id.b.domain = LSB(mb[1]);
9377                 af->aff_d_id = d_id.b24;
9378                 af->aff_format = MSB(mb[1]);
9379 
9380                 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
9381                     af->aff_d_id);
9382 
9383                 ql_update_rscn(ha, af);
9384 
9385                 QL_UB_LOCK(ha);
9386                 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
9387                 QL_UB_UNLOCK(ha);
9388                 ql_add_link_b(done_q, &sp->cmd);
9389         }
9390 
9391         if (ubp == NULL) {
9392                 EL(ha, "Failed, get_unsolicited_buffer\n");
9393         } else {
9394                 /*EMPTY*/
9395                 QL_PRINT_3(ha, "done\n");
9396         }
9397 }
9398 
9399 /*
9400  * ql_update_rscn
9401  *      Update devices from received RSCN.
9402  *
9403  * Input:
9404  *      ha:     adapter state pointer.
9405  *      af:     pointer to RSCN data.
9406  *
9407  * Context:
9408  *      Interrupt or Kernel context, no mailbox commands allowed.
9409  */
9410 static void
9411 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9412 {
9413         ql_link_t       *link;
9414         uint16_t        index;
9415         ql_tgt_t        *tq;
9416 
9417         QL_PRINT_3(ha, "started\n");
9418 
9419         if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9420                 port_id_t d_id;
9421 
9422                 d_id.r.rsvd_1 = 0;
9423                 d_id.b24 = af->aff_d_id;
9424 
9425                 tq = ql_d_id_to_queue(ha, d_id);
9426                 if (tq) {
9427                         EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9428                         DEVICE_QUEUE_LOCK(tq);
9429                         tq->flags |= TQF_RSCN_RCVD;
9430                         ql_requeue_pending_cmds(ha, tq);
9431                         DEVICE_QUEUE_UNLOCK(tq);
9432                 }
9433                 QL_PRINT_3(ha, "FC_RSCN_PORT_ADDRESS done\n");
9434 
9435                 return;
9436         }
9437 
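             /*
              * For fabric, area, and domain format RSCNs, walk the entire
              * device list and flag every matching target.
              */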
9438         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9439                 for (link = ha->dev[index].first; link != NULL;
9440                     link = link->next) {
9441                         tq = link->base_address;
9442 
9443                         switch (af->aff_format) {
9444                         case FC_RSCN_FABRIC_ADDRESS:
9445                                 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9446                                         EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9447                                             tq->d_id.b24);
9448                                         DEVICE_QUEUE_LOCK(tq);
9449                                         tq->flags |= TQF_RSCN_RCVD;
9450                                         ql_requeue_pending_cmds(ha, tq);
9451                                         DEVICE_QUEUE_UNLOCK(tq);
9452                                 }
9453                                 break;
9454 
9455                         case FC_RSCN_AREA_ADDRESS:
9456                                 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9457                                         EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9458                                             tq->d_id.b24);
9459                                         DEVICE_QUEUE_LOCK(tq);
9460                                         tq->flags |= TQF_RSCN_RCVD;
9461                                         ql_requeue_pending_cmds(ha, tq);
9462                                         DEVICE_QUEUE_UNLOCK(tq);
9463                                 }
9464                                 break;
9465 
9466                         case FC_RSCN_DOMAIN_ADDRESS:
9467                                 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9468                                         EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9469                                             tq->d_id.b24);
9470                                         DEVICE_QUEUE_LOCK(tq);
9471                                         tq->flags |= TQF_RSCN_RCVD;
9472                                         ql_requeue_pending_cmds(ha, tq);
9473                                         DEVICE_QUEUE_UNLOCK(tq);
9474                                 }
9475                                 break;
9476 
9477                         default:
9478                                 break;
9479                         }
9480                 }
9481         }
9482         QL_PRINT_3(ha, "done\n");
9483 }
9484 
9485 /*
9486  * ql_requeue_pending_cmds
9487  *      Requeue target commands from pending queue to LUN queue
9488  *
9489  * Input:
9490  *      ha:     adapter state pointer.
9491  *      tq:     target queue pointer.
9492  *      DEVICE_QUEUE_LOCK must be already obtained.
9493  *
9494  * Context:
9495  *      Interrupt or Kernel context, no mailbox commands allowed.
9496  */
9497 void
9498 ql_requeue_pending_cmds(ql_adapter_state_t *vha, ql_tgt_t *tq)
9499 {
9500         ql_link_t               *link;
9501         ql_srb_t                *sp;
9502         ql_lun_t                *lq;
9503         ql_adapter_state_t      *ha = vha->pha;
9504 
9505         QL_PRINT_3(ha, "started\n");
9506 
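             /*
              * The adapter-wide pending queue is protected by the request
              * ring lock; the caller already holds this target's
              * DEVICE_QUEUE_LOCK.
              */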
9507         REQUEST_RING_LOCK(ha);
9508         for (link = ha->pending_cmds.first; link != NULL; link = link->next) {
9509                 sp = link->base_address;
9510                 if ((lq = sp->lun_queue) == NULL || lq->target_queue != tq) {
9511                         continue;
9512                 }
9513                 ql_remove_link(&ha->pending_cmds, &sp->cmd);
9514 
9515                 if (tq->outcnt) {
9516                         tq->outcnt--;
9517                 }
9518                 if (sp->flags & SRB_FCP_CMD_PKT) {
9519                         if (sp->fcp->fcp_cntl.cntl_qtype ==
9520                             FCP_QTYPE_UNTAGGED) {
9521                                 lq->flags &= ~LQF_UNTAGGED_PENDING;
9522                         }
9523                         if (lq->lun_outcnt != 0) {
9524                                 lq->lun_outcnt--;
9525                         }
9526                 }
9527                 ql_add_link_t(&lq->cmd, &sp->cmd);
9528                 sp->flags |= SRB_IN_DEVICE_QUEUE;
9529         }
9530         REQUEST_RING_UNLOCK(ha);
9531 
9532         QL_PRINT_3(ha, "done\n");
9533 }
9534 
9535 /*
9536  * ql_process_rscn
9537  *
9538  * Input:
9539  *      ha:     adapter state pointer.
9540  *      af:     RSCN payload pointer.
9541  *
9542  * Context:
9543  *      Kernel context.
9544  */
9545 static int
9546 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9547 {
9548         int             sendit;
9549         int             sendup = 1;
9550         ql_link_t       *link;
9551         uint16_t        index;
9552         ql_tgt_t        *tq;
9553 
9554         QL_PRINT_3(ha, "started\n");
9555 
9556         if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9557                 port_id_t d_id;
9558 
9559                 d_id.r.rsvd_1 = 0;
9560                 d_id.b24 = af->aff_d_id;
9561 
9562                 tq = ql_d_id_to_queue(ha, d_id);
9563                 if (tq) {
9564                         sendup = ql_process_rscn_for_device(ha, tq);
9565                 }
9566 
9567                 QL_PRINT_3(ha, "done\n");
9568 
9569                 return (sendup);
9570         }
9571 
9572         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9573                 for (link = ha->dev[index].first; link != NULL;
9574                     link = link->next) {
9575 
9576                         tq = link->base_address;
9577                         if (tq == NULL) {
9578                                 continue;
9579                         }
9580 
9581                         switch (af->aff_format) {
9582                         case FC_RSCN_FABRIC_ADDRESS:
9583                                 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9584                                         sendit = ql_process_rscn_for_device(
9585                                             ha, tq);
9586                                         if (sendup) {
9587                                                 sendup = sendit;
 
9602                                 break;
9603 
9604                         case FC_RSCN_DOMAIN_ADDRESS:
9605                                 if ((tq->d_id.b24 & 0xff0000) ==
9606                                     af->aff_d_id) {
9607                                         sendit = ql_process_rscn_for_device(
9608                                             ha, tq);
9609 
9610                                         if (sendup) {
9611                                                 sendup = sendit;
9612                                         }
9613                                 }
9614                                 break;
9615 
9616                         default:
9617                                 break;
9618                         }
9619                 }
9620         }
9621 
9622         QL_PRINT_3(ha, "done\n");
9623 
9624         return (sendup);
9625 }
9626 
9627 /*
9628  * ql_process_rscn_for_device
9629  *
9630  * Input:
9631  *      ha:     adapter state pointer.
9632  *      tq:     target queue pointer.
9633  *
9634  * Context:
9635  *      Kernel context.
9636  */
9637 static int
9638 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9639 {
9640         int sendup = 1;
9641 
9642         QL_PRINT_3(ha, "started\n");
9643 
9644         DEVICE_QUEUE_LOCK(tq);
9645 
9646         /*
9647          * Let FCP-2 compliant devices continue I/Os
9648          * with their low level recoveries.
9649          */
9650         if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9651             (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9652                 /*
9653                  * Cause ADISC to go out
9654                  */
9655                 DEVICE_QUEUE_UNLOCK(tq);
9656 
9657                 (void) ql_get_port_database(ha, tq, PDF_NONE);
9658 
9659                 DEVICE_QUEUE_LOCK(tq);
9660                 tq->flags &= ~TQF_RSCN_RCVD;
9661 
9662         } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9663                 if (tq->d_id.b24 != BROADCAST_ADDR) {
9664                         tq->flags |= TQF_NEED_AUTHENTICATION;
9665                 }
9666 
9667                 DEVICE_QUEUE_UNLOCK(tq);
9668 
9669                 (void) ql_abort_device(ha, tq, 1);
9670 
9671                 DEVICE_QUEUE_LOCK(tq);
9672 
9673                 if (tq->outcnt) {
9674                         EL(ha, "busy tq->outcnt=%d\n", tq->outcnt);
9675                         sendup = 0;
9676                 } else {
9677                         tq->flags &= ~TQF_RSCN_RCVD;
9678                 }
9679         } else {
9680                 tq->flags &= ~TQF_RSCN_RCVD;
9681         }
9682 
9683         if (sendup) {
9684                 if (tq->d_id.b24 != BROADCAST_ADDR) {
9685                         tq->flags |= TQF_NEED_AUTHENTICATION;
9686                 }
9687         }
9688 
9689         DEVICE_QUEUE_UNLOCK(tq);
9690 
9691         QL_PRINT_3(ha, "done\n");
9692 
9693         return (sendup);
9694 }
9695 
9696 static int
9697 ql_handle_rscn_update(ql_adapter_state_t *ha)
9698 {
9699         int                     rval;
9700         ql_tgt_t                *tq;
9701         uint16_t                index, loop_id;
9702         ql_dev_id_list_t        *list;
9703         uint32_t                list_size;
9704         port_id_t               d_id;
9705         ql_mbx_data_t           mr;
9706         ql_head_t               done_q = { NULL, NULL };
9707 
9708         QL_PRINT_3(ha, "started\n");
9709 
9710         list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9711         list = kmem_zalloc(list_size, KM_SLEEP);
9712         if (list == NULL) {
9713                 rval = QL_MEMORY_ALLOC_FAILED;
9714                 EL(ha, "kmem_zalloc failed=%xh\n", rval);
9715                 return (rval);
9716         }
9717 
9718         /*
9719          * Get data from RISC code d_id list to init each device queue.
9720          */
9721         rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9722         if (rval != QL_SUCCESS) {
9723                 kmem_free(list, list_size);
9724                 EL(ha, "get_id_list failed=%xh\n", rval);
9725                 return (rval);
9726         }
9727 
9728         /* Acquire adapter state lock. */
9729         ADAPTER_STATE_LOCK(ha);
9730 
9731         /* Check for new devices */
9732         for (index = 0; index < mr.mb[1]; index++) {
9733                 ql_dev_list(ha, list, index, &d_id, &loop_id);
9734 
9735                 if (VALID_DEVICE_ID(ha, loop_id)) {
9736                         d_id.r.rsvd_1 = 0;
9737 
9738                         tq = ql_d_id_to_queue(ha, d_id);
9739                         if (tq != NULL) {
9740                                 continue;
9741                         }
9742 
9743                         tq = ql_dev_init(ha, d_id, loop_id);
9744 
9745                         /* Test for fabric device. */
9746                         if (ha->topology & QL_F_PORT ||
9747                             d_id.b.domain != ha->d_id.b.domain ||
9748                             d_id.b.area != ha->d_id.b.area) {
9749                                 tq->flags |= TQF_FABRIC_DEVICE;
9750                         }
9751 
9752                         ADAPTER_STATE_UNLOCK(ha);
9753                         if (ql_get_port_database(ha, tq, PDF_NONE) !=
9754                             QL_SUCCESS) {
9755                                 tq->loop_id = PORT_NO_LOOP_ID;
9756                         }
9757                         ADAPTER_STATE_LOCK(ha);
9758 
9759                         /*
9760                          * Send up a PLOGI about the new device
9761                          */
9762                         if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9763                                 (void) ql_send_plogi(ha, tq, &done_q);
9764                         }
9765                 }
9766         }
9767 
9768         /* Release adapter state lock. */
9769         ADAPTER_STATE_UNLOCK(ha);
9770 
9771         if (done_q.first != NULL) {
9772                 ql_done(done_q.first, B_FALSE);
9773         }
9774 
9775         kmem_free(list, list_size);
9776 
9777         if (rval != QL_SUCCESS) {
9778                 EL(ha, "failed=%xh\n", rval);
9779         } else {
9780                 /*EMPTY*/
9781                 QL_PRINT_3(ha, "done\n");
9782         }
9783 
9784         return (rval);
9785 }
9786 
9787 /*
9788  * ql_free_unsolicited_buffer
9789  *      Frees allocated buffer.
9790  *
9791  * Input:
9792  *      ha = adapter state pointer.
9793  *      ubp = unsolicited buffer pointer.
9794  *      ADAPTER_STATE_LOCK must be already obtained.
9795  *
9796  * Context:
9797  *      Kernel context.
9798  */
9799 static void
9800 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9801 {
9802         ql_srb_t        *sp;
9803         int             status;
9804 
9805         QL_PRINT_3(ha, "started\n");
9806 
9807         sp = ubp->ub_fca_private;
9808         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9809                 /* Disconnect IP from system buffers. */
9810                 if (ha->flags & IP_INITIALIZED) {
9811                         status = ql_shutdown_ip(ha);
9812                         if (status != QL_SUCCESS) {
9813                                 cmn_err(CE_WARN,
9814                                     "!Qlogic %s(%d): Failed to shutdown IP",
9815                                     QL_NAME, ha->instance);
9816                                 return;
9817                         }
9818 
9819                         ha->flags &= ~IP_ENABLED;
9820                 }
9821 
9822                 ql_free_phys(ha, &sp->ub_buffer);
9823         } else {
9824                 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9825         }
9826 
9827         kmem_free(sp, sizeof (ql_srb_t));
9828         kmem_free(ubp, sizeof (fc_unsol_buf_t));
9829 
9830         QL_UB_LOCK(ha);
9831         if (ha->ub_allocated != 0) {
9832                 ha->ub_allocated--;
9833         }
9834         QL_UB_UNLOCK(ha);
9835 
9836         QL_PRINT_3(ha, "done\n");
9837 }
9838 
9839 /*
9840  * ql_get_unsolicited_buffer
9841  *      Locates a free unsolicited buffer.
9842  *
9843  * Input:
9844  *      ha = adapter state pointer.
9845  *      type = buffer type.
9846  *
9847  * Returns:
9848  *      Unsolicited buffer pointer.
9849  *
9850  * Context:
9851  *      Interrupt or Kernel context, no mailbox commands allowed.
9852  */
9853 fc_unsol_buf_t *
9854 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9855 {
9856         fc_unsol_buf_t  *ubp;
9857         ql_srb_t        *sp;
9858         uint16_t        index;
9859 
9860         QL_PRINT_3(ha, "started\n");
9861 
9862         /* Locate a buffer to use. */
9863         ubp = NULL;
9864 
9865         QL_UB_LOCK(ha);
9866         for (index = 0; index < QL_UB_LIMIT; index++) {
9867                 ubp = ha->ub_array[index];
9868                 if (ubp != NULL) {
9869                         sp = ubp->ub_fca_private;
9870                         if ((sp->ub_type == type) &&
9871                             (sp->flags & SRB_UB_IN_FCA) &&
9872                             (!(sp->flags & (SRB_UB_CALLBACK |
9873                             SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9874                                 sp->flags |= SRB_UB_ACQUIRED;
9875                                 ubp->ub_resp_flags = 0;
9876                                 break;
9877                         }
9878                         ubp = NULL;
9879                 }
9880         }
9881         QL_UB_UNLOCK(ha);
9882 
9883         if (ubp) {
9884                 ubp->ub_resp_token = NULL;
9885                 ubp->ub_class = FC_TRAN_CLASS3;
9886         }
9887 
9888         QL_PRINT_3(ha, "done\n");
9889 
9890         return (ubp);
9891 }
9892 
9893 /*
9894  * ql_ub_frame_hdr
9895  *      Processes received unsolicited buffers from ISP.
9896  *
9897  * Input:
9898  *      ha:     adapter state pointer.
9899  *      tq:     target queue pointer.
9900  *      index:  unsolicited buffer array index.
9901  *      done_q: done queue pointer.
9902  *
9903  * Returns:
9904  *      ql local function return status code.
9905  *
9906  * Context:
9907  *      Interrupt or Kernel context, no mailbox commands allowed.
9908  */
9909 int
9910 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9911     ql_head_t *done_q)
9912 {
9913         fc_unsol_buf_t  *ubp;
9914         ql_srb_t        *sp;
9915         uint16_t        loop_id;
9916         int             rval = QL_FUNCTION_FAILED;
9917 
9918         QL_PRINT_3(ha, "started\n");
9919 
9920         QL_UB_LOCK(ha);
9921         if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9922                 EL(ha, "Invalid buffer index=%xh\n", index);
9923                 QL_UB_UNLOCK(ha);
9924                 return (rval);
9925         }
9926 
9927         sp = ubp->ub_fca_private;
9928         if (sp->flags & SRB_UB_FREE_REQUESTED) {
9929                 EL(ha, "buffer freed index=%xh\n", index);
9930                 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9931                     SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9932 
9933                 sp->flags |= SRB_UB_IN_FCA;
9934 
9935                 QL_UB_UNLOCK(ha);
9936                 return (rval);
9937         }
9938 
9939         if ((sp->handle == index) &&
9940             (sp->flags & SRB_UB_IN_ISP) &&
9941             (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9942             (!(sp->flags & SRB_UB_ACQUIRED))) {
9943                 /* set broadcast D_ID */
9944                 loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
9945                     BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9946                 if (tq->ub_loop_id == loop_id) {
9947                         if (ha->topology & QL_FL_PORT) {
9948                                 ubp->ub_frame.d_id = 0x000000;
9949                         } else {
9950                                 ubp->ub_frame.d_id = FS_BROADCAST;
9951                         }
9952                 } else {
9953                         ubp->ub_frame.d_id = ha->d_id.b24;
9954                 }
9955                 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9956                 ubp->ub_frame.rsvd = 0;
9957                 ubp->ub_frame.s_id = tq->d_id.b24;
9958                 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9959                 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9960                 ubp->ub_frame.df_ctl = 0;
9961                 ubp->ub_frame.seq_id = tq->ub_seq_id;
9962                 ubp->ub_frame.rx_id = 0xffff;
9963                 ubp->ub_frame.ox_id = 0xffff;
9964                 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9965                     sp->ub_size : tq->ub_sequence_length;
9966                 ubp->ub_frame.ro = tq->ub_frame_ro;
9967 
9968                 tq->ub_sequence_length = (uint16_t)
9969                     (tq->ub_sequence_length - ubp->ub_bufsize);
9970                 tq->ub_frame_ro += ubp->ub_bufsize;
9971                 tq->ub_seq_cnt++;
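                /*
                 * Worked example with hypothetical values: if this buffer's
                 * ub_size is 0x800 and 0x1400 bytes of the sequence remain,
                 * this frame carries 0x800 bytes at the current relative
                 * offset; 0xc00 bytes then remain and the relative offset
                 * for the next frame advances by 0x800.
                 */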
9972 
9973                 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9974                         if (tq->ub_seq_cnt == 1) {
9975                                 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9976                                     F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9977                         } else {
9978                                 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9979                                     F_CTL_END_SEQ;
9980                         }
9981                         tq->ub_total_seg_cnt = 0;
9982                 } else if (tq->ub_seq_cnt == 1) {
9983                         ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9984                             F_CTL_FIRST_SEQ;
9985                         ubp->ub_frame.df_ctl = 0x20;
9986                 }
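                /*
                 * Recap of the f_ctl settings above: a single-frame sequence
                 * is marked F_CTL_RO_PRESENT | F_CTL_FIRST_SEQ |
                 * F_CTL_END_SEQ; the last frame of a multi-frame sequence is
                 * marked F_CTL_RO_PRESENT | F_CTL_END_SEQ; the first frame of
                 * a multi-frame sequence is marked F_CTL_RO_PRESENT |
                 * F_CTL_FIRST_SEQ with df_ctl 0x20; intermediate frames are
                 * not modified here.
                 */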
9987 
9988                 QL_PRINT_3(ha, "ub_frame.d_id=%xh\n", ubp->ub_frame.d_id);
9989                 QL_PRINT_3(ha, "ub_frame.s_id=%xh\n", ubp->ub_frame.s_id);
9990                 QL_PRINT_3(ha, "ub_frame.seq_cnt=%xh\n", ubp->ub_frame.seq_cnt);
9991                 QL_PRINT_3(ha, "ub_frame.seq_id=%xh\n", ubp->ub_frame.seq_id);
9992                 QL_PRINT_3(ha, "ub_frame.ro=%xh\n", ubp->ub_frame.ro);
9993                 QL_PRINT_3(ha, "ub_frame.f_ctl=%xh\n", ubp->ub_frame.f_ctl);
9994                 QL_PRINT_3(ha, "ub_bufsize=%xh\n", ubp->ub_bufsize);
9995                 QL_DUMP_3(ubp->ub_buffer, 8,
9996                     ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9997 
9998                 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9999                 ql_add_link_b(done_q, &sp->cmd);
10000                 rval = QL_SUCCESS;
10001         } else {
10002                 if (sp->handle != index) {
10003                         EL(ha, "Bad index=%xh, expect=%xh\n", index,
10004                             sp->handle);
10005                 }
10006                 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
10007                         EL(ha, "buffer was already in driver, index=%xh\n",
10008                             index);
10009                 }
10010                 if (sp->ub_type != FC_TYPE_IS8802_SNAP) {
10011                         EL(ha, "buffer was not an IP buffer, index=%xh\n",
10012                             index);
10013                 }
10014                 if (sp->flags & SRB_UB_ACQUIRED) {
10015                         EL(ha, "buffer was being used by driver, index=%xh\n",
10016                             index);
10017                 }
10018         }
10019         QL_UB_UNLOCK(ha);
10020 
10021         QL_PRINT_3(ha, "done\n");
10022 
10023         return (rval);
10024 }
10025 
10026 /*
10027  * ql_timer
10028  *      One second timer function.
10029  *
10030  * Input:
10031  *      ql_hba.first = first link in adapter list.
10032  *
10033  * Context:
10034  *      Interrupt context, no mailbox commands allowed.
10035  */
10036 static void
10037 ql_timer(void *arg)
10038 {
10039         ql_link_t               *link;
10040         uint64_t                set_flags;
10041         ql_adapter_state_t      *ha = NULL;
10042         static uint32_t         sec_cnt = 0;
10043 
10044         QL_PRINT_6(NULL, "started\n");
10045 
10046         /* Acquire global state lock. */
10047         GLOBAL_TIMER_LOCK();
10048         if (ql_timer_timeout_id == NULL) {
10049                 /* Release global state lock. */
10050                 GLOBAL_TIMER_UNLOCK();
10051                 return;
10052         }
10053 
10054         sec_cnt++;
10055         for (link = ql_hba.first; link != NULL; link = link->next) {
10056                 ha = link->base_address;
10057 
10058                 /* Skip adapter if suspended or stalled. */
10059                 if (ha->flags & ADAPTER_SUSPENDED ||
10060                     ha->task_daemon_flags & DRIVER_STALL ||
10061                     !(ha->task_daemon_flags & FIRMWARE_UP)) {
10062                         continue;
10063                 }
10064 
10065                 QL_PM_LOCK(ha);
10066                 if (ha->power_level != PM_LEVEL_D0) {
10067                         QL_PM_UNLOCK(ha);
10068                         continue;
10069                 }
10070                 ha->pm_busy++;
10071                 QL_PM_UNLOCK(ha);
10072 
10073                 set_flags = 0;
10074 
10075                 /* All completion threads busy, wake up a helper thread. */
10076                 if (ha->comp_thds_awake == ha->comp_thds_active &&
10077                     ha->comp_q.first != NULL) {
10078                         QL_PRINT_10(ha, "comp queue helper thrd started\n");
10079                         (void) timeout(ql_process_comp_queue, (void *)ha, 1);
10080                 }
10081 
10082                 /* Port retry timer handler. */
10083                 if (LOOP_READY(ha)) {
10084                         ADAPTER_STATE_LOCK(ha);
10085                         if (ha->port_retry_timer != 0) {
10086                                 ha->port_retry_timer--;
10087                                 if (ha->port_retry_timer == 0) {
10088                                         set_flags |= PORT_RETRY_NEEDED;
10089                                 }
10090                         }
10091                         ADAPTER_STATE_UNLOCK(ha);
10092                 }
10093 
10094                 /* Loop down timer handler. */
10095                 if (LOOP_RECONFIGURE(ha) == 0) {
10096                         if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
10097                                 ha->loop_down_timer--;
10098                                 /*
10099                                  * give the firmware loop down dump flag
10100                                  * a chance to work.
10101                                  */
10102                                 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
10103                                         if (CFG_IST(ha,
10104                                             CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
10105                                                 ADAPTER_STATE_LOCK(ha);
10106                                                 ha->flags |= FW_DUMP_NEEDED;
10107                                                 ADAPTER_STATE_UNLOCK(ha);
10108                                         }
10109                                         EL(ha, "loop_down_reset, "
10110                                             "isp_abort_needed\n");
10111                                         set_flags |= ISP_ABORT_NEEDED;
10112                                 }
10113                         }
10114                         if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
10115                                 /* Command abort time handler. */
10116                                 if (ha->loop_down_timer ==
10117                                     ha->loop_down_abort_time) {
10118                                         ADAPTER_STATE_LOCK(ha);
10119                                         ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
10120                                         ADAPTER_STATE_UNLOCK(ha);
10121                                         set_flags |= ABORT_QUEUES_NEEDED;
10122                                         EL(ha, "loop_down_abort_time, "
10123                                             "abort_queues_needed\n");
10124                                 }
10125 
10126                                 /* Watchdog timer handler. */
10127                                 if (ha->watchdog_timer == 0) {
10128                                         ha->watchdog_timer = WATCHDOG_TIME;
10129                                 } else if (LOOP_READY(ha)) {
10130                                         ha->watchdog_timer--;
10131                                         if (ha->watchdog_timer == 0) {
10132                                                 set_flags |= WATCHDOG_NEEDED;
10133                                         }
10134                                 }
10135                         }
10136                 }
10137 
10138                 /* Idle timer handler. */
10139                 if (!DRIVER_SUSPENDED(ha)) {
10140                         if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
10141 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
10142                                 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
10143 #endif
10144                                 ha->idle_timer = 0;
10145                         }
10146                         if (ha->send_plogi_timer != 0) {
10147                                 ha->send_plogi_timer--;
10148                                 if (ha->send_plogi_timer == 0) {
10149                                         set_flags |= SEND_PLOGI;
10150                                 }
10151                         }
10152                 }
10153 
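                /*
                 * On 82xx adapters, request an IDC poll every other second
                 * while online and no ISP abort is in progress.
                 */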
10154                 if (CFG_IST(ha, CFG_CTRL_82XX) && ha->flags & ONLINE &&
10155                     !(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
10156                     ABORT_ISP_ACTIVE)) &&
10157                     !(sec_cnt % 2)) {
10158                         set_flags |= IDC_POLL_NEEDED;
10159                 }
10160 
10161                 if (ha->ledstate.BeaconState == BEACON_ON) {
10162                         set_flags |= LED_BLINK;
10163                 }
10164 
10165                 if (set_flags != 0) {
10166                         ql_awaken_task_daemon(ha, NULL, set_flags, 0);
10167                 }
10168 
10169                 /* Update the IO stats */
10170                 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
10171                         ha->xioctl->IOInputMByteCnt +=
10172                             (ha->xioctl->IOInputByteCnt / 0x100000);
10173                         ha->xioctl->IOInputByteCnt %= 0x100000;
10174                 }
10175 
10176                 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
10177                         ha->xioctl->IOOutputMByteCnt +=
10178                             (ha->xioctl->IOOutputByteCnt / 0x100000);
10179                         ha->xioctl->IOOutputByteCnt %= 0x100000;
10180                 }
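                /*
                 * Example of the byte-to-megabyte folding above: 0x340000
                 * accumulated bytes add 3 to the MByte counter
                 * (0x340000 / 0x100000) and leave 0x40000 bytes carried over.
                 */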
10181 
10182                 QL_PM_LOCK(ha);
10183                 if (ha->pm_busy) {
10184                         ha->pm_busy--;
10185                 }
10186                 QL_PM_UNLOCK(ha);
10187         }
10188 
10189         /* Restart timer, if not being stopped. */
10190         if (ql_timer_timeout_id != NULL) {
10191                 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
10192         }
10193 
10194         /* Release global state lock. */
10195         GLOBAL_TIMER_UNLOCK();
10196 
10197         QL_PRINT_6(ha, "done\n");
10198 }
10199 
10200 /*
10201  * ql_timeout_insert
10202  *      Function used to insert a command block onto the
10203  *      watchdog timer queue.
10204  *
10205  *      Note: Must ensure that pkt_timeout is not zero
10206  *                      before calling ql_timeout_insert.
10207  *
10208  * Input:
10209  *      ha:     adapter state pointer.
10210  *      tq:     target queue pointer.
10211  *      sp:     SRB pointer.
10212  *      DEVICE_QUEUE_LOCK must be already obtained.
10213  *
10214  * Context:
10215  *      Kernel context.
10216  */
10217 /* ARGSUSED */
10218 static void
10219 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
10220 {
10221         QL_PRINT_3(ha, "started\n");
10222 
10223         if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
10224                 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
10225                 /*
10226                  * The timeout must be rounded up to a whole number of
10227                  * WATCHDOG_TIME intervals.  As an example, consider a
10228                  * 1 second timeout: if wdg_q_time were allowed to be 1,
10229                  * it could expire on the very next watchdog call, which
10230                  * may be only 1 microsecond away.
10231                  */
10232                 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
10233                     WATCHDOG_TIME;
10234                 /*
10235                  * Added an additional 10 to account for the
10236                  * firmware timer drift which can occur with
10237                  * very long timeout values.
10238                  */
10239                 sp->wdg_q_time += 10;
10240 
10241                 /*
10242                  * Add 6 more to insure watchdog does not timeout at the same
10243                  * time as ISP RISC code timeout.
10244                  */
10245                 sp->wdg_q_time += 6;
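                /*
                 * Worked example, assuming a hypothetical WATCHDOG_TIME of
                 * 5 seconds: a 12 second pkt_timeout gives
                 * (12 + 5 - 1) / 5 = 3 watchdog intervals, plus 10 for
                 * firmware timer drift and 6 to avoid the ISP RISC timeout,
                 * so wdg_q_time ends up as 19.
                 */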
10246 
10247                 /* Save initial time for resetting watchdog time. */
10248                 sp->init_wdg_q_time = sp->wdg_q_time;
10249 
10250                 /* Insert command onto watchdog queue. */
10251                 ql_add_link_b(&tq->wdg, &sp->wdg);
10252 
10253                 sp->flags |= SRB_WATCHDOG_ENABLED;
10254         } else {
10255                 sp->isp_timeout = 0;
10256                 sp->wdg_q_time = 0;
10257                 sp->init_wdg_q_time = 0;
10258         }
10259 
10260         QL_PRINT_3(ha, "done\n");
10261 }
10262 
10263 /*
10264  * ql_watchdog
10265  *      Watchdog timeout handler.  Walks the watchdog queue of
10266  *      every target on every virtual port, updating per-command
10267  *      timeout values and aborting any command whose timeout
10268  *      has expired.  The ql_adapter_state_t * argument is the
10269  *      adapter state structure pointer.
10270  *
10271  * Input:
10272  *      ha:     adapter state pointer.
10273  *
10274  * Context:
10275  *      Kernel context.
10276  */
10277 static void
10278 ql_watchdog(ql_adapter_state_t *ha)
10279 {
10280         ql_link_t               *link;
10281         ql_tgt_t                *tq;
10282         uint16_t                index;
10283         ql_adapter_state_t      *vha;
10284 
10285         QL_PRINT_6(ha, "started\n");
10286 
10287         for (vha = ha; vha != NULL; vha = vha->vp_next) {
10288                 /* Loop through all targets. */
10289                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10290                         for (link = vha->dev[index].first; link != NULL;
10291                             link = link->next) {
10292                                 tq = link->base_address;
10293 
10294                                 /* Try to acquire device queue lock. */
10295                                 if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
10296                                         break;
10297                                 }
10298 
10299                                 if (!(CFG_IST(ha,
10300                                     CFG_ENABLE_LINK_DOWN_REPORTING)) &&
10301                                     (tq->port_down_retry_count == 0)) {
10302                                         /* Release device queue lock. */
10303                                         DEVICE_QUEUE_UNLOCK(tq);
10304                                         continue;
10305                                 }
10306                                 ql_wdg_tq_list(vha, tq);
10307                         }
10308                 }
10309         }
10310         ha->watchdog_timer = WATCHDOG_TIME;
10311 
10312         QL_PRINT_6(ha, "done\n");
10313 }
10314 
10315 /*
10316  * ql_wdg_tq_list
10317  *      Per-target watchdog worker called from ql_watchdog().
10318  *      Updates the timeout values of the commands on a target's
10319  *      watchdog queue and aborts any command whose timeout has
10320  *      expired.  The DEVICE_QUEUE_LOCK is held on entry and is
10321  *      released before returning (see Output below).
10322  *
10323  * Input:
10324  *      ha:     adapter state pointer.
10325  *      tq:     target queue pointer.
10326  *      DEVICE_QUEUE_LOCK must be already obtained.
10327  *
10328  * Output:
10329  *      Releases DEVICE_QUEUE_LOCK upon exit.
10330  *
10331  * Context:
10332  *      Kernel context.
10333  */
10334 static void
10335 ql_wdg_tq_list(ql_adapter_state_t *ha, ql_tgt_t *tq)
10336 {
10337         ql_srb_t        *sp;
10338         ql_link_t       *link, *next_cmd;
10339         ql_lun_t        *lq;
10340         boolean_t       q_sane, timeout = B_FALSE;
10341 
10342         QL_PRINT_6(ha, "started\n");
10343 
10344         /* Find out if this device is in a sane state */
10345         if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
10346             TQF_QUEUE_SUSPENDED)) {
10347                 q_sane = B_FALSE;
10348         } else {
10349                 q_sane = B_TRUE;
10350         }
10351         /* Loop through commands on watchdog queue. */
10352         for (link = tq->wdg.first; link != NULL; link = next_cmd) {
10353                 next_cmd = link->next;
10354                 sp = link->base_address;
10355                 lq = sp->lun_queue;
10356 
10357                 /*
10358                  * For SCSI commands, if everything
10359                  * seems to be going fine and this
10360                  * packet is stuck only because of
10361                  * throttling at the LUN or
10362                  * target level, then do not decrement
10363                  * sp->wdg_q_time.
10364                  */
10365                 if (ha->task_daemon_flags & STATE_ONLINE &&
10366                     !(sp->flags & SRB_ISP_STARTED) &&
10367                     q_sane == B_TRUE &&
10368                     sp->flags & SRB_FCP_CMD_PKT &&
10369                     lq->lun_outcnt >= ha->execution_throttle) {
10370                         continue;
10371                 }
10372 
10373                 if (sp->wdg_q_time != 0) {
10374                         sp->wdg_q_time--;
10375 
10376                         /* Timeout? */
10377                         if (sp->wdg_q_time != 0) {
10378                                 continue;
10379                         }
10380 
10381                         sp->flags |= SRB_COMMAND_TIMEOUT;
10382                         timeout = B_TRUE;
10383                 }
10384         }
10385 
10386         /*
10387          * Loop through commands on watchdog queue and
10388          * abort timed out commands.
10389          */
10390         if (timeout == B_TRUE) {
10391                 for (link = tq->wdg.first; link != NULL; link = next_cmd) {
10392                         sp = link->base_address;
10393                         next_cmd = link->next;
10394 
10395                         if (sp->flags & SRB_COMMAND_TIMEOUT) {
10396                                 ql_remove_link(&tq->wdg, &sp->wdg);
10397                                 sp->flags &= ~(SRB_WATCHDOG_ENABLED |
10398                                     SRB_COMMAND_TIMEOUT);
10399                                 ql_cmd_timeout(ha, tq, sp);
10400                                 next_cmd = tq->wdg.first;
10401                         }
10402                 }
10403         }
10404 
10405         /* Release device queue lock. */
10406         DEVICE_QUEUE_UNLOCK(tq);
10407 
10408         QL_PRINT_6(ha, "done\n");
10409 }
10410 
10411 /*
10412  * ql_cmd_timeout
10413  *      Command timeout handler.
10414  *
10415  * Input:
10416  *      ha:             adapter state pointer.
10417  *      tq:             target queue pointer.
10418  *      sp:             SRB pointer.
10419  *
10420  * Context:
10421  *      Kernel context.
10422  */
10423 static void
10424 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
10425 {
10426         int     rval = 0;
10427 
10428         QL_PRINT_3(ha, "started\n");
10429 
10430         REQUEST_RING_LOCK(ha);
10431         if (!(sp->flags & SRB_ISP_STARTED)) {
10432                 EL(ha, "command timed out in driver, sp=%ph spf=%xh\n",
10433                     (void *)sp, sp->flags);
10434 
10435                 /* if it's on a queue */
10436                 if (sp->cmd.head) {
10437                         /*
10438                          * The pending_cmds queue needs to be
10439                          * protected by the request ring lock.
10440                          */
10441                         ql_remove_link(sp->cmd.head, &sp->cmd);
10442                 }
10443                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10444 
10445                 /* Release request ring and device queue locks. */
10446                 REQUEST_RING_UNLOCK(ha);
10447                 DEVICE_QUEUE_UNLOCK(tq);
10448 
10449                 /* Set timeout status */
10450                 sp->pkt->pkt_reason = CS_TIMEOUT;
10451 
10452                 /* Ensure no retry */
10453                 sp->flags &= ~SRB_RETRY;
10454 
10455                 /* Call done routine to handle completion. */
10456                 ql_done(&sp->cmd, B_FALSE);
10457         } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
10458                 REQUEST_RING_UNLOCK(ha);
10459                 DEVICE_QUEUE_UNLOCK(tq);
10460 
10461                 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10462                     "spf=%xh\n", (void *)sp,
10463                     (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10464                     sp->handle & OSC_INDEX_MASK, sp->flags);
10465 
10466                 if (ha->pha->timeout_cnt++ > TIMEOUT_THRESHOLD ||
10467                     (rval = ql_abort_io(ha, sp)) != QL_SUCCESS) {
10468                         sp->flags |= SRB_COMMAND_TIMEOUT;
10469                         TASK_DAEMON_LOCK(ha);
10470                         ha->task_daemon_flags |= ISP_ABORT_NEEDED;
10471                         TASK_DAEMON_UNLOCK(ha);
10472                         EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
10473                             "needed\n", rval, ha->pha->timeout_cnt);
10474                 }
10475         } else {
10476                 REQUEST_RING_UNLOCK(ha);
10477                 DEVICE_QUEUE_UNLOCK(tq);
10478 
10479                 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10480                     "spf=%xh, isp_abort_needed\n", (void *)sp,
10481                     (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10482                     sp->handle & OSC_INDEX_MASK, sp->flags);
10483 
10484                 INTR_LOCK(ha);
10485                 ha->pha->xioctl->ControllerErrorCount++;
10486                 INTR_UNLOCK(ha);
10487 
10488                 /* Set ISP needs to be reset */
10489                 sp->flags |= SRB_COMMAND_TIMEOUT;
10490 
10491                 if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
10492                         ADAPTER_STATE_LOCK(ha);
10493                         ha->flags |= FW_DUMP_NEEDED;
10494                         ADAPTER_STATE_UNLOCK(ha);
10495                 }
10496 
10497                 TASK_DAEMON_LOCK(ha);
10498                 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
10499                 TASK_DAEMON_UNLOCK(ha);
10500         }
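        /*
         * Re-acquire the device queue lock dropped above; the caller
         * expects it to be held on return.
         */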
10501         DEVICE_QUEUE_LOCK(tq);
10502 
10503         QL_PRINT_3(ha, "done\n");
10504 }
10505 
10506 /*
10507  * ql_cmd_wait
10508  *      Stall driver until all outstanding commands are returned.
10509  *
10510  * Input:
10511  *      ha = adapter state pointer.
10512  *
10513  * Context:
10514  *      Kernel context.
10515  */
10516 void
10517 ql_cmd_wait(ql_adapter_state_t *ha)
10518 {
10519         uint16_t                index;
10520         ql_link_t               *link;
10521         ql_tgt_t                *tq;
10522         ql_adapter_state_t      *vha;
10523 
10524         QL_PRINT_3(ha, "started\n");
10525 
10526         /* Wait for all outstanding commands to be returned. */
10527         (void) ql_wait_outstanding(ha);
10528 
10529         /*
10530          * clear out internally queued commands
10531          */
10532         for (vha = ha; vha != NULL; vha = vha->vp_next) {
10533                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10534                         for (link = vha->dev[index].first; link != NULL;
10535                             link = link->next) {
10536                                 tq = link->base_address;
10537                                 if (tq &&
10538                                     (!(tq->prli_svc_param_word_3 &
10539                                     PRLI_W3_RETRY) ||
10540                                     ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
10541                                         (void) ql_abort_device(vha, tq, 0);
10542                                 }
10543                         }
10544                 }
10545         }
10546 
10547         QL_PRINT_3(ha, "done\n");
10548 }
10549 
10550 /*
10551  * ql_wait_outstanding
10552  *      Wait for all outstanding commands to complete.
10553  *
10554  * Input:
10555  *      ha = adapter state pointer.
10556  *
10557  * Returns:
10558  *      index - the index for ql_srb into outstanding_cmds.
10559  *
10560  * Context:
10561  *      Kernel context.
10562  */
10563 static uint16_t
10564 ql_wait_outstanding(ql_adapter_state_t *ha)
10565 {
10566         ql_srb_t        *sp;
10567         uint16_t        index, count;
10568 
10569         QL_PRINT_3(ha, "started\n");
10570 
10571         count = ql_osc_wait_count;
10572         for (index = 1; index < ha->pha->osc_max_cnt; index++) {
10573                 if (ha->pha->pending_cmds.first != NULL) {
10574                         ql_start_iocb(ha, NULL);
10575                         index = 1;
10576                 }
10577                 if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
10578                     sp != QL_ABORTED_SRB(ha) &&
10579                     (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
10580                         if (count-- != 0) {
10581                                 ql_delay(ha, 10000);
10582                                 index = 0;
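                                /*
                                 * Setting index to 0 restarts the scan at
                                 * entry 1 after the for-loop increment.
                                 */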
10583                         } else {
10584                                 EL(ha, "still in OSC,sp=%ph,oci=%d,sph=%xh,"
10585                                     "spf=%xh\n", (void *) sp, index, sp->handle,
10586                                     sp->flags);
10587                                 break;
10588                         }
10589                 }
10590         }
10591 
10592         QL_PRINT_3(ha, "done\n");
10593 
10594         return (index);
10595 }
10596 
10597 /*
10598  * ql_restart_queues
10599  *      Restart device queues.
10600  *
10601  * Input:
10602  *      ha = adapter state pointer.
10603  *      DEVICE_QUEUE_LOCK must be released.
10604  *
10605  * Context:
10606  *      Interrupt or Kernel context, no mailbox commands allowed.
10607  */
10608 void
10609 ql_restart_queues(ql_adapter_state_t *ha)
10610 {
10611         ql_link_t               *link, *link2;
10612         ql_tgt_t                *tq;
10613         ql_lun_t                *lq;
10614         uint16_t                index;
10615         ql_adapter_state_t      *vha;
10616 
10617         QL_PRINT_3(ha, "started\n");
10618 
10619         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10620                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10621                         for (link = vha->dev[index].first; link != NULL;
10622                             link = link->next) {
10623                                 tq = link->base_address;
10624 
10625                                 /* Acquire device queue lock. */
10626                                 DEVICE_QUEUE_LOCK(tq);
10627 
10628                                 tq->flags &= ~TQF_QUEUE_SUSPENDED;
10629 
10630                                 for (link2 = tq->lun_queues.first;
10631                                     link2 != NULL; link2 = link2->next) {
10632                                         lq = link2->base_address;
10633 
10634                                         if (lq->cmd.first != NULL) {
10635                                                 ql_next(vha, lq);
10636                                                 DEVICE_QUEUE_LOCK(tq);
10637                                         }
10638                                 }
10639 
10640                                 /* Release device queue lock. */
10641                                 DEVICE_QUEUE_UNLOCK(tq);
10642                         }
10643                 }
10644         }
10645 
10646         QL_PRINT_3(ha, "done\n");
10647 }
10648 
10649 /*
10650  * ql_iidma
10651  *      Setup iiDMA parameters to firmware
10652  *
10653  * Input:
10654  *      ha = adapter state pointer.
10655  *      DEVICE_QUEUE_LOCK must be released.
10656  *
10657  * Context:
10658  *      Interrupt or Kernel context, no mailbox commands allowed.
10659  */
10660 static void
10661 ql_iidma(ql_adapter_state_t *ha)
10662 {
10663         ql_link_t       *link;
10664         ql_tgt_t        *tq;
10665         uint16_t        index;
10666         char            buf[256];
10667         uint32_t        data;
10668 
10669         QL_PRINT_3(ha, "started\n");
10670 
10671         if (!CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
10672                 QL_PRINT_3(ha, "done\n");
10673                 return;
10674         }
10675 
10676         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10677                 for (link = ha->dev[index].first; link != NULL;
10678                     link = link->next) {
10679                         tq = link->base_address;
10680 
10681                         if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10682                                 continue;
10683                         }
10684 
10685                         /* Acquire device queue lock. */
10686                         DEVICE_QUEUE_LOCK(tq);
10687 
10688                         tq->flags &= ~TQF_IIDMA_NEEDED;
10689 
10690                         /* Release device queue lock. */
10691                         DEVICE_QUEUE_UNLOCK(tq);
10692 
10693                         if ((tq->loop_id > LAST_N_PORT_HDL) ||
10694                             (tq->d_id.b24 == FS_MANAGEMENT_SERVER) ||
10695                             (tq->flags & TQF_INITIATOR_DEVICE) ||
10696                             (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10697                                 continue;
10698                         }
10699 
10700                         /* Get the iiDMA persistent data */
10701                         (void) snprintf(buf, sizeof (buf),
10702                             "iidma-rate-%02x%02x%02x%02x%02x"
10703                             "%02x%02x%02x", tq->port_name[0],
10704                             tq->port_name[1], tq->port_name[2],
10705                             tq->port_name[3], tq->port_name[4],
10706                             tq->port_name[5], tq->port_name[6],
10707                             tq->port_name[7]);
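                        /*
                         * For example, a hypothetical remote port WWPN of
                         * 21:00:00:e0:8b:01:02:03 would yield the property
                         * name "iidma-rate-210000e08b010203".
                         */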
10708 
10709                         if ((data = ql_get_prop(ha, buf)) ==
10710                             0xffffffff) {
10711                                 tq->iidma_rate = IIDMA_RATE_NDEF;
10712                         } else {
10713                                 switch (data) {
10714                                 case IIDMA_RATE_4GB:
10715                                 case IIDMA_RATE_8GB:
10716                                 case IIDMA_RATE_10GB:
10717                                 case IIDMA_RATE_16GB:
10718                                 case IIDMA_RATE_32GB:
10719                                         tq->iidma_rate = data;
10720                                         break;
10721                                 default:
10722                                         EL(ha, "invalid data for "
10723                                             "parameter: %s: %xh\n",
10724                                             buf, data);
10725                                         tq->iidma_rate =
10726                                             IIDMA_RATE_NDEF;
10727                                         break;
10728                                 }
10729                         }
10730 
10731                         EL(ha, "d_id = %xh iidma_rate = %xh\n",
10732                             tq->d_id.b24, tq->iidma_rate);
10733 
10734                         /* Set the firmware's iiDMA rate */
10735                         if (!CFG_IST(ha, CFG_FCOE_SUPPORT)) {
10736                                 if (tq->iidma_rate <= IIDMA_RATE_MAX) {
10737                                         data = ql_iidma_rate(ha, tq->loop_id,
10738                                             &tq->iidma_rate,
10739                                             EXT_IIDMA_MODE_SET);
10740                                         if (data != QL_SUCCESS) {
10741                                                 EL(ha, "mbx failed: %xh\n",
10742                                                     data);
10743                                         }
10744                                 }
10745                         }
10746                 }
10747         }
10748 
10749         QL_PRINT_3(ha, "done\n");
10750 }
10751 
10752 /*
10753  * ql_abort_queues
10754  *      Abort all commands on device queues.
10755  *
10756  * Input:
10757  *      ha = adapter state pointer.
10758  *
10759  * Context:
10760  *      Interrupt or Kernel context, no mailbox commands allowed.
10761  */
10762 void
10763 ql_abort_queues(ql_adapter_state_t *ha)
10764 {
10765         ql_link_t               *link;
10766         ql_tgt_t                *tq;
10767         ql_srb_t                *sp;
10768         uint16_t                index;
10769         ql_adapter_state_t      *vha;
10770 
10771         QL_PRINT_10(ha, "started\n");
10772 
10773         /* Return all commands in outstanding command list. */
10774         INTR_LOCK(ha);
10775 
10776         /* Place all commands in outstanding cmd list on device queue. */
10777         for (index = 1; index < ha->osc_max_cnt; index++) {
10778                 if (ha->pending_cmds.first != NULL) {
10779                         INTR_UNLOCK(ha);
10780                         ql_start_iocb(ha, NULL);
10781                         /* Delay for system */
10782                         ql_delay(ha, 10000);
10783                         INTR_LOCK(ha);
10784                         index = 1;
10785                 }
10786                 sp = ha->outstanding_cmds[index];
10787 
10788                 if (sp && (sp == QL_ABORTED_SRB(ha) || sp->ha != ha)) {
10789                         continue;
10790                 }
10791 
10792                 /* skip devices capable of FCP2 retries */
10793                 if (sp != NULL &&
10794                     (sp->lun_queue == NULL ||
10795                     (tq = sp->lun_queue->target_queue) == NULL ||
10796                     !(tq->prli_svc_param_word_3 & PRLI_W3_RETRY) ||
10797                     ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
10798                         ha->outstanding_cmds[index] = NULL;
10799                         sp->handle = 0;
10800                         sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10801 
10802                         INTR_UNLOCK(ha);
10803 
10804                         /* Set ending status. */
10805                         sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10806                         sp->flags |= SRB_ISP_COMPLETED;
10807 
10808                         /* Call done routine to handle completions. */
10809                         sp->cmd.next = NULL;
10810                         ql_done(&sp->cmd, B_FALSE);
10811 
10812                         INTR_LOCK(ha);
10813                 }
10814         }
10815         INTR_UNLOCK(ha);
10816 
10817         for (vha = ha; vha != NULL; vha = vha->vp_next) {
10818                 QL_PRINT_10(vha, "abort instance\n");
10819                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10820                         for (link = vha->dev[index].first; link != NULL;
10821                             link = link->next) {
10822                                 tq = link->base_address;
10823                                 /* skip devices capable of FCP2 retries */
10824                                 if (!(tq->prli_svc_param_word_3 &
10825                                     PRLI_W3_RETRY) ||
10826                                     ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
10827                                         /*
10828                                          * Set port unavailable status and
10829                                          * return all commands on the
10830                                          * device's queues.
10831                                          */
10832                                         ql_abort_device_queues(ha, tq);
10833                                 }
10834                         }
10835                 }
10836         }
10837         QL_PRINT_3(ha, "done\n");
10838 }
10839 
10840 /*
10841  * ql_abort_device_queues
10842  *      Abort all commands on device queues.
10843  *
10844  * Input:
10845  *      ha = adapter state pointer.
 *      tq = target queue pointer.
10846  *
10847  * Context:
10848  *      Interrupt or Kernel context, no mailbox commands allowed.
10849  */
10850 static void
10851 ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10852 {
10853         ql_link_t       *lun_link, *cmd_link;
10854         ql_srb_t        *sp;
10855         ql_lun_t        *lq;
10856 
10857         QL_PRINT_10(ha, "started\n");
10858 
10859         DEVICE_QUEUE_LOCK(tq);
10860         ql_requeue_pending_cmds(ha, tq);
10861 
10862         for (lun_link = tq->lun_queues.first; lun_link != NULL;
10863             lun_link = lun_link->next) {
10864                 lq = lun_link->base_address;
10865 
10866                 cmd_link = lq->cmd.first;
10867                 while (cmd_link != NULL) {
10868                         sp = cmd_link->base_address;
10869 
10870                         /* Remove srb from device cmd queue. */
10871                         ql_remove_link(&lq->cmd, &sp->cmd);
10872 
10873                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10874 
10875                         DEVICE_QUEUE_UNLOCK(tq);
10876 
10877                         /* Set ending status. */
10878                         sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10879 
10880                         /* Call done routine to handle completion. */
10881                         ql_done(&sp->cmd, B_FALSE);
10882 
10883                         /* Delay for system */
10884                         ql_delay(ha, 10000);
10885 
10886                         DEVICE_QUEUE_LOCK(tq);
10887                         cmd_link = lq->cmd.first;
10888                 }
10889         }
10890         DEVICE_QUEUE_UNLOCK(tq);
10891 
10892         QL_PRINT_10(ha, "done\n");
10893 }
10894 
10895 /*
10896  * ql_loop_resync
10897  *      Resync with fibre channel devices.
10898  *
10899  * Input:
10900  *      ha = adapter state pointer.
10901  *      DEVICE_QUEUE_LOCK must be released.
10902  *
10903  * Context:
10904  *      Kernel context.
10905  */
10906 static void
10907 ql_loop_resync(ql_adapter_state_t *ha)
10908 {
10909         int rval;
10910 
10911         QL_PRINT_3(ha, "started\n");
10912 
10913         if (ha->flags & IP_INITIALIZED) {
10914                 (void) ql_shutdown_ip(ha);
10915         }
10916 
10917         rval = ql_fw_ready(ha, 10);
10918 
10919         TASK_DAEMON_LOCK(ha);
10920         ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10921         TASK_DAEMON_UNLOCK(ha);
10922 
10923         /* Set loop online, if it really is. */
10924         if (rval == QL_SUCCESS) {
10925                 ql_loop_online(ha);
10926                 QL_PRINT_3(ha, "done\n");
10927         } else {
10928                 EL(ha, "failed, rval = %xh\n", rval);
10929         }
10930 }
10931 
10932 /*
10933  * ql_loop_online
10934  *      Set loop online status if it really is online.
10935  *
10936  * Input:
10937  *      ha = adapter state pointer.
10938  *      DEVICE_QUEUE_LOCK must be released.
10939  *
10940  * Context:
10941  *      Kernel context.
10942  */
10943 void
10944 ql_loop_online(ql_adapter_state_t *ha)
10945 {
10946         ql_adapter_state_t      *vha;
10947 
10948         QL_PRINT_3(ha, "started\n");
10949 
10950         /* Inform the FC Transport that the hardware is online. */
10951         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10952                 if (!(vha->task_daemon_flags &
10953                     (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10954                         /* Restart IP if it was shutdown. */
10955                         if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10956                             !(vha->flags & IP_INITIALIZED)) {
10957                                 (void) ql_initialize_ip(vha);
10958                                 ql_isp_rcvbuf(vha);
10959                         }
10960 
10961                         if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10962                             FC_PORT_STATE_MASK(vha->state) !=
10963                             FC_STATE_ONLINE) {
10964                                 vha->state = FC_PORT_SPEED_MASK(vha->state);
10965                                 if (vha->topology & QL_LOOP_CONNECTION) {
10966                                         vha->state |= FC_STATE_LOOP;
10967                                 } else {
10968                                         vha->state |= FC_STATE_ONLINE;
10969                                 }
10970                                 TASK_DAEMON_LOCK(ha);
10971                                 vha->task_daemon_flags |= FC_STATE_CHANGE;
10972                                 TASK_DAEMON_UNLOCK(ha);
10973                         }
10974                 }
10975         }
10976 
10977         ql_awaken_task_daemon(ha, NULL, 0, 0);
10978 
10979         /* Restart device queues that may have been stopped. */
10980         ql_restart_queues(ha);
10981 
10982         QL_PRINT_3(ha, "done\n");
10983 }
10984 
10985 /*
10986  * ql_fca_handle_to_state
10987  *      Verifies handle to be correct.
10988  *
10989  * Input:
10990  *      fca_handle = pointer to state structure.
10991  *
10992  * Returns:
10993  *      NULL = failure
10994  *
10995  * Context:
10996  *      Kernel context.
10997  */
10998 static ql_adapter_state_t *
10999 ql_fca_handle_to_state(opaque_t fca_handle)
11000 {
11001 #ifdef  QL_DEBUG_ROUTINES
11002         ql_link_t               *link;
11003         ql_adapter_state_t      *ha = NULL;
11004         ql_adapter_state_t      *vha = NULL;
11005 
11006         for (link = ql_hba.first; link != NULL; link = link->next) {
11007                 ha = link->base_address;
11008                 for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
11009                         if ((opaque_t)vha == fca_handle) {
11010                                 ha = vha;
11011                                 break;
11012                         }
11013                 }
11014                 if ((opaque_t)ha == fca_handle) {
11015                         break;
11016                 } else {
11017                         ha = NULL;
11018                 }
11019         }
11020 
11021         if (ha == NULL) {
11022                 /*EMPTY*/
11023                 QL_PRINT_2(ha, "failed\n");
11024         }
11025 
11026 #endif /* QL_DEBUG_ROUTINES */
11027 
11028         return ((ql_adapter_state_t *)fca_handle);
11029 }
11030 
11031 /*
11032  * ql_d_id_to_queue
11033  *      Locate device queue that matches destination ID.
11034  *
11035  * Input:
11036  *      ha = adapter state pointer.
11037  *      d_id = destination ID
11038  *
11039  * Returns:
11040  *      NULL = failure
11041  *
11042  * Context:
11043  *      Interrupt or Kernel context, no mailbox commands allowed.
 
11100 /*
11101  * ql_kstat_update
11102  *      Updates kernel statistics.
11103  *
11104  * Input:
11105  *      ksp - driver kernel statistics structure pointer.
11106  *      rw - function to perform
11107  *
11108  * Returns:
11109  *      0 or EACCES
11110  *
11111  * Context:
11112  *      Kernel context.
11113  */
11114 /* ARGSUSED */
11115 static int
11116 ql_kstat_update(kstat_t *ksp, int rw)
11117 {
11118         int     rval;
11119 
11120         QL_PRINT_3(ksp->ks_private, "started\n");
11121 
11122         if (rw == KSTAT_WRITE) {
11123                 rval = EACCES;
11124         } else {
11125                 rval = 0;
11126         }
11127 
11128         if (rval != 0) {
11129                 /*EMPTY*/
11130                 QL_PRINT_2(ksp->ks_private, "failed, rval = %xh\n", rval);
11131         } else {
11132                 /*EMPTY*/
11133                 QL_PRINT_3(ksp->ks_private, "done\n");
11134         }
11135         return (rval);
11136 }
11137 
11138 /*
11139  * ql_load_flash
11140  *      Loads flash.
11141  *
11142  * Input:
11143  *      ha:     adapter state pointer.
11144  *      dp:     data pointer.
11145  *      size:   data length.
11146  *
11147  * Returns:
11148  *      ql local function return status code.
11149  *
11150  * Context:
11151  *      Kernel context.
11152  */
11153 int
11154 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
11155 {
11156         uint32_t        cnt;
11157         int             rval;
11158         uint32_t        size_to_offset;
11159         uint32_t        size_to_compare;
11160         int             erase_all;
11161 
11162         if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
11163                 return (ql_24xx_load_flash(ha, dp, size, 0));
11164         }
11165 
11166         QL_PRINT_3(ha, "started\n");
11167 
11168         size_to_compare = 0x20000;
11169         size_to_offset = 0;
11170         erase_all = 0;
11171         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11172                 if (size == 0x80000) {
11173                         /* Request to flash the entire chip. */
11174                         size_to_compare = 0x80000;
11175                         erase_all = 1;
11176                 } else {
11177                         size_to_compare = 0x40000;
11178                         if (ql_flash_sbus_fpga) {
11179                                 size_to_offset = 0x40000;
11180                         }
11181                 }
11182         }
11183         if (size > size_to_compare) {
11184                 rval = QL_FUNCTION_PARAMETER_ERROR;
11185                 EL(ha, "failed=%xh\n", rval);
11186                 return (rval);
11187         }
11188 
11189         /* Enable Flash Read/Write. */
11190         ql_flash_enable(ha);
11191 
11192         /* Erase flash prior to write. */
11193         rval = ql_erase_flash(ha, erase_all);
11194 
11195         if (rval == QL_SUCCESS) {
11196                 /* Write data to flash. */
11197                 for (cnt = 0; cnt < size; cnt++) {
11198                         /* Allow other system activity. */
11199                         if (cnt % 0x1000 == 0) {
11200                                 ql_delay(ha, 10000);
11201                         }
11202                         rval = ql_program_flash_address(ha,
11203                             cnt + size_to_offset, *dp++);
11204                         if (rval != QL_SUCCESS) {
11205                                 break;
11206                         }
11207                 }
11208         }
11209 
11210         ql_flash_disable(ha);
11211 
11212         if (rval != QL_SUCCESS) {
11213                 EL(ha, "failed=%xh\n", rval);
11214         } else {
11215                 /*EMPTY*/
11216                 QL_PRINT_3(ha, "done\n");
11217         }
11218         return (rval);
11219 }
11220 
11221 /*
11222  * ql_program_flash_address
11223  *      Program flash address.
11224  *
11225  * Input:
11226  *      ha = adapter state pointer.
11227  *      addr = flash byte address.
11228  *      data = data to be written to flash.
11229  *
11230  * Returns:
11231  *      ql local function return status code.
11232  *
11233  * Context:
11234  *      Kernel context.
11235  */
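      /*
       * Implementation note (sketch of the code below): the byte is written
       * with a standard 0x5555/0x2aaa unlock sequence (0xaa, 0x55, 0xa0)
       * followed by the data byte at its address; SBUS parts only need the
       * final 0xa0 command write.  Completion is detected by ql_poll_flash().
       */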
11236 static int
11237 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11238 {
11239         int rval;
11240 
11241         QL_PRINT_3(ha, "started\n");
11242 
11243         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11244                 ql_write_flash_byte(ha, 0x5555, 0xa0);
11245                 ql_write_flash_byte(ha, addr, data);
11246         } else {
11247                 /* Write Program Command Sequence */
11248                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11249                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11250                 ql_write_flash_byte(ha, 0x5555, 0xa0);
11251                 ql_write_flash_byte(ha, addr, data);
11252         }
11253 
11254         /* Wait for write to complete. */
11255         rval = ql_poll_flash(ha, addr, data);
11256 
11257         if (rval != QL_SUCCESS) {
11258                 EL(ha, "failed=%xh\n", rval);
11259         } else {
11260                 /*EMPTY*/
11261                 QL_PRINT_3(ha, "done\n");
11262         }
11263         return (rval);
11264 }
11265 
11266 /*
11267  * ql_erase_flash
11268  *      Erases entire flash.
11269  *
11270  * Input:
11271  *      ha = adapter state pointer.
       *      erase_all = non-zero to erase the entire chip; zero lets SBUS
       *                  cards preserve the FCode/FPGA half across the erase.
11272  *
11273  * Returns:
11274  *      ql local function return status code.
11275  *
11276  * Context:
11277  *      Kernel context.
11278  */
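      /*
       * Implementation note: the chip erase command sequence written below
       * (0xaa/0x55 unlock, 0x80, 0xaa/0x55, 0x10 at 0x5555/0x2aaa) erases
       * the whole part.  On SBUS cards, when erase_all is not set, the FCode
       * or FPGA half of the flash is first copied to a temporary buffer and
       * reprogrammed after the erase so only the other half is updated.
       */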
11279 int
11280 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
11281 {
11282         int             rval;
11283         uint32_t        erase_delay = 2000000;
11284         uint32_t        sStartAddr;
11285         uint32_t        ssize;
11286         uint32_t        cnt;
11287         uint8_t         *bfp;
11288         uint8_t         *tmp;
11289 
11290         QL_PRINT_3(ha, "started\n");
11291 
11292         if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
11293                 if (ql_flash_sbus_fpga == 1) {
11294                         ssize = QL_SBUS_FCODE_SIZE;
11295                         sStartAddr = QL_FCODE_OFFSET;
11296                 } else {
11297                         ssize = QL_FPGA_SIZE;
11298                         sStartAddr = QL_FPGA_OFFSET;
11299                 }
11300 
11301                 erase_delay = 20000000;
11302 
11303                 bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
11304 
11305                 /* Save the section of flash we're not updating to buffer */
11306                 tmp = bfp;
11307                 for (cnt = sStartAddr; cnt < ssize + sStartAddr; cnt++) {
11308                         /* Allow other system activity. */
11309                         if (cnt % 0x1000 == 0) {
11310                                 ql_delay(ha, 10000);
11311                         }
11312                         *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
11313                 }
11314 
11315                 /* Chip Erase Command Sequence */
11316                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11317                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11318                 ql_write_flash_byte(ha, 0x5555, 0x80);
11319                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11320                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11321                 ql_write_flash_byte(ha, 0x5555, 0x10);
11322 
11323                 ql_delay(ha, erase_delay);
11324 
11325                 /* Wait for erase to complete. */
11326                 rval = ql_poll_flash(ha, 0, 0x80);
11327 
11328                 if (rval == QL_SUCCESS) {
11329                         /* Restore the section we saved off */
11330                         tmp = bfp;
11331                         for (cnt = sStartAddr; cnt < ssize + sStartAddr;
11332                             cnt++) {
11333                                 /* Allow other system activity. */
11334                                 if (cnt % 0x1000 == 0) {
11335                                         ql_delay(ha, 10000);
11336                                 }
11337                                 rval = ql_program_flash_address(ha, cnt,
11338                                     *tmp++);
11339                                 if (rval != QL_SUCCESS) {
11340                                         break;
11341                                 }
11342                         }
11343                 }
11344                 kmem_free(bfp, ssize);
11345         } else {
11346                 /* Chip Erase Command Sequence */
11347                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11348                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11349                 ql_write_flash_byte(ha, 0x5555, 0x80);
11350                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11351                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11352                 ql_write_flash_byte(ha, 0x5555, 0x10);
11353 
11354                 ql_delay(ha, erase_delay);
11355 
11356                 /* Wait for erase to complete. */
11357                 rval = ql_poll_flash(ha, 0, 0x80);
11358         }
11359 
11360         if (rval != QL_SUCCESS) {
11361                 EL(ha, "failed=%xh\n", rval);
11362         } else {
11363                 /*EMPTY*/
11364                 QL_PRINT_3(ha, "done\n");
11365         }
11366         return (rval);
11367 }
11368 
11369 /*
11370  * ql_poll_flash
11371  *      Polls flash for completion.
11372  *
11373  * Input:
11374  *      ha = adapter state pointer.
11375  *      addr = flash byte address.
11376  *      data = data to be polled.
11377  *
11378  * Returns:
11379  *      ql local function return status code.
11380  *
11381  * Context:
11382  *      Kernel context.
11383  */
11384 int
11385 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
11386 {
11387         uint8_t         flash_data;
11388         uint32_t        cnt;
11389         int             rval = QL_FUNCTION_FAILED;
11390 
11391         QL_PRINT_3(ha, "started\n");
11392 
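              /*
               * Data polling: while a program/erase operation is in progress
               * the part returns the complement of bit 7 of the programmed
               * data; once bit 7 matches poll_data the operation is done.
               * Bit 5 set indicates the internal operation timer expired, so
               * only a couple more reads are attempted before giving up.
               */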
11393         poll_data = (uint8_t)(poll_data & BIT_7);
11394 
11395         /* Wait for 30 seconds for command to finish. */
11396         for (cnt = 30000000; cnt; cnt--) {
11397                 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11398 
11399                 if ((flash_data & BIT_7) == poll_data) {
11400                         rval = QL_SUCCESS;
11401                         break;
11402                 }
11403                 if (flash_data & BIT_5 && cnt > 2) {
11404                         cnt = 2;
11405                 }
11406                 drv_usecwait(1);
11407         }
11408 
11409         if (rval != QL_SUCCESS) {
11410                 EL(ha, "failed=%xh\n", rval);
11411         } else {
11412                 /*EMPTY*/
11413                 QL_PRINT_3(ha, "done\n");
11414         }
11415         return (rval);
11416 }
11417 
11418 /*
11419  * ql_flash_enable
11420  *      Setup flash for reading/writing.
11421  *
11422  * Input:
11423  *      ha = adapter state pointer.
11424  *
11425  * Context:
11426  *      Kernel context.
11427  */
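      /*
       * Implementation note: on SBUS cards write access is gated by the
       * SBUS_FLASH_WRITE_ENABLE bit in the FPGA configuration register and
       * the read/reset sequence uses the FPGA word addresses (0xaaa/0x555);
       * on PCI cards the ISP_FLASH_ENABLE bit in ctrl_status is set and the
       * reset sequence is issued at 0x5555/0x2aaa.
       */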
11428 void
11429 ql_flash_enable(ql_adapter_state_t *ha)
11430 {
11431         uint16_t        data;
11432 
11433         QL_PRINT_3(ha, "started\n");
11434 
11435         /* Enable Flash Read/Write. */
11436         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11437                 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11438                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11439                 data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
11440                 ddi_put16(ha->sbus_fpga_dev_handle,
11441                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11442                 /* Read reset command sequence */
11443                 ql_write_flash_byte(ha, 0xaaa, 0xaa);
11444                 ql_write_flash_byte(ha, 0x555, 0x55);
11445                 ql_write_flash_byte(ha, 0xaaa, 0x20);
11446                 ql_write_flash_byte(ha, 0x555, 0xf0);
11447         } else {
11448                 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
11449                     ISP_FLASH_ENABLE);
11450                 WRT16_IO_REG(ha, ctrl_status, data);
11451 
11452                 /* Read/Reset Command Sequence */
11453                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11454                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11455                 ql_write_flash_byte(ha, 0x5555, 0xf0);
11456         }
11457         (void) ql_read_flash_byte(ha, 0);
11458 
11459         QL_PRINT_3(ha, "done\n");
11460 }
11461 
11462 /*
11463  * ql_flash_disable
11464  *      Disable flash and allow RISC to run.
11465  *
11466  * Input:
11467  *      ha = adapter state pointer.
11468  *
11469  * Context:
11470  *      Kernel context.
11471  */
11472 void
11473 ql_flash_disable(ql_adapter_state_t *ha)
11474 {
11475         uint16_t        data;
11476 
11477         QL_PRINT_3(ha, "started\n");
11478 
11479         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11480                 /*
11481                  * Lock the flash back up.
11482                  */
11483                 ql_write_flash_byte(ha, 0x555, 0x90);
11484                 ql_write_flash_byte(ha, 0x555, 0x0);
11485 
11486                 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11487                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11488                 data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
11489                 ddi_put16(ha->sbus_fpga_dev_handle,
11490                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11491         } else {
11492                 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
11493                     ~ISP_FLASH_ENABLE);
11494                 WRT16_IO_REG(ha, ctrl_status, data);
11495         }
11496 
11497         QL_PRINT_3(ha, "done\n");
11498 }
11499 
11500 /*
11501  * ql_write_flash_byte
11502  *      Write byte to flash.
11503  *
11504  * Input:
11505  *      ha = adapter state pointer.
11506  *      addr = flash byte address.
11507  *      data = data to be written.
11508  *
11509  * Context:
11510  *      Kernel context.
11511  */
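      /*
       * Implementation note: on PCI cards the flash is accessed through a
       * 64 KB window, so bit 16 of the byte address is reflected in the
       * ISP_FLASH_64K_BANK bit of ctrl_status before the access; ISP2322
       * and ISP6322 parts additionally map address bits 16-19 into
       * ctrl_status bits 4-7.
       */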
11512 void
11513 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11514 {
11515         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11516                 ddi_put16(ha->sbus_fpga_dev_handle,
11517                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11518                     LSW(addr));
11519                 ddi_put16(ha->sbus_fpga_dev_handle,
11520                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11521                     MSW(addr));
11522                 ddi_put16(ha->sbus_fpga_dev_handle,
11523                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11524                     (uint16_t)data);
11525         } else {
11526                 uint16_t bank_select;
11527 
11528                 /* Setup bit 16 of flash address. */
11529                 bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11530 
11531                 if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
11532                         bank_select = (uint16_t)(bank_select & ~0xf0);
11533                         bank_select = (uint16_t)(bank_select |
11534                             ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11535                         WRT16_IO_REG(ha, ctrl_status, bank_select);
11536                 } else {
11537                         if (addr & BIT_16 && !(bank_select &
11538                             ISP_FLASH_64K_BANK)) {
11539                                 bank_select = (uint16_t)(bank_select |
11540                                     ISP_FLASH_64K_BANK);
11541                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11542                         } else if (!(addr & BIT_16) && bank_select &
11543                             ISP_FLASH_64K_BANK) {
11544                                 bank_select = (uint16_t)(bank_select &
11545                                     ~ISP_FLASH_64K_BANK);
11546                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11547                         }
11548                 }
11549 
11550                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11551                         WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
 
11573  */
11574 uint8_t
11575 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11576 {
11577         uint8_t data;
11578 
11579         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11580                 ddi_put16(ha->sbus_fpga_dev_handle,
11581                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11582                     LSW(addr));
11583                 ddi_put16(ha->sbus_fpga_dev_handle,
11584                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11585                     MSW(addr));
11586                 data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11587                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11588         } else {
11589                 uint16_t        bank_select;
11590 
11591                 /* Setup bit 16 of flash address. */
11592                 bank_select = RD16_IO_REG(ha, ctrl_status);
11593                 if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
11594                         bank_select = (uint16_t)(bank_select & ~0xf0);
11595                         bank_select = (uint16_t)(bank_select |
11596                             ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11597                         WRT16_IO_REG(ha, ctrl_status, bank_select);
11598                 } else {
11599                         if (addr & BIT_16 &&
11600                             !(bank_select & ISP_FLASH_64K_BANK)) {
11601                                 bank_select = (uint16_t)(bank_select |
11602                                     ISP_FLASH_64K_BANK);
11603                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11604                         } else if (!(addr & BIT_16) &&
11605                             bank_select & ISP_FLASH_64K_BANK) {
11606                                 bank_select = (uint16_t)(bank_select &
11607                                     ~ISP_FLASH_64K_BANK);
11608                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11609                         }
11610                 }
11611 
11612                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11613                         WRT16_IO_REG(ha, flash_address, addr);
 
11625  * ql_24xx_flash_id
11626  *      Get flash IDs.
11627  *
11628  * Input:
11629  *      ha:             adapter state pointer.
11630  *
11631  * Returns:
11632  *      ql local function return status code.
11633  *
11634  * Context:
11635  *      Kernel context.
11636  */
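      /*
       * Implementation note: the 32-bit ID word read from the flash
       * configuration space is unpacked as manufacturer (low byte), device
       * id (next byte) and size code (third byte).  If the part does not
       * return an ID, an Atmel 1 MB part is assumed as the default.
       */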
11637 int
11638 ql_24xx_flash_id(ql_adapter_state_t *vha)
11639 {
11640         int                     rval;
11641         uint32_t                fdata = 0;
11642         ql_adapter_state_t      *ha = vha->pha;
11643         ql_xioctl_t             *xp = ha->xioctl;
11644 
11645         QL_PRINT_3(ha, "started\n");
11646 
11647         rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11648         if (CFG_IST(ha, CFG_CTRL_24XX)) {
11649                 if (rval != QL_SUCCESS || fdata == 0) {
11650                         fdata = 0;
11651                         rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x39F,
11652                             &fdata);
11653                 }
11654         } else {
11655                 fdata = 0;
11656                 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11657                     (CFG_IST(ha, CFG_CTRL_25XX) ? 0x49F : 0x39F), &fdata);
11658         }
11659 
11660         if (rval != QL_SUCCESS) {
11661                 EL(ha, "24xx read_flash failed=%xh\n", rval);
11662         } else if (fdata != 0) {
11663                 xp->fdesc.flash_manuf = LSB(LSW(fdata));
11664                 xp->fdesc.flash_id = MSB(LSW(fdata));
11665                 xp->fdesc.flash_len = LSB(MSW(fdata));
11666         } else {
11667                 xp->fdesc.flash_manuf = ATMEL_FLASH;
11668                 xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11669                 xp->fdesc.flash_len = 0;
11670         }
11671 
11672         QL_PRINT_3(ha, "done\n");
11673 
11674         return (rval);
11675 }
11676 
11677 /*
11678  * ql_24xx_load_flash
11679  *      Loads flash.
11680  *
11681  * Input:
11682  *      ha = adapter state pointer.
11683  *      dp = data pointer.
11684  *      size = data length in bytes.
11685  *      faddr = flash byte address (must be 32-bit word aligned).
11686  *
11687  * Returns:
11688  *      ql local function return status code.
11689  *
11690  * Context:
11691  *      Kernel context.
11692  */
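      /*
       * Implementation note (sketch of the flow below): writes are done in
       * 32-bit words.  The flash is unprotected, each sector is erased when
       * its first word address is reached (via ql_8021_rom_erase, the
       * FAC_ERASE_SECTOR mailbox command, or a direct block erase command
       * depending on the controller), and the data is then programmed either
       * by DMA bursts of up to half a sector, when the word address is
       * 64-word aligned, or one word at a time.  The flash is re-protected
       * before returning.
       */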
11693 int
11694 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11695     uint32_t faddr)
11696 {
11697         int                     rval;
11698         uint32_t                cnt, rest_addr, fdata, wc;
11699         dma_mem_t               dmabuf = {0};
11700         ql_adapter_state_t      *ha = vha->pha;
11701         ql_xioctl_t             *xp = ha->xioctl;
11702 
11703         QL_PRINT_3(ha, "started, faddr=%xh, size=%xh\n",
11704             faddr, size);
11705 
11706         /* start address must be 32 bit word aligned */
11707         if ((faddr & 0x3) != 0) {
11708                 EL(ha, "start address not 32 bit word aligned\n");
11709                 return (QL_FUNCTION_PARAMETER_ERROR);
11710         }
11711 
11712         /* Allocate DMA buffer */
11713         if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11714                 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11715                     LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11716                     QL_SUCCESS) {
11717                         EL(ha, "dma alloc failed, rval=%xh\n", rval);
11718                         return (rval);
11719                 }
11720         }
11721 
11722         /* Enable flash write */
11723         if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11724                 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11725                 ql_free_phys(ha, &dmabuf);
11726                 return (rval);
11727         }
11728 
11729         /* setup mask of address range within a sector */
11730         rest_addr = (xp->fdesc.block_size - 1) >> 2;
11731 
11732         faddr = faddr >> 2;       /* flash gets 32 bit words */
11733 
11734         /*
11735          * Write data to flash.
11736          */
11737         cnt = 0;
11738         size = (size + 3) >> 2;   /* Round up & convert to dwords */
11739 
11740         while (cnt < size) {
11741                 /* Beginning of a sector? */
11742                 if ((faddr & rest_addr) == 0) {
11743                         if (CFG_IST(ha, CFG_CTRL_82XX)) {
11744                                 fdata = ha->flash_data_addr | faddr;
11745                                 rval = ql_8021_rom_erase(ha, fdata);
11746                                 if (rval != QL_SUCCESS) {
11747                                         EL(ha, "8021 erase sector status="
11748                                             "%xh, start=%xh, end=%xh"
11749                                             "\n", rval, fdata,
11750                                             fdata + rest_addr);
11751                                         break;
11752                                 }
11753                         } else if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
11754                                 fdata = ha->flash_data_addr | faddr;
11755                                 rval = ql_flash_access(ha,
11756                                     FAC_ERASE_SECTOR, fdata, fdata +
11757                                     rest_addr, 0);
11758                                 if (rval != QL_SUCCESS) {
11759                                         EL(ha, "erase sector status="
11760                                             "%xh, start=%xh, end=%xh"
11761                                             "\n", rval, fdata,
11762                                             fdata + rest_addr);
11763                                         break;
11764                                 }
11765                         } else {
11766                                 fdata = (faddr & ~rest_addr) << 2;
11767                                 fdata = (fdata & 0xff00) |
11768                                     (fdata << 16 & 0xff0000) |
11769                                     (fdata >> 16 & 0xff);
11770 
11771                                 if (rest_addr == 0x1fff) {
11772                                         /* 32kb sector block erase */
11773                                         rval = ql_24xx_write_flash(ha,
11774                                             FLASH_CONF_ADDR | 0x0352,
11775                                             fdata);
11776                                 } else {
11777                                         /* 64kb sector block erase */
11778                                         rval = ql_24xx_write_flash(ha,
11779                                             FLASH_CONF_ADDR | 0x03d8,
11780                                             fdata);
11781                                 }
11782                                 if (rval != QL_SUCCESS) {
11783                                         EL(ha, "Unable to flash sector"
11784                                             ": address=%xh\n", faddr);
11785                                         break;
11786                                 }
11787                         }
11788                 }
11789 
11790                 /* Write data */
11791                 if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT) &&
11792                     ((faddr & 0x3f) == 0)) {
11793                         /*
11794                          * Limit write up to sector boundary.
11795                          */
11796                         wc = ((~faddr & (rest_addr>>1)) + 1);
11797 
11798                         if (size - cnt < wc) {
11799                                 wc = size - cnt;
11800                         }
11801 
11802                         ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11803                             (uint8_t *)dmabuf.bp, wc<<2,
11804                             DDI_DEV_AUTOINCR);
11805 
11806                         rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11807                             faddr, dmabuf.cookie.dmac_laddress, wc);
11808                         if (rval != QL_SUCCESS) {
11809                                 EL(ha, "unable to dma to flash "
11810                                     "address=%xh\n", faddr << 2);
11811                                 break;
 
11822                         rval = ql_24xx_write_flash(ha,
11823                             ha->flash_data_addr | faddr, fdata);
11824                         if (rval != QL_SUCCESS) {
11825                                 EL(ha, "Unable to program flash "
11826                                     "address=%xh data=%xh\n", faddr,
11827                                     *dp);
11828                                 break;
11829                         }
11830                         cnt++;
11831                         faddr++;
11832 
11833                         /* Allow other system activity. */
11834                         if (cnt % 0x1000 == 0) {
11835                                 ql_delay(ha, 10000);
11836                         }
11837                 }
11838         }
11839 
11840         ql_24xx_protect_flash(ha);
11841 
11842         if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11843                 ql_free_phys(ha, &dmabuf);
11844         }
11845 
11846         if (rval != QL_SUCCESS) {
11847                 EL(ha, "failed=%xh\n", rval);
11848         } else {
11849                 /*EMPTY*/
11850                 QL_PRINT_3(ha, "done\n");
11851         }
11852         return (rval);
11853 }
11854 
11855 /*
11856  * ql_24xx_read_flash
11857  *      Reads a 32bit word from ISP24xx NVRAM/FLASH.
11858  *
11859  * Input:
11860  *      ha:     adapter state pointer.
11861  *      faddr:  NVRAM/FLASH address.
11862  *      bp:     data pointer.
11863  *
11864  * Returns:
11865  *      ql local function return status code.
11866  *
11867  * Context:
11868  *      Kernel context.
11869  */
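      /*
       * Implementation note: a read is started by writing the address to
       * the flash_address register with FLASH_DATA_FLAG clear; the
       * controller sets FLASH_DATA_FLAG when the read cycle has completed
       * and the word can then be taken from the flash_data register.
       */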
11870 int
11871 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11872 {
11873         uint32_t                timer;
11874         int                     rval = QL_SUCCESS;
11875         ql_adapter_state_t      *ha = vha->pha;
11876 
11877         if (CFG_IST(ha, CFG_CTRL_82XX)) {
11878                 if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11879                         EL(ha, "8021 access error\n");
11880                 }
11881                 return (rval);
11882         }
11883 
11884         /* Clear access error flag */
11885         WRT32_IO_REG(ha, ctrl_status,
11886             RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11887 
11888         WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11889 
11890         /* Wait for READ cycle to complete. */
11891         for (timer = 300000; timer; timer--) {
11892                 if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11893                         break;
11894                 }
11895                 drv_usecwait(10);
11896         }
11897 
 
11913  *      Writes a 32bit word to ISP24xx NVRAM/FLASH.
11914  *
11915  * Input:
11916  *      ha:     adapter state pointer.
11917  *      addr:   NVRAM/FLASH address.
11918  *      data:   data to be written.
11919  *
11920  * Returns:
11921  *      ql local function return status code.
11922  *
11923  * Context:
11924  *      Kernel context.
11925  */
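      /*
       * Implementation note: the data word is written to flash_data first,
       * then the address is written with FLASH_DATA_FLAG set; the
       * controller clears the flag when the write cycle is done.  For
       * writes into the flash configuration space the flash status register
       * (FLASH_CONF_ADDR | 0x105) is also polled until its busy bit (BIT_0)
       * clears.
       */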
11926 int
11927 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11928 {
11929         uint32_t                timer, fdata;
11930         int                     rval = QL_SUCCESS;
11931         ql_adapter_state_t      *ha = vha->pha;
11932 
11933         if (CFG_IST(ha, CFG_CTRL_82XX)) {
11934                 if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
11935                         EL(ha, "8021 access error\n");
11936                 }
11937                 return (rval);
11938         }
11939         /* Clear access error flag */
11940         WRT32_IO_REG(ha, ctrl_status,
11941             RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11942 
11943         WRT32_IO_REG(ha, flash_data, data);
11944         RD32_IO_REG(ha, flash_data);            /* PCI Posting. */
11945         WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11946 
11947         /* Wait for Write cycle to complete. */
11948         for (timer = 3000000; timer; timer--) {
11949                 if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11950                         /* Check flash write in progress. */
11951                         if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11952                                 (void) ql_24xx_read_flash(ha,
11953                                     FLASH_CONF_ADDR | 0x105, &fdata);
11954                                 if (!(fdata & BIT_0)) {
11955                                         break;
11956                                 }
11957                         } else {
11958                                 break;
11959                         }
11960                 }
11961                 drv_usecwait(10);
11962         }
11963         if (timer == 0) {
11964                 EL(ha, "failed, timeout\n");
11965                 rval = QL_FUNCTION_TIMEOUT;
11966         } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11967                 EL(ha, "access error\n");
11968                 rval = QL_FUNCTION_FAILED;
11969         }
11970 
11971         return (rval);
11972 }
11973 /*
11974  * ql_24xx_unprotect_flash
11975  *      Enable writes
11976  *
11977  * Input:
11978  *      ha:     adapter state pointer.
11979  *
11980  * Returns:
11981  *      ql local function return status code.
11982  *
11983  * Context:
11984  *      Kernel context.
11985  */
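      /*
       * Implementation note: three paths are used below.  ISP8021 parts go
       * through ql_8021_rom_wrsr(); controllers with Flash Access Control
       * (FAC) support and running firmware take the FAC semaphore and issue
       * FAC_WRT_ENABLE; otherwise ISP_FLASH_ENABLE is set in ctrl_status
       * and write protection is removed directly through the status
       * register and per-sector unprotect commands.
       */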
11986 int
11987 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11988 {
11989         int                     rval;
11990         uint32_t                fdata, timer;
11991         ql_adapter_state_t      *ha = vha->pha;
11992         ql_xioctl_t             *xp = ha->xioctl;
11993 
11994         QL_PRINT_3(ha, "started\n");
11995 
11996         if (CFG_IST(ha, CFG_CTRL_82XX)) {
11997                 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11998                 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11999                 if (rval != QL_SUCCESS) {
12000                         EL(ha, "8021 access error\n");
12001                 }
12002                 return (rval);
12003         }
12004         if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
12005                 if (ha->task_daemon_flags & FIRMWARE_UP) {
12006                         for (timer = 3000; timer; timer--) {
12007                                 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
12008                                         EL(ha, "ISP_ABORT_NEEDED done\n");
12009                                         return (QL_ABORTED);
12010                                 }
12011                                 rval = ql_flash_access(ha, FAC_SEMA_LOCK,
12012                                     0, 0, NULL);
12013                                 if (rval == QL_SUCCESS ||
12014                                     rval == QL_FUNCTION_TIMEOUT) {
12015                                         EL(ha, "lock status=%xh\n", rval);
12016                                         break;
12017                                 }
12018                                 delay(1);
12019                         }
12020 
12021                         if (rval == QL_SUCCESS &&
12022                             (rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0,
12023                             0, NULL)) != QL_SUCCESS) {
12024                                 EL(ha, "WRT_ENABLE status=%xh\n", rval);
12025                                 (void) ql_flash_access(ha, FAC_SEMA_UNLOCK,
12026                                     0, 0, NULL);
12027                         }
12028                 } else {
12029                         rval = QL_SUCCESS;
12030                 }
12031                 QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
12032                 return (rval);
12033         } else {
12034                 /* Enable flash write. */
12035                 WRT32_IO_REG(ha, ctrl_status,
12036                     RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
12037                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
12038         }
12039 
12040         /* Sector/Block Protection Register Lock (SST, ST, ATMEL). */
12041         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
12042             xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
12043 
12044         /*
12045          * Remove block write protection (SST and ST)
12046          * Global unprotect sectors (ATMEL).
12047          */
12048         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
12049             xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
12050 
12051         if (xp->fdesc.unprotect_sector_cmd != 0) {
12052                 for (fdata = 0; fdata < 0x10; fdata++) {
12053                         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
12054                             0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
12055                 }
12056 
12057                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
12058                     xp->fdesc.unprotect_sector_cmd, 0x00400f);
12059                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
12060                     xp->fdesc.unprotect_sector_cmd, 0x00600f);
12061                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
12062                     xp->fdesc.unprotect_sector_cmd, 0x00800f);
12063         }
12064 
12065         QL_PRINT_3(ha, "done\n");
12066 
12067         return (QL_SUCCESS);
12068 }
12069 
12070 /*
12071  * ql_24xx_protect_flash
12072  *      Disable writes
12073  *
12074  * Input:
12075  *      ha:     adapter state pointer.
12076  *
12077  * Context:
12078  *      Kernel context.
12079  */
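      /*
       * Implementation note: this undoes ql_24xx_unprotect_flash().
       * ISP8021 parts rewrite the status register with the write disable
       * bits, FAC-capable controllers issue FAC_WRT_PROTECT, and other
       * parts re-apply per-sector protection and restore the status
       * register before flash write access is turned off.
       */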
12080 void
12081 ql_24xx_protect_flash(ql_adapter_state_t *vha)
12082 {
12083         int                     rval;
12084         uint32_t                fdata, timer;
12085         ql_adapter_state_t      *ha = vha->pha;
12086         ql_xioctl_t             *xp = ha->xioctl;
12087 
12088         QL_PRINT_3(ha, "started\n");
12089 
12090         if (CFG_IST(ha, CFG_CTRL_82XX)) {
12091                 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
12092                 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
12093                 if (rval != QL_SUCCESS) {
12094                         EL(ha, "8021 access error\n");
12095                 }
12096                 return;
12097         }
12098         if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
12099                 if (ha->task_daemon_flags & FIRMWARE_UP) {
12100                         for (timer = 3000; timer; timer--) {
12101                                 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
12102                                         EL(ha, "ISP_ABORT_NEEDED done\n");
12103                                         return;
12104                                 }
12105                                 rval = ql_flash_access(ha, FAC_SEMA_LOCK,
12106                                     0, 0, NULL);
12107                                 if (rval == QL_SUCCESS ||
12108                                     rval == QL_FUNCTION_TIMEOUT) {
12109                                         if (rval != QL_SUCCESS) {
12110                                                 EL(ha, "lock status=%xh\n",
12111                                                     rval);
12112                                         }
12113                                         break;
12114                                 }
12115                                 delay(1);
12116                         }
12117 
12118                         if (rval == QL_SUCCESS &&
12119                             (rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0,
12120                             0, NULL)) != QL_SUCCESS) {
12121                                 EL(ha, "protect status=%xh\n", rval);
12122                                 (void) ql_flash_access(ha, FAC_SEMA_UNLOCK, 0,
12123                                     0, NULL);
12124                         }
12125                         QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
12126                         return;
12127                 }
12128         } else {
12129                 /* Enable flash write. */
12130                 WRT32_IO_REG(ha, ctrl_status,
12131                     RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
12132                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
12133         }
12134 
12135         /*
12136          * Protect sectors.
12137          * Set block write protection (SST and ST) and
12138          * Sector/Block Protection Register Lock (SST, ST, ATMEL).
12139          */
12140         if (xp->fdesc.protect_sector_cmd != 0) {
12141                 for (fdata = 0; fdata < 0x10; fdata++) {
12142                         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
12143                             0x300 | xp->fdesc.protect_sector_cmd, fdata);
12144                 }
12145                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
12146                     xp->fdesc.protect_sector_cmd, 0x00400f);
12147                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
12148                     xp->fdesc.protect_sector_cmd, 0x00600f);
12149                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
12150                     xp->fdesc.protect_sector_cmd, 0x00800f);
12151         }
12152 
12153         /* Remove Sector Protection Registers Locked (SPRL) bit. */
12154         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
12155             xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
12156 
12157         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
12158             xp->fdesc.write_statusreg_cmd, xp->fdesc.write_disable_bits);
12159 
12160         /* Disable flash write. */
12161         if (!CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
12162                 WRT32_IO_REG(ha, ctrl_status,
12163                     RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
12164                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
12165         }
12166 
12167         QL_PRINT_3(ha, "done\n");
12168 }
12169 
12170 /*
12171  * ql_dump_firmware
12172  *      Save RISC code state information.
12173  *
12174  * Input:
12175  *      ha = adapter state pointer.
12176  *
12177  * Returns:
12178  *      QL local function return status code.
12179  *
12180  * Context:
12181  *      Kernel context.
12182  */
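      /*
       * Implementation note: if a dump is already in progress, or a valid
       * dump has not yet been uploaded, the routine returns immediately.
       * Otherwise the driver is stalled, the firmware state is captured by
       * ql_binary_fw_dump(), and an ISP abort is requested to restart the
       * firmware; the routine then waits up to 30 seconds for the driver
       * suspension to end.
       */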
12183 int
12184 ql_dump_firmware(ql_adapter_state_t *vha)
12185 {
12186         int                     rval;
12187         clock_t                 timer = drv_usectohz(30000000);
12188         ql_adapter_state_t      *ha = vha->pha;
12189 
12190         QL_PRINT_3(ha, "started\n");
12191 
12192         QL_DUMP_LOCK(ha);
12193 
12194         if (ha->ql_dump_state & QL_DUMPING ||
12195             (ha->ql_dump_state & QL_DUMP_VALID &&
12196             !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
12197                 QL_PRINT_3(ha, "done\n");
12198                 QL_DUMP_UNLOCK(ha);
12199                 return (QL_SUCCESS);
12200         }
12201 
12202         QL_DUMP_UNLOCK(ha);
12203 
12204         (void) ql_stall_driver(ha, 0);
12205 
12206         /* Dump firmware. */
12207         if (CFG_IST(ha, CFG_CTRL_82XX)) {
12208                 rval = ql_binary_fw_dump(ha, FALSE);
12209         } else {
12210                 rval = ql_binary_fw_dump(ha, TRUE);
12211         }
12212 
12213         /* Do abort to force restart. */
12214         ql_restart_driver(ha);
12215         ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
12216         EL(ha, "restarting, isp_abort_needed\n");
12217 
12218         /* Acquire task daemon lock. */
12219         TASK_DAEMON_LOCK(ha);
12220 
12221         /* Wait for suspension to end. */
12222         while (DRIVER_SUSPENDED(ha)) {
12223                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
12224 
12225                 /* 30 seconds from now */
12226                 if (cv_reltimedwait(&ha->cv_dr_suspended,
12227                     &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
12228                         /*
12229                          * The timeout time 'timer' was
12230                          * reached without the condition
12231                          * being signaled.
12232                          */
12233                         break;
12234                 }
12235         }
12236 
12237         /* Release task daemon lock. */
12238         TASK_DAEMON_UNLOCK(ha);
12239 
12240         if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
12241                 /*EMPTY*/
12242                 QL_PRINT_3(ha, "done\n");
12243         } else {
12244                 EL(ha, "failed, rval = %xh\n", rval);
12245         }
12246         return (rval);
12247 }
12248 
12249 /*
12250  * ql_binary_fw_dump
12251  *      Dumps binary data from firmware.
12252  *
12253  * Input:
12254  *      ha = adapter state pointer.
12255  *      lock_needed = mailbox lock needed.
12256  *
12257  * Returns:
12258  *      ql local function return status code.
12259  *
12260  * Context:
12261  *      Interrupt or Kernel context, no mailbox commands allowed.
12262  */
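      /*
       * Implementation note: the dump buffer size is derived from the
       * controller family: the fixed dump structure plus, for the
       * 25xx/81xx/83xx families, the sizes of the first two request rings,
       * all response rings and the firmware extended memory.  The capture
       * itself is dispatched to the family-specific binary dump routine and
       * the ISP is reset afterwards.
       */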
12263 int
12264 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
12265 {
12266         uint32_t                cnt, index;
12267         clock_t                 timer;
12268         int                     rval = QL_SUCCESS;
12269         ql_adapter_state_t      *ha = vha->pha;
12270 
12271         QL_PRINT_3(ha, "started\n");
12272 
12273         ADAPTER_STATE_LOCK(ha);
12274         ha->flags &= ~FW_DUMP_NEEDED;
12275         ADAPTER_STATE_UNLOCK(ha);
12276 
12277         if (CFG_IST(ha, CFG_CTRL_82XX) && ha->md_capture_size == 0) {
12278                 EL(ha, "8021 not supported\n");
12279                 return (QL_NOT_SUPPORTED);
12280         }
12281 
12282         QL_DUMP_LOCK(ha);
12283 
12284         if (ha->ql_dump_state & QL_DUMPING ||
12285             (ha->ql_dump_state & QL_DUMP_VALID &&
12286             !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
12287                 EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
12288                 QL_DUMP_UNLOCK(ha);
12289                 return (QL_DATA_EXISTS);
12290         }
12291 
12292         ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
12293         ha->ql_dump_state |= QL_DUMPING;
12294 
12295         QL_DUMP_UNLOCK(ha);
12296 
12297         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
12298                 /* Insert Time Stamp */
12299                 rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
12300                     FTO_INSERT_TIME_STAMP, NULL);
12301                 if (rval != QL_SUCCESS) {
12302                         EL(ha, "f/w extended trace insert "
12303                             "time stamp failed: %xh\n", rval);
12304                 }
12305         }
12306 
12307         if (lock_needed == TRUE) {
12308                 /* Acquire mailbox register lock. */
12309                 MBX_REGISTER_LOCK(ha);
12310                 timer = ((MAILBOX_TOV + 6) * drv_usectohz(1000000));
12311 
12312                 /* Check for mailbox available, if not wait for signal. */
12313                 while (ha->mailbox_flags & MBX_BUSY_FLG) {
12314                         ha->mailbox_flags = (uint8_t)
12315                             (ha->mailbox_flags | MBX_WANT_FLG);
12316 
12317                         /* 30 seconds from now */
12318                         if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
12319                             timer, TR_CLOCK_TICK) == -1) {
12320                                 /*
12321                                  * The timeout time 'timer' was
12322                                  * reached without the condition
12323                                  * being signaled.
12324                                  */
12325 
12326                                 /* Release mailbox register lock. */
12327                                 MBX_REGISTER_UNLOCK(ha);
12328 
12329                                 EL(ha, "failed, rval = %xh\n",
12330                                     QL_FUNCTION_TIMEOUT);
12331                                 return (QL_FUNCTION_TIMEOUT);
12332                         }
12333                 }
12334 
12335                 /* Set busy flag. */
12336                 ha->mailbox_flags = (uint8_t)
12337                     (ha->mailbox_flags | MBX_BUSY_FLG);
12338 
12339                 /* Release mailbox register lock. */
12340                 MBX_REGISTER_UNLOCK(ha);
12341         }
12342 
12343         /* Free previous dump buffer. */
12344         if (ha->ql_dump_ptr != NULL) {
12345                 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
12346                 ha->ql_dump_ptr = NULL;
12347         }
12348 
12349         if (CFG_IST(ha, CFG_CTRL_24XX)) {
12350                 ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
12351                     ha->fw_ext_memory_size);
12352         } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12353                 cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
12354                     ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
12355                 index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
12356 
12357                 ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
12358                     cnt + index + ha->fw_ext_memory_size +
12359                     (ha->rsp_queues_cnt * 16));
12360 
12361         } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12362                 cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
12363                     ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
12364                 index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
12365 
12366                 ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
12367                     cnt + index + ha->fw_ext_memory_size +
12368                     (ha->rsp_queues_cnt * 16));
12369 
12370         } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12371                 cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
12372                     ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
12373                 index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
12374 
12375                 ha->ql_dump_size = (uint32_t)(sizeof (ql_83xx_fw_dump_t) +
12376                     cnt + index + ha->fw_ext_memory_size +
12377                     (ha->rsp_queues_cnt * 16));
12378         } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12379                 ha->ql_dump_size = ha->md_capture_size;
12380         } else {
12381                 ha->ql_dump_size = sizeof (ql_fw_dump_t);
12382         }
12383 
12384         if (CFG_IST(ha, CFG_CTRL_27XX)) {
12385                 rval = ql_27xx_binary_fw_dump(ha);
12386         } else {
12387                 if ((ha->ql_dump_ptr =
12388                     kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) == NULL) {
12389                         rval = QL_MEMORY_ALLOC_FAILED;
12390                 } else {
12391                         if (CFG_IST(ha, CFG_CTRL_2363)) {
12392                                 rval = ql_2300_binary_fw_dump(ha,
12393                                     ha->ql_dump_ptr);
12394                         } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12395                                 rval = ql_81xx_binary_fw_dump(ha,
12396                                     ha->ql_dump_ptr);
12397                         } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12398                                 rval = ql_83xx_binary_fw_dump(ha,
12399                                     ha->ql_dump_ptr);
12400                         } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12401                                 rval = ql_25xx_binary_fw_dump(ha,
12402                                     ha->ql_dump_ptr);
12403                         } else if (CFG_IST(ha, CFG_CTRL_24XX)) {
12404                                 rval = ql_24xx_binary_fw_dump(ha,
12405                                     ha->ql_dump_ptr);
12406                         } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12407                                 (void) ql_8021_reset_fw(ha);
12408                                 rval = QL_SUCCESS;
12409                         } else {
12410                                 rval = ql_2200_binary_fw_dump(ha,
12411                                     ha->ql_dump_ptr);
12412                         }
12413                 }
12414         }
12415 
12416         /* Reset ISP chip. */
12417         ql_reset_chip(ha);
12418 
12419         QL_DUMP_LOCK(ha);
12420 
12421         if (rval != QL_SUCCESS) {
12422                 if (ha->ql_dump_ptr != NULL) {
12423                         kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
12424                         ha->ql_dump_ptr = NULL;
12425                 }
12426                 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
12427                     QL_DUMP_UPLOADED);
12428                 EL(ha, "failed, rval = %xh\n", rval);
12429         } else {
12430                 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
12431                 ha->ql_dump_state |= QL_DUMP_VALID;
12432                 EL(ha, "done\n");
12433         }
12434 
 
12443  *
12444  * Input:
12445  *      ha = adapter state pointer.
12446  *      bptr = buffer pointer.
12447  *
12448  * Returns:
12449  *      Amount of data buffer used.
12450  *
12451  * Context:
12452  *      Kernel context.
12453  */
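      /*
       * Implementation note: ISP24xx and later families are handed off to
       * their own ASCII formatters; the 22xx/23xx path below prints each
       * register block as rows of eight 16-bit words followed by the RISC
       * RAM and request/response queue contents.
       */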
12454 size_t
12455 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
12456 {
12457         uint32_t                cnt;
12458         caddr_t                 bp;
12459         int                     mbox_cnt;
12460         ql_adapter_state_t      *ha = vha->pha;
12461         ql_fw_dump_t            *fw = ha->ql_dump_ptr;
12462 
12463         if (CFG_IST(ha, CFG_CTRL_24XX)) {
12464                 return (ql_24xx_ascii_fw_dump(ha, bufp));
12465         } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12466                 return (ql_25xx_ascii_fw_dump(ha, bufp));
12467         } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12468                 return (ql_81xx_ascii_fw_dump(ha, bufp));
12469         } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12470                 return (ql_8021_ascii_fw_dump(ha, bufp));
12471         } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12472                 return (ql_83xx_ascii_fw_dump(ha, bufp));
12473         } else if (CFG_IST(ha, CFG_CTRL_27XX)) {
12474                 return (ql_27xx_ascii_fw_dump(ha, bufp));
12475         }
12476 
12477         QL_PRINT_3(ha, "started\n");
12478 
12479         if (CFG_IST(ha, CFG_CTRL_23XX)) {
12480                 (void) sprintf(bufp, "\nISP 2300IP ");
12481         } else if (CFG_IST(ha, CFG_CTRL_63XX)) {
12482                 (void) sprintf(bufp, "\nISP 2322/6322FLX ");
12483         } else {
12484                 (void) sprintf(bufp, "\nISP 2200IP ");
12485         }
12486 
12487         bp = bufp + strlen(bufp);
12488         (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12489             ha->fw_major_version, ha->fw_minor_version,
12490             ha->fw_subminor_version);
12491 
12492         (void) strcat(bufp, "\nPBIU Registers:");
12493         bp = bufp + strlen(bufp);
12494         for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12495                 if (cnt % 8 == 0) {
12496                         *bp++ = '\n';
12497                 }
12498                 (void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
12499                 bp = bp + 6;
12500         }
12501 
12502         if (CFG_IST(ha, CFG_CTRL_2363)) {
12503                 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12504                     "registers:");
12505                 bp = bufp + strlen(bufp);
12506                 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12507                         if (cnt % 8 == 0) {
12508                                 *bp++ = '\n';
12509                         }
12510                         (void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
12511                         bp = bp + 6;
12512                 }
12513         }
12514 
12515         (void) strcat(bp, "\n\nMailbox Registers:");
12516         bp = bufp + strlen(bufp);
12517         mbox_cnt = CFG_IST(ha, CFG_CTRL_2363) ? 16 : 8;
12518         for (cnt = 0; cnt < mbox_cnt; cnt++) {
12519                 if (cnt % 8 == 0) {
12520                         *bp++ = '\n';
12521                 }
12522                 (void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
12523                 bp = bp + 6;
12524         }
12525 
12526         if (CFG_IST(ha, CFG_CTRL_2363)) {
12527                 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12528                 bp = bufp + strlen(bufp);
12529                 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12530                         if (cnt % 8 == 0) {
12531                                 *bp++ = '\n';
12532                         }
12533                         (void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
12534                         bp = bp + 6;
12535                 }
12536         }
12537 
12538         (void) strcat(bp, "\n\nDMA Registers:");
12539         bp = bufp + strlen(bufp);
12540         for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12541                 if (cnt % 8 == 0) {
12542                         *bp++ = '\n';
12543                 }
12544                 (void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
12545                 bp = bp + 6;
12546         }
 
12621                 if (cnt % 8 == 0) {
12622                         *bp++ = '\n';
12623                 }
12624                 (void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
12625                 bp = bp + 6;
12626         }
12627 
12628         (void) strcat(bp, "\n\nRISC GP7 Registers:");
12629         bp = bufp + strlen(bufp);
12630         for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12631                 if (cnt % 8 == 0) {
12632                         *bp++ = '\n';
12633                 }
12634                 (void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
12635                 bp = bp + 6;
12636         }
12637 
12638         (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12639         bp = bufp + strlen(bufp);
12640         for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12641                 if (cnt == 16 && !CFG_IST(ha, CFG_CTRL_2363)) {
12642                         break;
12643                 }
12644                 if (cnt % 8 == 0) {
12645                         *bp++ = '\n';
12646                 }
12647                 (void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
12648                 bp = bp + 6;
12649         }
12650 
12651         (void) strcat(bp, "\n\nFPM B0 Registers:");
12652         bp = bufp + strlen(bufp);
12653         for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12654                 if (cnt % 8 == 0) {
12655                         *bp++ = '\n';
12656                 }
12657                 (void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
12658                 bp = bp + 6;
12659         }
12660 
12661         (void) strcat(bp, "\n\nFPM B1 Registers:");
12662         bp = bufp + strlen(bufp);
12663         for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12664                 if (cnt % 8 == 0) {
12665                         *bp++ = '\n';
12666                 }
12667                 (void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
12668                 bp = bp + 6;
12669         }
12670 
12671         if (CFG_IST(ha, CFG_CTRL_2363)) {
12672                 (void) strcat(bp, "\n\nCode RAM Dump:");
12673                 bp = bufp + strlen(bufp);
12674                 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12675                         if (cnt % 8 == 0) {
12676                                 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12677                                 bp = bp + 8;
12678                         }
12679                         (void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12680                         bp = bp + 6;
12681                 }
12682 
12683                 (void) strcat(bp, "\n\nStack RAM Dump:");
12684                 bp = bufp + strlen(bufp);
12685                 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12686                         if (cnt % 8 == 0) {
12687                                 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12688                                 bp = bp + 8;
12689                         }
12690                         (void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
12691                         bp = bp + 6;
 
12724                         (void) sprintf(bp, "\n%08x: ", cnt);
12725                         bp += strlen(bp);
12726                 }
12727                 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12728                 bp += strlen(bp);
12729         }
12730 
12731         (void) sprintf(bp, "\n\nResponse Queue");
12732         bp += strlen(bp);
12733         for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12734                 if (cnt % 8 == 0) {
12735                         (void) sprintf(bp, "\n%08x: ", cnt);
12736                         bp += strlen(bp);
12737                 }
12738                 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12739                 bp += strlen(bp);
12740         }
12741 
12742         (void) sprintf(bp, "\n");
12743 
12744         QL_PRINT_10(ha, "done, size=0x%x\n", strlen(bufp));
12745 
12746         return (strlen(bufp));
12747 }
12748 
12749 /*
12750  * ql_24xx_ascii_fw_dump
12751  *      Converts ISP24xx firmware binary dump to ascii.
12752  *
12753  * Input:
12754  *      ha = adapter state pointer.
12755  *      bptr = buffer pointer.
12756  *
12757  * Returns:
12758  *      Amount of data buffer used.
12759  *
12760  * Context:
12761  *      Kernel context.
12762  */
12763 static size_t
12764 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12765 {
12766         uint32_t                cnt;
12767         caddr_t                 bp = bufp;
12768         ql_24xx_fw_dump_t       *fw = ha->ql_dump_ptr;
12769 
12770         QL_PRINT_3(ha, "started\n");
12771 
12772         (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12773             ha->fw_major_version, ha->fw_minor_version,
12774             ha->fw_subminor_version, ha->fw_attributes);
12775         bp += strlen(bp);
12776 
12777         (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12778 
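        /*
         * bp was not advanced past the HCCR line, so strcat() appends the
         * next header to it; the strlen() advance below then accounts for
         * both strings.
         */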
12779         (void) strcat(bp, "\nHost Interface Registers");
12780         bp += strlen(bp);
12781         for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12782                 if (cnt % 8 == 0) {
12783                         (void) sprintf(bp++, "\n");
12784                 }
12785 
12786                 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12787                 bp += 9;
12788         }
12789 
12790         (void) sprintf(bp, "\n\nMailbox Registers");
 
13138                 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13139                 bp += strlen(bp);
13140                 /* show data address as a byte address, data as long words */
13141                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13142                         cnt_b = cnt * 4;
13143                         if (cnt_b % 32 == 0) {
13144                                 (void) sprintf(bp, "\n%08x: ",
13145                                     (int)(w64 + cnt_b));
13146                                 bp += 11;
13147                         }
13148                         (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13149                         bp += 9;
13150                 }
13151         }
13152 
13153         (void) sprintf(bp, "\n\n");
13154         bp += strlen(bp);
13155 
13156         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13157 
13158         QL_PRINT_10(ha, "done=%xh\n", cnt);
13159 
13160         return (cnt);
13161 }
13162 
13163 /*
13164  * ql_25xx_ascii_fw_dump
13165  *      Converts an ISP25xx firmware binary dump to ASCII.
13166  *
13167  * Input:
13168  *      ha = adapter state pointer.
13169  *      bufp = buffer pointer.
13170  *
13171  * Returns:
13172  *      Amount of data buffer used.
13173  *
13174  * Context:
13175  *      Kernel context.
13176  */
13177 static size_t
13178 ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
13179 {
13180         uint32_t                cnt, cnt1, *dp, *dp2;
13181         caddr_t                 bp = bufp;
13182         ql_25xx_fw_dump_t       *fw = ha->ql_dump_ptr;
13183 
13184         QL_PRINT_3(ha, "started\n");
13185 
13186         (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
13187             ha->fw_major_version, ha->fw_minor_version,
13188             ha->fw_subminor_version, ha->fw_attributes);
13189         bp += strlen(bp);
13190 
13191         (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
13192         bp += strlen(bp);
13193 
13194         (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
13195         bp += strlen(bp);
13196 
13197         (void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
13198             fw->aer_ues);
13199         bp += strlen(bp);
13200 
13201         (void) sprintf(bp, "\nHostRisc Registers");
13202         bp += strlen(bp);
13203         for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
13204                 if (cnt % 8 == 0) {
13205                         (void) sprintf(bp++, "\n");
13206                 }
13207                 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
13208                 bp += 9;
13209         }
13210 
13211         (void) sprintf(bp, "\n\nPCIe Registers");
13212         bp += strlen(bp);
13213         for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
13214                 if (cnt % 8 == 0) {
13215                         (void) sprintf(bp++, "\n");
13216                 }
13217                 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
13218                 bp += 9;
13219         }
13220 
 
13487         for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13488                 if (cnt % 8 == 0) {
13489                         (void) sprintf(bp++, "\n");
13490                 }
13491                 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13492                 bp += 9;
13493         }
13494 
13495         (void) sprintf(bp, "\n\nLMC Registers");
13496         bp += strlen(bp);
13497         for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13498                 if (cnt % 8 == 0) {
13499                         (void) sprintf(bp++, "\n");
13500                 }
13501                 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13502                 bp += 9;
13503         }
13504 
13505         (void) sprintf(bp, "\n\nFPM Hardware Registers");
13506         bp += strlen(bp);
13507         cnt1 = sizeof (fw->fpm_hdw_reg);
13508         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13509                 if (cnt % 8 == 0) {
13510                         (void) sprintf(bp++, "\n");
13511                 }
13512                 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13513                 bp += 9;
13514         }
13515 
13516         (void) sprintf(bp, "\n\nFB Hardware Registers");
13517         bp += strlen(bp);
13518         cnt1 = sizeof (fw->fb_hdw_reg);
13519         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13520                 if (cnt % 8 == 0) {
13521                         (void) sprintf(bp++, "\n");
13522                 }
13523                 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13524                 bp += 9;
13525         }
13526 
13527         (void) sprintf(bp, "\n\nCode RAM");
13528         bp += strlen(bp);
13529         for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13530                 if (cnt % 8 == 0) {
13531                         (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13532                         bp += 11;
13533                 }
13534                 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13535                 bp += 9;
13536         }
13537 
13538         (void) sprintf(bp, "\n\nExternal Memory");
13539         bp += strlen(bp);
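        /*
         * req_rsp_ext_mem is laid out by ql_25xx_binary_fw_dump() as:
         * queue in/out pointer snapshots (16 bytes per response queue),
         * the request queue images, the response queue images, and then
         * the external memory image printed here.
         */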
13540         dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
13541             fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
13542         for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13543                 if (cnt % 8 == 0) {
13544                         (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13545                         bp += 11;
13546                 }
13547                 (void) sprintf(bp, "%08x ", *dp++);
13548                 bp += 9;
13549         }
13550 
13551         (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13552         bp += strlen(bp);
13553 
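        /*
         * Skip past the queue pointer snapshots (4 dwords per response
         * queue), then print only request/response queues that contain at
         * least one nonzero word; all-zero queue images are skipped.
         */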
13554         dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
13555         for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
13556                 dp2 = dp;
13557                 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
13558                         if (*dp2++) {
13559                                 break;
13560                         }
13561                 }
13562                 if (cnt1 == fw->req_q_size[cnt] / 4) {
13563                         dp = dp2;
13564                         continue;
13565                 }
13566                 (void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
13567                 bp += strlen(bp);
13568                 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
13569                         if (cnt1 % 8 == 0) {
13570                                 (void) sprintf(bp, "\n%08x: ", cnt1);
13571                                 bp += strlen(bp);
13572                         }
13573                         (void) sprintf(bp, "%08x ", *dp++);
13574                         bp += strlen(bp);
13575                 }
13576         }
13577 
13578         for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
13579                 dp2 = dp;
13580                 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
13581                     cnt1++) {
13582                         if (*dp2++) {
13583                                 break;
13584                         }
13585                 }
13586                 if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
13587                         dp = dp2;
13588                         continue;
13589                 }
13590                 (void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
13591                 bp += strlen(bp);
13592                 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
13593                     cnt1++) {
13594                         if (cnt1 % 8 == 0) {
13595                                 (void) sprintf(bp, "\n%08x: ", cnt1);
13596                                 bp += strlen(bp);
13597                         }
13598                         (void) sprintf(bp, "%08x ", *dp++);
13599                         bp += strlen(bp);
13600                 }
13601         }
13602 
13603         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13604             (ha->fwexttracebuf.bp != NULL)) {
13605                 uint32_t cnt_b = 0;
13606                 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13607 
13608                 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13609                 bp += strlen(bp);
13610                 /* show data address as a byte address, data as long words */
13611                 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13612                         cnt_b = cnt * 4;
13613                         if (cnt_b % 32 == 0) {
13614                                 (void) sprintf(bp, "\n%08x: ",
13615                                     (int)(w64 + cnt_b));
13616                                 bp += 11;
13617                         }
13618                         (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13619                         bp += 9;
13620                 }
13621         }
13622 
13623         if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13624             (ha->fwfcetracebuf.bp != NULL)) {
13625                 uint32_t cnt_b = 0;
13626                 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13627 
13628                 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13629                 bp += strlen(bp);
13630                 /* show data address as a byte address, data as long words */
13631                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13632                         cnt_b = cnt * 4;
13633                         if (cnt_b % 32 == 0) {
13634                                 (void) sprintf(bp, "\n%08x: ",
13635                                     (int)(w64 + cnt_b));
13636                                 bp += 11;
13637                         }
13638                         (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13639                         bp += 9;
13640                 }
13641         }
13642 
13643         (void) sprintf(bp, "\n\n");
13644         bp += strlen(bp);
13645 
13646         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13647 
13648         QL_PRINT_10(ha, "done=%xh\n", cnt);
13649 
13650         return (cnt);
13651 }
13652 
13653 /*
13654  * ql_81xx_ascii_fw_dump
13655  *      Converts an ISP81xx firmware binary dump to ASCII.
13656  *
13657  * Input:
13658  *      ha = adapter state pointer.
13659  *      bufp = buffer pointer.
13660  *
13661  * Returns:
13662  *      Amount of data buffer used.
13663  *
13664  * Context:
13665  *      Kernel context.
13666  */
13667 static size_t
13668 ql_81xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
13669 {
13670         uint32_t                cnt, cnt1, *dp, *dp2;
13671         caddr_t                 bp = bufp;
13672         ql_81xx_fw_dump_t       *fw = ha->ql_dump_ptr;
13673 
13674         QL_PRINT_3(ha, "started\n");
13675 
13676         (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
13677             ha->fw_major_version, ha->fw_minor_version,
13678             ha->fw_subminor_version, ha->fw_attributes);
13679         bp += strlen(bp);
13680 
13681         (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
13682         bp += strlen(bp);
13683 
13684         (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
13685         bp += strlen(bp);
13686 
13687         (void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
13688             fw->aer_ues);
13689         bp += strlen(bp);
13690 
13691         (void) sprintf(bp, "\nHostRisc Registers");
13692         bp += strlen(bp);
13693         for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
13694                 if (cnt % 8 == 0) {
13695                         (void) sprintf(bp++, "\n");
13696                 }
13697                 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
13698                 bp += 9;
13699         }
13700 
13701         (void) sprintf(bp, "\n\nPCIe Registers");
13702         bp += strlen(bp);
13703         for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
13704                 if (cnt % 8 == 0) {
13705                         (void) sprintf(bp++, "\n");
13706                 }
13707                 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
13708                 bp += 9;
13709         }
13710 
13711         (void) strcat(bp, "\n\nHost Interface Registers");
13712         bp += strlen(bp);
13713         for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
13714                 if (cnt % 8 == 0) {
13715                         (void) sprintf(bp++, "\n");
13716                 }
13717                 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
13718                 bp += 9;
13719         }
13720 
13721         (void) sprintf(bp, "\n\nShadow Registers");
13722         bp += strlen(bp);
13723         for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
13724                 if (cnt % 8 == 0) {
13725                         (void) sprintf(bp++, "\n");
13726                 }
13727                 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
13728                 bp += 9;
13729         }
13730 
13731         (void) sprintf(bp, "\n\nRISC IO Register\n%08x",
13732             fw->risc_io);
13733         bp += strlen(bp);
13734 
13735         (void) sprintf(bp, "\n\nMailbox Registers");
13736         bp += strlen(bp);
13737         for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
13738                 if (cnt % 16 == 0) {
13739                         (void) sprintf(bp++, "\n");
13740                 }
13741                 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
13742                 bp += 5;
13743         }
13744 
13745         (void) sprintf(bp, "\n\nXSEQ GP Registers");
13746         bp += strlen(bp);
13747         for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
13748                 if (cnt % 8 == 0) {
13749                         (void) sprintf(bp++, "\n");
13750                 }
13751                 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
13752                 bp += 9;
13753         }
13754 
13755         (void) sprintf(bp, "\n\nXSEQ-0 Registers");
13756         bp += strlen(bp);
13757         for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
13758                 if (cnt % 8 == 0) {
13759                         (void) sprintf(bp++, "\n");
13760                 }
13761                 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
13762                 bp += 9;
13763         }
13764 
13765         (void) sprintf(bp, "\n\nXSEQ-1 Registers");
13766         bp += strlen(bp);
13767         for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
13768                 if (cnt % 8 == 0) {
13769                         (void) sprintf(bp++, "\n");
13770                 }
13771                 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
13772                 bp += 9;
13773         }
13774 
13775         (void) sprintf(bp, "\n\nRSEQ GP Registers");
13776         bp += strlen(bp);
13777         for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
13778                 if (cnt % 8 == 0) {
13779                         (void) sprintf(bp++, "\n");
13780                 }
13781                 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
13782                 bp += 9;
13783         }
13784 
13785         (void) sprintf(bp, "\n\nRSEQ-0 Registers");
13786         bp += strlen(bp);
13787         for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
13788                 if (cnt % 8 == 0) {
13789                         (void) sprintf(bp++, "\n");
13790                 }
13791                 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
13792                 bp += 9;
13793         }
13794 
13795         (void) sprintf(bp, "\n\nRSEQ-1 Registers");
13796         bp += strlen(bp);
13797         for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
13798                 if (cnt % 8 == 0) {
13799                         (void) sprintf(bp++, "\n");
13800                 }
13801                 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
13802                 bp += 9;
13803         }
13804 
13805         (void) sprintf(bp, "\n\nRSEQ-2 Registers");
13806         bp += strlen(bp);
13807         for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
13808                 if (cnt % 8 == 0) {
13809                         (void) sprintf(bp++, "\n");
13810                 }
13811                 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
13812                 bp += 9;
13813         }
13814 
13815         (void) sprintf(bp, "\n\nASEQ GP Registers");
13816         bp += strlen(bp);
13817         for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
13818                 if (cnt % 8 == 0) {
13819                         (void) sprintf(bp++, "\n");
13820                 }
13821                 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
13822                 bp += 9;
13823         }
13824 
13825         (void) sprintf(bp, "\n\nASEQ-0 Registers");
13826         bp += strlen(bp);
13827         for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
13828                 if (cnt % 8 == 0) {
13829                         (void) sprintf(bp++, "\n");
13830                 }
13831                 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
13832                 bp += 9;
13833         }
13834 
13835         (void) sprintf(bp, "\n\nASEQ-1 Registers");
13836         bp += strlen(bp);
13837         for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
13838                 if (cnt % 8 == 0) {
13839                         (void) sprintf(bp++, "\n");
13840                 }
13841                 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
13842                 bp += 9;
13843         }
13844 
13845         (void) sprintf(bp, "\n\nASEQ-2 Registers");
13846         bp += strlen(bp);
13847         for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
13848                 if (cnt % 8 == 0) {
13849                         (void) sprintf(bp++, "\n");
13850                 }
13851                 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
13852                 bp += 9;
13853         }
13854 
13855         (void) sprintf(bp, "\n\nCommand DMA Registers");
13856         bp += strlen(bp);
13857         for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
13858                 if (cnt % 8 == 0) {
13859                         (void) sprintf(bp++, "\n");
13860                 }
13861                 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
13862                 bp += 9;
13863         }
13864 
13865         (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
13866         bp += strlen(bp);
13867         for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
13868                 if (cnt % 8 == 0) {
13869                         (void) sprintf(bp++, "\n");
13870                 }
13871                 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
13872                 bp += 9;
13873         }
13874 
13875         (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
13876         bp += strlen(bp);
13877         for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
13878                 if (cnt % 8 == 0) {
13879                         (void) sprintf(bp++, "\n");
13880                 }
13881                 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
13882                 bp += 9;
13883         }
13884 
13885         (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
13886         bp += strlen(bp);
13887         for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
13888                 if (cnt % 8 == 0) {
13889                         (void) sprintf(bp++, "\n");
13890                 }
13891                 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
13892                 bp += 9;
13893         }
13894 
13895         (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
13896         bp += strlen(bp);
13897         for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
13898                 if (cnt % 8 == 0) {
13899                         (void) sprintf(bp++, "\n");
13900                 }
13901                 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
13902                 bp += 9;
13903         }
13904 
13905         (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
13906         bp += strlen(bp);
13907         for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
13908                 if (cnt % 8 == 0) {
13909                         (void) sprintf(bp++, "\n");
13910                 }
13911                 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
13912                 bp += 9;
13913         }
13914 
13915         (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
13916         bp += strlen(bp);
13917         for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
13918                 if (cnt % 8 == 0) {
13919                         (void) sprintf(bp++, "\n");
13920                 }
13921                 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
13922                 bp += 9;
13923         }
13924 
13925         (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
13926         bp += strlen(bp);
13927         for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
13928                 if (cnt % 8 == 0) {
13929                         (void) sprintf(bp++, "\n");
13930                 }
13931                 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
13932                 bp += 9;
13933         }
13934 
13935         (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
13936         bp += strlen(bp);
13937         for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
13938                 if (cnt % 8 == 0) {
13939                         (void) sprintf(bp++, "\n");
13940                 }
13941                 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
13942                 bp += 9;
13943         }
13944 
13945         (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
13946         bp += strlen(bp);
13947         for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
13948                 if (cnt % 8 == 0) {
13949                         (void) sprintf(bp++, "\n");
13950                 }
13951                 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
13952                 bp += 9;
13953         }
13954 
13955         (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
13956         bp += strlen(bp);
13957         for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
13958                 if (cnt % 8 == 0) {
13959                         (void) sprintf(bp++, "\n");
13960                 }
13961                 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
13962                 bp += 9;
13963         }
13964 
13965         (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
13966         bp += strlen(bp);
13967         for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
13968                 if (cnt % 8 == 0) {
13969                         (void) sprintf(bp++, "\n");
13970                 }
13971                 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
13972                 bp += 9;
13973         }
13974 
13975         (void) sprintf(bp, "\n\nRISC GP Registers");
13976         bp += strlen(bp);
13977         for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13978                 if (cnt % 8 == 0) {
13979                         (void) sprintf(bp++, "\n");
13980                 }
13981                 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13982                 bp += 9;
13983         }
13984 
13985         (void) sprintf(bp, "\n\nLMC Registers");
13986         bp += strlen(bp);
13987         for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13988                 if (cnt % 8 == 0) {
13989                         (void) sprintf(bp++, "\n");
13990                 }
13991                 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13992                 bp += 9;
13993         }
13994 
13995         (void) sprintf(bp, "\n\nFPM Hardware Registers");
13996         bp += strlen(bp);
13997         cnt1 = sizeof (fw->fpm_hdw_reg);
13998         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13999                 if (cnt % 8 == 0) {
14000                         (void) sprintf(bp++, "\n");
14001                 }
14002                 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
14003                 bp += 9;
14004         }
14005 
14006         (void) sprintf(bp, "\n\nFB Hardware Registers");
14007         bp += strlen(bp);
14008         cnt1 = sizeof (fw->fb_hdw_reg);
14009         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
14010                 if (cnt % 8 == 0) {
14011                         (void) sprintf(bp++, "\n");
14012                 }
14013                 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
14014                 bp += 9;
14015         }
14016 
14017         (void) sprintf(bp, "\n\nCode RAM");
14018         bp += strlen(bp);
14019         for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
14020                 if (cnt % 8 == 0) {
14021                         (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
14022                         bp += 11;
14023                 }
14024                 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
14025                 bp += 9;
14026         }
14027 
14028         (void) sprintf(bp, "\n\nExternal Memory");
14029         bp += strlen(bp);
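        /*
         * req_rsp_ext_mem holds, in order: queue in/out pointer snapshots
         * (16 bytes per response queue), the request queue images, the
         * response queue images, and then the external memory image
         * printed here.
         */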
14030         dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
14031             fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
14032         for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
14033                 if (cnt % 8 == 0) {
14034                         (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
14035                         bp += 11;
14036                 }
14037                 (void) sprintf(bp, "%08x ", *dp++);
14038                 bp += 9;
14039         }
14040 
14041         (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
14042         bp += strlen(bp);
14043 
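        /*
         * Skip the queue pointer snapshots (4 dwords per response queue)
         * and dump only queues that contain at least one nonzero word;
         * all-zero queue images are skipped.
         */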
14044         dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
14045         for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
14046                 dp2 = dp;
14047                 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14048                         if (*dp2++) {
14049                                 break;
14050                         }
14051                 }
14052                 if (cnt1 == fw->req_q_size[cnt] / 4) {
14053                         dp = dp2;
14054                         continue;
14055                 }
14056                 (void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
14057                 bp += strlen(bp);
14058                 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14059                         if (cnt1 % 8 == 0) {
14060                                 (void) sprintf(bp, "\n%08x: ", cnt1);
14061                                 bp += strlen(bp);
14062                         }
14063                         (void) sprintf(bp, "%08x ", *dp++);
14064                         bp += strlen(bp);
14065                 }
14066         }
14067 
14068         for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
14069                 dp2 = dp;
14070                 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14071                     cnt1++) {
14072                         if (*dp2++) {
14073                                 break;
14074                         }
14075                 }
14076                 if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
14077                         dp = dp2;
14078                         continue;
14079                 }
14080                 (void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
14081                 bp += strlen(bp);
14082                 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14083                     cnt1++) {
14084                         if (cnt1 % 8 == 0) {
14085                                 (void) sprintf(bp, "\n%08x: ", cnt1);
14086                                 bp += strlen(bp);
14087                         }
14088                         (void) sprintf(bp, "%08x ", *dp++);
14089                         bp += strlen(bp);
14090                 }
14091         }
14092 
14093         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14094             (ha->fwexttracebuf.bp != NULL)) {
14095                 uint32_t cnt_b = 0;
14096                 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
14097 
14098                 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
14099                 bp += strlen(bp);
14100                 /* show data address as a byte address, data as long words */
14101                 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14102                         cnt_b = cnt * 4;
14103                         if (cnt_b % 32 == 0) {
14104                                 (void) sprintf(bp, "\n%08x: ",
14105                                     (int)(w64 + cnt_b));
14106                                 bp += 11;
14107                         }
14108                         (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
14109                         bp += 9;
14110                 }
14111         }
14112 
 
14118                 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
14119                 bp += strlen(bp);
14120                 /* show data address as a byte address, data as long words */
14121                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14122                         cnt_b = cnt * 4;
14123                         if (cnt_b % 32 == 0) {
14124                                 (void) sprintf(bp, "\n%08x: ",
14125                                     (int)(w64 + cnt_b));
14126                                 bp += 11;
14127                         }
14128                         (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
14129                         bp += 9;
14130                 }
14131         }
14132 
14133         (void) sprintf(bp, "\n\n");
14134         bp += strlen(bp);
14135 
14136         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
14137 
14138         QL_PRINT_10(ha, "done=%xh\n", cnt);
14139 
14140         return (cnt);
14141 }
14142 
14143 /*
14144  * ql_8021_ascii_fw_dump
14145  *      Converts an ISP8021 firmware binary dump to ASCII.
14146  *
14147  * Input:
14148  *      ha = adapter state pointer.
14149  *      bufp = buffer pointer.
14150  *
14151  * Returns:
14152  *      Amount of data buffer used.
14153  *
14154  * Context:
14155  *      Kernel context.
14156  */
14157 static size_t
14158 ql_8021_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
14159 {
14160         uint32_t        cnt;
14161         caddr_t         bp = bufp;
14162         uint8_t         *fw = ha->ql_dump_ptr;
14163 
14164         /*
14165          * Each binary byte expands to two ASCII hex characters plus
14166          * a space, and a newline is emitted every 16 binary bytes.
14167          */
14168         cnt = 0;
14169         while (cnt < ha->ql_dump_size) {
14170                 (void) sprintf(bp, "%02x ", *fw++);
14171                 bp += strlen(bp);
14172                 if (++cnt % 16 == 0) {
14173                         (void) sprintf(bp, "\n");
14174                         bp += strlen(bp);
14175                 }
14176         }
14177         if (cnt % 16 != 0) {
14178                 (void) sprintf(bp, "\n");
14179                 bp += strlen(bp);
14180         }
14181         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
14182         QL_PRINT_10(ha, "done=%xh\n", cnt);
14183         return (cnt);
14184 }
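/*
 * Buffer sizing sketch (illustrative only, not part of the driver): with
 * the "%02x " expansion above, each binary byte needs 3 ASCII characters
 * and a '\n' is added after every 16 bytes, so a hypothetical helper for
 * sizing the ASCII buffer could look like:
 *
 *	static size_t
 *	ql_8021_ascii_size(ql_adapter_state_t *ha)
 *	{
 *		return (ha->ql_dump_size * 3 +
 *		    ha->ql_dump_size / 16 + 2);
 *	}
 *
 * which leaves room for the trailing newline and NUL terminator.
 */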
14185 
14186 /*
14187  * ql_2200_binary_fw_dump
14188  *
14189  * Input:
14190  *      ha:     adapter state pointer.
14191  *      fw:     firmware dump context pointer.
14192  *
14193  * Returns:
14194  *      ql local function return status code.
14195  *
14196  * Context:
14197  *      Interrupt or Kernel context, no mailbox commands allowed.
14198  */
14199 static int
14200 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
14201 {
14202         uint32_t        cnt;
14203         uint16_t        risc_address;
14204         clock_t         timer;
14205         mbx_cmd_t       mc;
14206         mbx_cmd_t       *mcp = &mc;
14207         int             rval = QL_SUCCESS;
14208 
14209         QL_PRINT_3(ha, "started\n");
14210 
14211         /* Disable ISP interrupts. */
14212         ql_disable_intr(ha);
14213 
14214         /* Release mailbox registers. */
14215         WRT16_IO_REG(ha, semaphore, 0);
14216 
14217         /* Pause RISC. */
14218         WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
14219         timer = 30000;
14220         while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
14221                 if (timer-- != 0) {
14222                         drv_usecwait(MILLISEC);
14223                 } else {
14224                         rval = QL_FUNCTION_TIMEOUT;
14225                         break;
14226                 }
14227         }
14228 
14229         if (rval == QL_SUCCESS) {
14230                 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
14231                     sizeof (fw->pbiu_reg) / 2, 16);
14232 
 
14295                 /* Select frame buffer registers. */
14296                 WRT16_IO_REG(ha, ctrl_status, 0x10);
14297 
14298                 /* Reset frame buffer FIFOs. */
14299                 WRT16_IO_REG(ha, fb_cmd, 0xa000);
14300 
14301                 /* Select RISC module registers. */
14302                 WRT16_IO_REG(ha, ctrl_status, 0);
14303 
14304                 /* Reset RISC module. */
14305                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
14306 
14307                 /* Reset ISP semaphore. */
14308                 WRT16_IO_REG(ha, semaphore, 0);
14309 
14310                 /* Release RISC module. */
14311                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
14312 
14313                 /* Wait for RISC to recover from reset. */
14314                 timer = 30000;
14315                 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
14316                         if (timer-- != 0) {
14317                                 drv_usecwait(MILLISEC);
14318                         } else {
14319                                 rval = QL_FUNCTION_TIMEOUT;
14320                                 break;
14321                         }
14322                 }
14323 
14324                 /* Disable RISC pause on FPM parity error. */
14325                 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
14326         }
14327 
14328         if (rval == QL_SUCCESS) {
14329                 /* Pause RISC. */
14330                 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
14331                 timer = 30000;
14332                 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
14333                         if (timer-- != 0) {
14334                                 drv_usecwait(MILLISEC);
14335                         } else {
 
14369                                                 break;
14370                                         }
14371                                         WRT16_IO_REG(ha, hccr,
14372                                             HC_CLR_RISC_INT);
14373                                 }
14374                                 drv_usecwait(5);
14375                         }
14376 
14377                         if (timer == 0) {
14378                                 rval = QL_FUNCTION_TIMEOUT;
14379                         } else {
14380                                 rval = mcp->mb[0];
14381                         }
14382 
14383                         if (rval != QL_SUCCESS) {
14384                                 break;
14385                         }
14386                 }
14387         }
14388 
14389         QL_PRINT_3(ha, "done\n");
14390 
14391         return (rval);
14392 }
14393 
14394 /*
14395  * ql_2300_binary_fw_dump
14396  *
14397  * Input:
14398  *      ha:     adapter state pointer.
14399  *      fw:     firmware dump context pointer.
14400  *
14401  * Returns:
14402  *      ql local function return status code.
14403  *
14404  * Context:
14405  *      Interrupt or Kernel context, no mailbox commands allowed.
14406  */
14407 static int
14408 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
14409 {
14410         clock_t timer;
14411         int     rval = QL_SUCCESS;
14412 
14413         QL_PRINT_3(ha, "started\n");
14414 
14415         /* Disable ISP interrupts. */
14416         ql_disable_intr(ha);
14417 
14418         /* Release mailbox registers. */
14419         WRT16_IO_REG(ha, semaphore, 0);
14420 
14421         /* Pause RISC. */
14422         WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
14423         timer = 30000;
14424         while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
14425                 if (timer-- != 0) {
14426                         drv_usecwait(MILLISEC);
14427                 } else {
14428                         rval = QL_FUNCTION_TIMEOUT;
14429                         break;
14430                 }
14431         }
14432 
14433         if (rval == QL_SUCCESS) {
14434                 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
14435                     sizeof (fw->pbiu_reg) / 2, 16);
14436 
 
14505                 /* Select frame buffer registers. */
14506                 WRT16_IO_REG(ha, ctrl_status, 0x10);
14507 
14508                 /* Reset frame buffer FIFOs. */
14509                 WRT16_IO_REG(ha, fb_cmd, 0xa000);
14510 
14511                 /* Select RISC module registers. */
14512                 WRT16_IO_REG(ha, ctrl_status, 0);
14513 
14514                 /* Reset RISC module. */
14515                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
14516 
14517                 /* Reset ISP semaphore. */
14518                 WRT16_IO_REG(ha, semaphore, 0);
14519 
14520                 /* Release RISC module. */
14521                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
14522 
14523                 /* Wait for RISC to recover from reset. */
14524                 timer = 30000;
14525                 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
14526                         if (timer-- != 0) {
14527                                 drv_usecwait(MILLISEC);
14528                         } else {
14529                                 rval = QL_FUNCTION_TIMEOUT;
14530                                 break;
14531                         }
14532                 }
14533 
14534                 /* Disable RISC pause on FPM parity error. */
14535                 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
14536         }
14537 
14538         /* Get RISC SRAM. */
14539         if (rval == QL_SUCCESS) {
14540                 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
14541         }
14542         /* Get STACK SRAM. */
14543         if (rval == QL_SUCCESS) {
14544                 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
14545         }
14546         /* Get DATA SRAM. */
14547         if (rval == QL_SUCCESS) {
14548                 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
14549         }
14550 
14551         QL_PRINT_3(ha, "done\n");
14552 
14553         return (rval);
14554 }
14555 
14556 /*
14557  * ql_24xx_binary_fw_dump
14558  *
14559  * Input:
14560  *      ha:     adapter state pointer.
14561  *      fw:     firmware dump context pointer.
14562  *
14563  * Returns:
14564  *      ql local function return status code.
14565  *
14566  * Context:
14567  *      Interrupt or Kernel context, no mailbox commands allowed.
14568  */
14569 static int
14570 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
14571 {
14572         uint32_t        *reg32;
14573         void            *bp;
14574         clock_t         timer;
14575         int             rval = QL_SUCCESS;
14576 
14577         QL_PRINT_3(ha, "started\n");
14578 
14579         fw->hccr = RD32_IO_REG(ha, hccr);
14580 
14581         /* Pause RISC. */
14582         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14583                 /* Disable ISP interrupts. */
14584                 ql_disable_intr(ha);
14585 
14586                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14587                 for (timer = 30000;
14588                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14589                     rval == QL_SUCCESS; timer--) {
14590                         if (timer) {
14591                                 drv_usecwait(100);
14592                         } else {
14593                                 rval = QL_FUNCTION_TIMEOUT;
14594                         }
14595                 }
14596         }
14597 
14598         if (rval == QL_SUCCESS) {
14599                 /* Host interface registers. */
14600                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14601                     sizeof (fw->host_reg) / 4, 32);
14602 
14603                 /* Disable ISP interrupts. */
14604                 ql_disable_intr(ha);
14605 
14606                 /* Shadow registers. */
14607 
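                /*
                 * Shadow registers are read indirectly: select register
                 * window 0x0F70, then for each register write its selector
                 * (0xB0n00000) to offset 0xF0 and read the value back from
                 * offset 0xFC.
                 */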
14608                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14609                 RD32_IO_REG(ha, io_base_addr);
14610 
14611                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14612                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14613                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14614                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14615 
14616                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14617                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14618                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14619                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14620 
14621                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14622                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14623                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14624                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
 
14881                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14882                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14883                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14884                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14885                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14886                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14887                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14888                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14889                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14890                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14891                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14892                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14893                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14894                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14895                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14896         }
14897 
14898         /* Get the request queue */
14899         if (rval == QL_SUCCESS) {
14900                 uint32_t        cnt;
14901                 uint32_t        *w32 = (uint32_t *)ha->req_q[0]->req_ring.bp;
14902 
14903                 /* Sync DMA buffer. */
14904                 (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle,
14905                     0, sizeof (fw->req_q), DDI_DMA_SYNC_FORKERNEL);
14906 
14907                 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14908                         fw->req_q[cnt] = *w32++;
14909                         LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14910                 }
14911         }
14912 
14913         /* Get the response queue */
14914         if (rval == QL_SUCCESS) {
14915                 uint32_t        cnt;
14916                 uint32_t        *w32 =
14917                     (uint32_t *)ha->rsp_queues[0]->rsp_ring.bp;
14918 
14919                 /* Sync DMA buffer. */
14920                 (void) ddi_dma_sync(ha->rsp_queues[0]->rsp_ring.dma_handle,
14921                     0, sizeof (fw->rsp_q), DDI_DMA_SYNC_FORKERNEL);
14922 
14923                 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14924                         fw->rsp_q[cnt] = *w32++;
14925                         LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14926                 }
14927         }
14928 
14929         /* Reset RISC. */
14930         ql_reset_chip(ha);
14931 
14932         /* Memory. */
14933         if (rval == QL_SUCCESS) {
14934                 /* Code RAM. */
14935                 rval = ql_read_risc_ram(ha, 0x20000,
14936                     sizeof (fw->code_ram) / 4, fw->code_ram);
14937         }
14938         if (rval == QL_SUCCESS) {
14939                 /* External Memory. */
14940                 rval = ql_read_risc_ram(ha, 0x100000,
14941                     ha->fw_ext_memory_size / 4, fw->ext_mem);
 
14962         if (rval == QL_SUCCESS) {
14963                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14964                     (ha->fwfcetracebuf.bp != NULL)) {
14965                         uint32_t        cnt;
14966                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
14967 
14968                         /* Sync DMA buffer. */
14969                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14970                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14971 
14972                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14973                                 fw->fce_trace_buf[cnt] = *w32++;
14974                         }
14975                 }
14976         }
14977 
14978         if (rval != QL_SUCCESS) {
14979                 EL(ha, "failed=%xh\n", rval);
14980         } else {
14981                 /*EMPTY*/
14982                 QL_PRINT_3(ha, "done\n");
14983         }
14984 
14985         return (rval);
14986 }
14987 
14988 /*
14989  * ql_25xx_binary_fw_dump
14990  *
14991  * Input:
14992  *      ha:     adapter state pointer.
14993  *      fw:     firmware dump context pointer.
14994  *
14995  * Returns:
14996  *      ql local function return status code.
14997  *
14998  * Context:
14999  *      Interrupt or Kernel context, no mailbox commands allowed.
15000  */
15001 static int
15002 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
15003 {
15004         uint32_t        *reg32, cnt, *w32ptr, index, *dp;
15005         void            *bp;
15006         clock_t         timer;
15007         int             rval = QL_SUCCESS;
15008 
15009         QL_PRINT_3(ha, "started\n");
15010 
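        /*
         * Record the ring sizes up front; ql_25xx_ascii_fw_dump() uses
         * them to locate the queue and external memory images within
         * req_rsp_ext_mem.
         */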
15011         fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
15012         if (ha->req_q[1] != NULL) {
15013                 fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
15014         }
15015         fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
15016 
15017         fw->hccr = RD32_IO_REG(ha, hccr);
15018         fw->r2h_status = RD32_IO_REG(ha, risc2host);
15019         fw->aer_ues = ql_pci_config_get32(ha, 0x104);
15020 
15021         /* Pause RISC. */
15022         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
15023                 /* Disable ISP interrupts. */
15024                 ql_disable_intr(ha);
15025 
15026                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
15027                 for (timer = 30000;
15028                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
15029                     rval == QL_SUCCESS; timer--) {
15030                         if (timer) {
15031                                 drv_usecwait(100);
15032                                 if (timer % 10000 == 0) {
15033                                         EL(ha, "risc pause %d\n", timer);
15034                                 }
15035                         } else {
15036                                 EL(ha, "risc pause timeout\n");
15037                                 rval = QL_FUNCTION_TIMEOUT;
15038                         }
15039                 }
15040         }
15041 
15042         if (rval == QL_SUCCESS) {
15043 
15044                 /* Host Interface registers */
15045 
15046                 /* HostRisc registers. */
15047                 WRT32_IO_REG(ha, io_base_addr, 0x7000);
15048                 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
15049                     16, 32);
15050                 WRT32_IO_REG(ha, io_base_addr, 0x7010);
15051                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15052 
15053                 /* PCIe registers. */
15054                 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
15055                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
15056                 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
15057                     3, 32);
15058                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
15059                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
15060 
15061                 /* Host interface registers. */
15062                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
15063                     sizeof (fw->host_reg) / 4, 32);
15064 
15065                 /* Disable ISP interrupts. */
15066                 ql_disable_intr(ha);
15067 
15068                 /* Shadow registers. */
15069 
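                /*
                 * Shadow registers are read indirectly: select register
                 * window 0x0F70, then for each register write its selector
                 * (0xB0n00000) to offset 0xF0 and read the value back from
                 * offset 0xFC.
                 */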
15070                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
15071                 RD32_IO_REG(ha, io_base_addr);
15072 
15073                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15074                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
15075                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15076                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
15077 
15078                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15079                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
15080                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15081                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
15082 
15083                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15084                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
15085                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15086                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
15087 
 
15415                 WRT32_IO_REG(ha, io_base_addr, 0x6030);
15416                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15417                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
15418                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15419                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
15420                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15421                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
15422                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15423                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
15424                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15425                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
15426                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15427                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
15428                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15429                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
15430                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15431                 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
15432                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15433         }
15434 
15435         if (rval == QL_SUCCESS) {
15436                 /* Get the Queue Pointers */
15437                 dp = fw->req_rsp_ext_mem;
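                /*
                 * For each response queue, store four little-endian dwords:
                 * request queue in/out pointers (queues 0 and 1 only),
                 * followed by that response queue's in/out pointers; slots
                 * are zero when MBAR multi-queue access is not enabled.
                 */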
15438                 for (index = 0; index < ha->rsp_queues_cnt; index++) {
15439                         if (index == 0 && ha->flags & MULTI_QUEUE) {
15440                                 *dp = RD32_MBAR_REG(ha,
15441                                     ha->req_q[0]->mbar_req_in);
15442                                 LITTLE_ENDIAN_32(dp);
15443                                 dp++;
15444                                 *dp = RD32_MBAR_REG(ha,
15445                                     ha->req_q[0]->mbar_req_out);
15446                                 LITTLE_ENDIAN_32(dp);
15447                                 dp++;
15448                         } else if (index == 1 && ha->flags & MULTI_QUEUE) {
15449                                 *dp = RD32_MBAR_REG(ha,
15450                                     ha->req_q[1]->mbar_req_in);
15451                                 LITTLE_ENDIAN_32(dp);
15452                                 dp++;
15453                                 *dp = RD32_MBAR_REG(ha,
15454                                     ha->req_q[1]->mbar_req_out);
15455                                 LITTLE_ENDIAN_32(dp);
15456                                 dp++;
15457                         } else {
15458                                 *dp++ = 0;
15459                                 *dp++ = 0;
15460                         }
15461                         if (ha->flags & MULTI_QUEUE) {
15462                                 *dp = RD32_MBAR_REG(ha,
15463                                     ha->rsp_queues[index]->mbar_rsp_in);
15464                                 LITTLE_ENDIAN_32(dp);
15465                                 dp++;
15466                                 *dp = RD32_MBAR_REG(ha,
15467                                     ha->rsp_queues[index]->mbar_rsp_out);
15468                                 LITTLE_ENDIAN_32(dp);
15469                                 dp++;
15470                         } else {
15471                                 *dp++ = 0;
15472                                 *dp++ = 0;
15473                         }
15474                 }
15475                 /* Get the request queue */
15476                 (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
15477                     DDI_DMA_SYNC_FORCPU);
15478                 w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
15479                 for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
15480                         *dp = *w32ptr++;
15481                         LITTLE_ENDIAN_32(dp);
15482                         dp++;
15483                 }
15484                 if (ha->req_q[1] != NULL) {
15485                         (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
15486                             0, 0, DDI_DMA_SYNC_FORCPU);
15487                         w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
15488                         for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
15489                                 *dp = *w32ptr++;
15490                                 LITTLE_ENDIAN_32(dp);
15491                                 dp++;
15492                         }
15493                 }
15494 
15495                 /* Get the response queues */
15496                 for (index = 0; index < ha->rsp_queues_cnt; index++) {
15497                         (void) ddi_dma_sync(
15498                             ha->rsp_queues[index]->rsp_ring.dma_handle,
15499                             0, 0, DDI_DMA_SYNC_FORCPU);
15500                         w32ptr = (uint32_t *)
15501                             ha->rsp_queues[index]->rsp_ring.bp;
15502                         for (cnt = 0;
15503                             cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
15504                             cnt++) {
15505                                 *dp = *w32ptr++;
15506                                 LITTLE_ENDIAN_32(dp);
15507                                 dp++;
15508                         }
15509                 }
15510         }
15511 
15512         /* Reset RISC. */
15513         ql_reset_chip(ha);
15514 
15515         /* Memory. */
15516         if (rval == QL_SUCCESS) {
15517                 /* Code RAM. */
15518                 rval = ql_read_risc_ram(ha, 0x20000,
15519                     sizeof (fw->code_ram) / 4, fw->code_ram);
15520         }
15521         if (rval == QL_SUCCESS) {
15522                 /* External Memory. */
15523                 rval = ql_read_risc_ram(ha, 0x100000,
15524                     ha->fw_ext_memory_size / 4, dp);
15525         }
15526 
15527         /* Get the FC event trace buffer */
15528         if (rval == QL_SUCCESS) {
15529                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
15530                     (ha->fwfcetracebuf.bp != NULL)) {
15531                         uint32_t        cnt;
15532                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
15533 
15534                         /* Sync DMA buffer. */
15535                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
15536                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
15537 
15538                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
15539                                 fw->fce_trace_buf[cnt] = *w32++;
15540                         }
15541                 }
15542         }
15543 
15544         /* Get the extended trace buffer */
15545         if (rval == QL_SUCCESS) {
15546                 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
15547                     (ha->fwexttracebuf.bp != NULL)) {
15548                         uint32_t        cnt;
15549                         uint32_t        *w32 = ha->fwexttracebuf.bp;
15550 
15551                         /* Sync DMA buffer. */
15552                         (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15553                             FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15554 
15555                         for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15556                                 fw->ext_trace_buf[cnt] = *w32++;
15557                         }
15558                 }
15559         }
15560 
15561         if (rval != QL_SUCCESS) {
15562                 EL(ha, "failed=%xh\n", rval);
15563         } else {
15564                 /*EMPTY*/
15565                 QL_PRINT_3(ha, "done\n");
15566         }
15567 
15568         return (rval);
15569 }
15570 
15571 /*
15572  * ql_81xx_binary_fw_dump
15573  *
15574  * Input:
15575  *      ha:     adapter state pointer.
15576  *      fw:     firmware dump context pointer.
15577  *
15578  * Returns:
15579  *      ql local function return status code.
15580  *
15581  * Context:
15582  *      Interrupt or Kernel context, no mailbox commands allowed.
15583  */
15584 static int
15585 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
15586 {
15587         uint32_t        *reg32, cnt, *w32ptr, index, *dp;
15588         void            *bp;
15589         clock_t         timer;
15590         int             rval = QL_SUCCESS;
15591 
15592         QL_PRINT_3(ha, "started\n");
15593 
15594         fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
15595         if (ha->req_q[1] != NULL) {
15596                 fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
15597         }
15598         fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
15599 
15600         fw->hccr = RD32_IO_REG(ha, hccr);
15601         fw->r2h_status = RD32_IO_REG(ha, risc2host);
15602         fw->aer_ues = ql_pci_config_get32(ha, 0x104);
15603 
15604         /* Pause RISC. */
15605         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
15606                 /* Disable ISP interrupts. */
15607                 ql_disable_intr(ha);
15608 
15609                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
15610                 for (timer = 30000;
15611                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
15612                     rval == QL_SUCCESS; timer--) {
15613                         if (timer) {
15614                                 drv_usecwait(100);
15615                                 if (timer % 10000 == 0) {
15616                                         EL(ha, "risc pause %d\n", timer);
15617                                 }
15618                         } else {
15619                                 EL(ha, "risc pause timeout\n");
15620                                 rval = QL_FUNCTION_TIMEOUT;
15621                         }
15622                 }
15623         }
15624 
15625         if (rval == QL_SUCCESS) {
15626 
15627                 /* Host Interface registers */
15628 
15629                 /* HostRisc registers. */
15630                 WRT32_IO_REG(ha, io_base_addr, 0x7000);
15631                 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
15632                     16, 32);
15633                 WRT32_IO_REG(ha, io_base_addr, 0x7010);
15634                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15635 
15636                 /* PCIe registers. */
15637                 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
15638                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
15639                 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
15640                     3, 32);
15641                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
15642                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
15643 
15644                 /* Host interface registers. */
15645                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
15646                     sizeof (fw->host_reg) / 4, 32);
15647 
15648                 /* Disable ISP interrupts. */
15649                 ql_disable_intr(ha);
15650 
15651                 /* Shadow registers. */
15652 
15653                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
15654                 RD32_IO_REG(ha, io_base_addr);
15655 
15656                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15657                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
15658                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15659                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
15660 
15661                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15662                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
15663                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15664                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
15665 
15666                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15667                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
15668                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15669                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
15670 
 
16004                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
16005                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16006                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
16007                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16008                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
16009                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16010                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
16011                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16012                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
16013                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16014                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
16015                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16016                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
16017                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16018                 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
16019                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16020                 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
16021                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16022         }
16023 
16024         if (rval == QL_SUCCESS) {
16025                 /* Get the Queue Pointers */
16026                 dp = fw->req_rsp_ext_mem;
16027                 for (index = 0; index < ha->rsp_queues_cnt; index++) {
16028                         if (index == 0 && ha->flags & MULTI_QUEUE) {
16029                                 *dp = RD32_MBAR_REG(ha,
16030                                     ha->req_q[0]->mbar_req_in);
16031                                 LITTLE_ENDIAN_32(dp);
16032                                 dp++;
16033                                 *dp = RD32_MBAR_REG(ha,
16034                                     ha->req_q[0]->mbar_req_out);
16035                                 LITTLE_ENDIAN_32(dp);
16036                                 dp++;
16037                         } else if (index == 1 && ha->flags & MULTI_QUEUE) {
16038                                 *dp = RD32_MBAR_REG(ha,
16039                                     ha->req_q[1]->mbar_req_in);
16040                                 LITTLE_ENDIAN_32(dp);
16041                                 dp++;
16042                                 *dp = RD32_MBAR_REG(ha,
16043                                     ha->req_q[1]->mbar_req_out);
16044                                 LITTLE_ENDIAN_32(dp);
16045                                 dp++;
16046                         } else {
16047                                 *dp++ = 0;
16048                                 *dp++ = 0;
16049                         }
16050                         if (ha->flags & MULTI_QUEUE) {
16051                                 *dp = RD32_MBAR_REG(ha,
16052                                     ha->rsp_queues[index]->mbar_rsp_in);
16053                                 LITTLE_ENDIAN_32(dp);
16054                                 dp++;
16055                                 *dp = RD32_MBAR_REG(ha,
16056                                     ha->rsp_queues[index]->mbar_rsp_out);
16057                                 LITTLE_ENDIAN_32(dp);
16058                                 dp++;
16059                         } else {
16060                                 *dp++ = 0;
16061                                 *dp++ = 0;
16062                         }
16063                 }
16064                 /* Get the request queue */
16065                 (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
16066                     DDI_DMA_SYNC_FORCPU);
16067                 w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
16068                 for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
16069                         *dp = *w32ptr++;
16070                         LITTLE_ENDIAN_32(dp);
16071                         dp++;
16072                 }
16073                 if (ha->req_q[1] != NULL) {
16074                         (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
16075                             0, 0, DDI_DMA_SYNC_FORCPU);
16076                         w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
16077                         for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
16078                                 *dp = *w32ptr++;
16079                                 LITTLE_ENDIAN_32(dp);
16080                                 dp++;
16081                         }
16082                 }
16083 
16084                 /* Get the response queues */
16085                 for (index = 0; index < ha->rsp_queues_cnt; index++) {
16086                         (void) ddi_dma_sync(
16087                             ha->rsp_queues[index]->rsp_ring.dma_handle,
16088                             0, 0, DDI_DMA_SYNC_FORCPU);
16089                         w32ptr = (uint32_t *)
16090                             ha->rsp_queues[index]->rsp_ring.bp;
16091                         for (cnt = 0;
16092                             cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
16093                             cnt++) {
16094                                 *dp = *w32ptr++;
16095                                 LITTLE_ENDIAN_32(dp);
16096                                 dp++;
16097                         }
16098                 }
16099         }
16100 
16101         /* Reset RISC. */
16102         ql_reset_chip(ha);
16103 
16104         /* Memory. */
16105         if (rval == QL_SUCCESS) {
16106                 /* Code RAM. */
16107                 rval = ql_read_risc_ram(ha, 0x20000,
16108                     sizeof (fw->code_ram) / 4, fw->code_ram);
16109         }
16110         if (rval == QL_SUCCESS) {
16111                 /* External Memory. */
16112                 rval = ql_read_risc_ram(ha, 0x100000,
16113                     ha->fw_ext_memory_size / 4, dp);
16114         }
16115 
16116         /* Get the FC event trace buffer */
16117         if (rval == QL_SUCCESS) {
16118                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
16119                     (ha->fwfcetracebuf.bp != NULL)) {
16120                         uint32_t        cnt;
16121                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
16122 
16123                         /* Sync DMA buffer. */
16124                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
16125                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
16126 
16127                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
16128                                 fw->fce_trace_buf[cnt] = *w32++;
16129                         }
16130                 }
16131         }
16132 
16133         /* Get the extended trace buffer */
16134         if (rval == QL_SUCCESS) {
16135                 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
16136                     (ha->fwexttracebuf.bp != NULL)) {
16137                         uint32_t        cnt;
16138                         uint32_t        *w32 = ha->fwexttracebuf.bp;
16139 
16140                         /* Sync DMA buffer. */
16141                         (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
16142                             FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
16143 
16144                         for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
16145                                 fw->ext_trace_buf[cnt] = *w32++;
16146                         }
16147                 }
16148         }
16149 
16150         if (rval != QL_SUCCESS) {
16151                 EL(ha, "failed=%xh\n", rval);
16152         } else {
16153                 /*EMPTY*/
16154                 QL_PRINT_3(ha, "done\n");
16155         }
16156 
16157         return (rval);
16158 }
16159 
16160 /*
16161  * ql_read_risc_ram
16162  *      Reads RISC RAM one word at a time.
16163  *      RISC interrupts must be disabled when this routine is called.
16164  *
16165  * Input:
16166  *      ha:     adapter state pointer.
16167  *      risc_address:   starting RISC RAM address.
16168  *      len:            Number of words.
16169  *      buf:            buffer pointer.
16170  *
16171  * Returns:
16172  *      ql local function return status code.
16173  *
16174  * Context:
16175  *      Interrupt or Kernel context, no mailbox commands allowed.
16176  */
16177 static int
16178 ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
16179     void *buf)
16180 {
16181         uint32_t        cnt;
16182         uint16_t        stat;
16183         clock_t         timer;
16184         uint16_t        *buf16 = (uint16_t *)buf;
16185         uint32_t        *buf32 = (uint32_t *)buf;
16186         int             rval = QL_SUCCESS;
16187 
16188         for (cnt = 0; cnt < len; cnt++, risc_address++) {
16189                 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
16190                 WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
16191                 WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
16192                 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16193                         WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
16194                 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16195                         WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
16196                 } else {
16197                         WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
16198                 }
16199                 for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
16200                         if (INTERRUPT_PENDING(ha)) {
16201                                 stat = (uint16_t)
16202                                     (RD16_IO_REG(ha, risc2host) & 0xff);
16203                                 if ((stat == 1) || (stat == 0x10)) {
16204                                         if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16205                                                 buf32[cnt] = SHORT_TO_LONG(
16206                                                     RD16_IO_REG(ha,
16207                                                     mailbox_out[2]),
16208                                                     RD16_IO_REG(ha,
16209                                                     mailbox_out[3]));
16210                                         } else {
16211                                                 buf16[cnt] =
16212                                                     RD16_IO_REG(ha,
16213                                                     mailbox_out[2]);
16214                                         }
16215 
16216                                         break;
16217                                 } else if ((stat == 2) || (stat == 0x11)) {
16218                                         rval = RD16_IO_REG(ha, mailbox_out[0]);
16219                                         break;
16220                                 }
16221                                 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16222                                         ql_8021_clr_hw_intr(ha);
16223                                         ql_8021_clr_fw_intr(ha);
16224                                 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16225                                         WRT32_IO_REG(ha, hccr,
16226                                             HC24_CLR_RISC_INT);
16227                                         RD32_IO_REG(ha, hccr);
16228                                 } else {
16229                                         WRT16_IO_REG(ha, semaphore, 0);
16230                                         WRT16_IO_REG(ha, hccr,
16231                                             HC_CLR_RISC_INT);
16232                                         RD16_IO_REG(ha, hccr);
16233                                 }
16234                         }
16235                         drv_usecwait(5);
16236                 }
16237                 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16238                         ql_8021_clr_hw_intr(ha);
16239                         ql_8021_clr_fw_intr(ha);
16240                 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16241                         WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
16242                         RD32_IO_REG(ha, hccr);
16243                 } else {
16244                         WRT16_IO_REG(ha, semaphore, 0);
16245                         WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
16246                         RD16_IO_REG(ha, hccr);
16247                 }
16248 
16249                 if (timer == 0) {
16250                         rval = QL_FUNCTION_TIMEOUT;
16251                 }
16252         }
16253 
16254         return (rval);
16255 }
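
/*
 * Illustrative sketch (not part of the driver): a typical caller of
 * ql_read_risc_ram(), following the pattern of the firmware dump
 * routines above.  As the routine's header requires, RISC interrupts
 * are disabled before the read; the destination buffer "code_ram" and
 * its size are hypothetical.
 *
 *      uint32_t        code_ram[0x2000];
 *      int             rval;
 *
 *      ql_disable_intr(ha);
 *      rval = ql_read_risc_ram(ha, 0x20000,
 *          sizeof (code_ram) / 4, code_ram);
 *      if (rval != QL_SUCCESS) {
 *              EL(ha, "code ram read failed=%xh\n", rval);
 *      }
 */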
16256 
16257 /*
16258  * ql_read_regs
16259  *      Reads adapter registers to buffer.
16260  *
16261  * Input:
16262  *      ha:     adapter state pointer.
16263  *      buf:    buffer pointer.
16264  *      reg:    start address.
16265  *      count:  number of registers.
16266  *      wds:    register size.
 
16297                 while (count--) {
16298                         *bp8++ = RD_REG_BYTE(ha, reg8++);
16299                 }
16300                 return (bp8);
16301         default:
16302                 EL(ha, "Unknown word size=%d\n", wds);
16303                 return (buf);
16304         }
16305 }
16306 
16307 static int
16308 ql_save_config_regs(dev_info_t *dip)
16309 {
16310         ql_adapter_state_t      *ha;
16311         int                     ret;
16312         ql_config_space_t       chs;
16313         caddr_t                 prop = "ql-config-space";
16314 
16315         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
16316         if (ha == NULL) {
16317                 QL_PRINT_2(NULL, "no adapter instance=%d\n",
16318                     ddi_get_instance(dip));
16319                 return (DDI_FAILURE);
16320         }
16321 
16322         QL_PRINT_3(ha, "started\n");
16323 
16324         /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
16325         if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
16326             1) {
16327                 QL_PRINT_2(ha, "prop already exists, exit\n");
16328                 return (DDI_SUCCESS);
16329         }
16330 
16331         chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
16332         chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
16333             PCI_CONF_HEADER);
16334         if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16335                 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
16336                     PCI_BCNF_BCNTRL);
16337         }
16338 
16339         chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
16340             PCI_CONF_CACHE_LINESZ);
16341 
16342         chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
16343             PCI_CONF_LATENCY_TIMER);
16344 
16345         if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16346                 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
16347                     PCI_BCNF_LATENCY_TIMER);
16348         }
16349 
16350         chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
16351         chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
16352         chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
16353         chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
16354         chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
16355         chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
16356 
16357         /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
16358         ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
16359             (uchar_t *)&chs, sizeof (ql_config_space_t));
16360 
16361         if (ret != DDI_PROP_SUCCESS) {
16362                 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
16363                     QL_NAME, ddi_get_instance(dip), prop);
16364                 return (DDI_FAILURE);
16365         }
16366 
16367         QL_PRINT_3(ha, "done\n");
16368 
16369         return (DDI_SUCCESS);
16370 }
16371 
16372 static int
16373 ql_restore_config_regs(dev_info_t *dip)
16374 {
16375         ql_adapter_state_t      *ha;
16376         uint_t                  elements;
16377         ql_config_space_t       *chs_p;
16378         caddr_t                 prop = "ql-config-space";
16379 
16380         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
16381         if (ha == NULL) {
16382                 QL_PRINT_2(NULL, "no adapter instance=%d\n",
16383                     ddi_get_instance(dip));
16384                 return (DDI_FAILURE);
16385         }
16386 
16387         QL_PRINT_3(ha, "started\n");
16388 
16389         /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
16390         if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
16391             DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
16392             (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
16393                 QL_PRINT_2(ha, "no prop exit\n");
16394                 return (DDI_FAILURE);
16395         }
16396 
16397         ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
16398 
16399         if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16400                 ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
16401                     chs_p->chs_bridge_control);
16402         }
16403 
16404         ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
16405             chs_p->chs_cache_line_size);
16406 
16407         ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
16408             chs_p->chs_latency_timer);
16409 
16410         if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16411                 ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
16412                     chs_p->chs_sec_latency_timer);
16413         }
16414 
16415         ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
16416         ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
16417         ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
16418         ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
16419         ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
16420         ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
16421 
16422         ddi_prop_free(chs_p);
16423 
16424         /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
16425         if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
16426                 cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
16427                     QL_NAME, ddi_get_instance(dip), prop);
16428         }
16429 
16430         QL_PRINT_3(ha, "done\n");
16431 
16432         return (DDI_SUCCESS);
16433 }
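
/*
 * Illustrative sketch (not part of the driver): how the save/restore
 * pair above would typically bracket an operation that can clobber PCI
 * configuration space, such as a power-down/power-up cycle.  The
 * surrounding power-management step is hypothetical.
 *
 *      if (ql_save_config_regs(dip) != DDI_SUCCESS) {
 *              return (DDI_FAILURE);
 *      }
 *      ... power the device down and back up ...
 *      if (ql_restore_config_regs(dip) != DDI_SUCCESS) {
 *              return (DDI_FAILURE);
 *      }
 */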
16434 
16435 uint8_t
16436 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
16437 {
16438         if (CFG_IST(ha, CFG_SBUS_CARD)) {
16439                 return (ddi_get8(ha->sbus_config_handle,
16440                     (uint8_t *)(ha->sbus_config_base + off)));
16441         }
16442 
16443 #ifdef KERNEL_32
16444         return (pci_config_getb(ha->pci_handle, off));
16445 #else
16446         return (pci_config_get8(ha->pci_handle, off));
16447 #endif
16448 }
16449 
16450 uint16_t
 
16521 #endif
16522         }
16523 }
16524 
16525 /*
16526  * ql_halt
16527  *      Waits for outstanding commands to finish and aborts any
16528  *      that do not complete in time.
16529  *      Finally the adapter is reset.
16530  *
16531  * Input:
16532  *      ha:     adapter state pointer.
16533  *      pwr:    power state.
16534  *
16535  * Context:
16536  *      Kernel context.
16537  */
16538 static void
16539 ql_halt(ql_adapter_state_t *ha, int pwr)
16540 {
16541         ql_link_t       *link;
16542         ql_response_q_t *rsp_q;
16543         ql_tgt_t        *tq;
16544         ql_srb_t        *sp;
16545         uint32_t        cnt, i;
16546         uint16_t        index;
16547 
16548         QL_PRINT_3(ha, "started\n");
16549 
16550         /* Wait for all running commands to finish. */
16551         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
16552                 for (link = ha->dev[index].first; link != NULL;
16553                     link = link->next) {
16554                         tq = link->base_address;
16555                         (void) ql_abort_device(ha, tq, 0);
16556 
16557                         /* Wait up to 30 seconds for commands to finish. */
16558                         for (cnt = 3000; cnt != 0; cnt--) {
16559                                 /* Acquire device queue lock. */
16560                                 DEVICE_QUEUE_LOCK(tq);
16561                                 if (tq->outcnt == 0) {
16562                                         /* Release device queue lock. */
16563                                         DEVICE_QUEUE_UNLOCK(tq);
16564                                         break;
16565                                 } else {
16566                                         /* Release device queue lock. */
16567                                         DEVICE_QUEUE_UNLOCK(tq);
16568                                         ql_delay(ha, 10000);
16569                                 }
16570                         }
16571 
16572                         /* Finish any commands waiting for more status. */
16573                         for (i = 0; i < ha->rsp_queues_cnt; i++) {
16574                                 if ((rsp_q = ha->rsp_queues[i]) != NULL &&
16575                                     (sp = rsp_q->status_srb) != NULL) {
16576                                         rsp_q->status_srb = NULL;
16577                                         sp->cmd.next = NULL;
16578                                         ql_done(&sp->cmd, B_FALSE);
16579                                 }
16580                         }
16581 
16582                         /* Abort commands that did not finish. */
16583                         if (cnt == 0) {
16584                                 for (cnt = 1; cnt < ha->osc_max_cnt;
16585                                     cnt++) {
16586                                         if (ha->pending_cmds.first != NULL) {
16587                                                 ql_start_iocb(ha, NULL);
16588                                                 cnt = 1;
16589                                         }
16590                                         sp = ha->outstanding_cmds[cnt];
16591                                         if (sp != NULL &&
16592                                             sp != QL_ABORTED_SRB(ha) &&
16593                                             sp->lun_queue->target_queue ==
16594                                             tq) {
16595                                                 (void) ql_abort_io(ha, sp);
16596                                                 sp->pkt->pkt_reason =
16597                                                     CS_ABORTED;
16598                                                 sp->cmd.next = NULL;
16599                                                 ql_done(&sp->cmd, B_FALSE);
16600                                         }
16601                                 }
16602                         }
16603                 }
16604         }
16605 
16606         /* Shutdown IP. */
16607         if (ha->flags & IP_INITIALIZED) {
16608                 (void) ql_shutdown_ip(ha);
16609         }
16610 
16611         /* Stop all timers. */
16612         ADAPTER_STATE_LOCK(ha);
16613         ha->port_retry_timer = 0;
16614         ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
16615         ha->watchdog_timer = 0;
16616         ADAPTER_STATE_UNLOCK(ha);
16617 
16618         if (pwr == PM_LEVEL_D3 && ha->flags & ONLINE) {
16619                 ADAPTER_STATE_LOCK(ha);
16620                 ha->flags &= ~ONLINE;
16621                 ADAPTER_STATE_UNLOCK(ha);
16622 
16623                 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16624                         ql_8021_clr_drv_active(ha);
16625                 }
16626 
16627                 /* Reset ISP chip. */
16628                 ql_reset_chip(ha);
16629         }
16630 
16631         QL_PRINT_3(ha, "done\n");
16632 }
16633 
16634 /*
16635  * ql_get_dma_mem
16636  *      Function used to allocate dma memory.
16637  *
16638  * Input:
16639  *      ha:                     adapter state pointer.
16640  *      mem:                    pointer to dma memory object.
16641  *      size:                   size of the request in bytes.
16642  *
16643  * Returns:
16644  *      ql local function return status code.
16645  *
16646  * Context:
16647  *      Kernel context.
16648  */
16649 int
16650 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
16651     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
16652 {
16653         int     rval;
16654 
16655         QL_PRINT_3(ha, "started\n");
16656 
16657         mem->size = size;
16658         mem->type = allocation_type;
16659         mem->max_cookie_count = 1;
16660 
16661         switch (alignment) {
16662         case QL_DMA_DATA_ALIGN:
16663                 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
16664                 break;
16665         case QL_DMA_RING_ALIGN:
16666                 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
16667                 break;
16668         default:
16669                 EL(ha, "failed, unknown alignment type %x\n", alignment);
16670                 break;
16671         }
16672 
16673         if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
16674                 ql_free_phys(ha, mem);
16675                 EL(ha, "failed, alloc_phys=%xh\n", rval);
16676         }
16677 
16678         QL_PRINT_3(ha, "done\n");
16679 
16680         return (rval);
16681 }
16682 
16683 /*
16684  * ql_free_dma_resource
16685  *      Function used to free dma memory.
16686  *
16687  * Input:
16688  *      ha:             adapter state pointer.
16689  *      mem:            pointer to dma memory object.
16690  *      mem->dma_handle      DMA memory handle.
16691  *
16692  * Context:
16693  *      Kernel context.
16694  */
16695 void
16696 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
16697 {
16698         QL_PRINT_3(ha, "started\n");
16699 
16700         ql_free_phys(ha, mem);
16701 
16702         QL_PRINT_3(ha, "done\n");
16703 }
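
/*
 * Illustrative sketch (not part of the driver): allocating and
 * releasing a DMA buffer with the two helpers above.  The 4KB size and
 * the LITTLE_ENDIAN_DMA / QL_DMA_RING_ALIGN choices are examples only;
 * mem.bp and mem.cookie follow the usage seen elsewhere in this file.
 *
 *      dma_mem_t       mem;
 *
 *      bzero(&mem, sizeof (dma_mem_t));
 *      if (ql_get_dma_mem(ha, &mem, 4096, LITTLE_ENDIAN_DMA,
 *          QL_DMA_RING_ALIGN) != QL_SUCCESS) {
 *              return (QL_MEMORY_ALLOC_FAILED);
 *      }
 *      ... mem.bp is the kernel virtual address and
 *          mem.cookie.dmac_address the DMA address ...
 *      ql_free_dma_resource(ha, &mem);
 */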
16704 
16705 /*
16706  * ql_alloc_phys
16707  *      Function used to allocate memory and zero it.
16708  *      Memory is below 4 GB.
16709  *
16710  * Input:
16711  *      ha:                     adapter state pointer.
16712  *      mem:                    pointer to dma memory object.
16713  *      sleep:                  KM_SLEEP/KM_NOSLEEP flag.
16714  *      mem->max_cookie_count        number of segments allowed.
16715  *      mem->type            memory allocation type.
16716  *      mem->size            memory size.
16717  *      mem->alignment               memory alignment.
16718  *
16719  * Returns:
16720  *      ql local function return status code.
16721  *
16722  * Context:
16723  *      Kernel context.
16724  */
16725 int
16726 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
16727 {
16728         size_t                  rlen;
16729         ddi_dma_attr_t          dma_attr = ha->io_dma_attr;
16730         ddi_device_acc_attr_t   acc_attr = ql_dev_acc_attr;
16731 
16732         QL_PRINT_3(ha, "started\n");
16733 
16734         dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
16735         dma_attr.dma_attr_sgllen = (int)mem->max_cookie_count;
16736 
16737         /*
16738          * Workaround for SUN XMITS: buffers must start and end on an
16739          * 8-byte boundary or the hardware will overrun the buffer. The
16740          * simple fix is to round the size up to the next 8-byte multiple.
16741          */
16742         if (mem->size & 7) {
16743                 mem->size += 8 - (mem->size & 7);
16744         }
16745 
16746         mem->flags = DDI_DMA_CONSISTENT;
16747 
16748         /*
16749          * Allocate DMA memory for command.
16750          */
16751         if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
16752             DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
16753             DDI_SUCCESS) {
16754                 EL(ha, "failed, ddi_dma_alloc_handle\n");
16755                 mem->dma_handle = NULL;
 
16787                         mem->acc_handle = NULL;
16788                         mem->bp = NULL;
16789                 }
16790                 break;
16791         default:
16792                 EL(ha, "failed, unknown type=%xh\n", mem->type);
16793                 mem->acc_handle = NULL;
16794                 mem->bp = NULL;
16795                 break;
16796         }
16797 
16798         if (mem->bp == NULL) {
16799                 EL(ha, "failed, ddi_dma_mem_alloc\n");
16800                 ddi_dma_free_handle(&mem->dma_handle);
16801                 mem->dma_handle = NULL;
16802                 return (QL_MEMORY_ALLOC_FAILED);
16803         }
16804 
16805         mem->flags |= DDI_DMA_RDWR;
16806 
16807         if (qlc_fm_check_dma_handle(ha, mem->dma_handle)
16808             != DDI_FM_OK) {
16809                 EL(ha, "failed, qlc_fm_check_dma_handle\n");
16810                 ql_free_phys(ha, mem);
16811                 qlc_fm_report_err_impact(ha,
16812                     QL_FM_EREPORT_DMA_HANDLE_CHECK);
16813                 return (QL_MEMORY_ALLOC_FAILED);
16814         }
16815 
16816         if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
16817                 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
16818                 ql_free_phys(ha, mem);
16819                 return (QL_MEMORY_ALLOC_FAILED);
16820         }
16821 
16822         QL_PRINT_3(ha, "done\n");
16823 
16824         return (QL_SUCCESS);
16825 }
16826 
16827 /*
16828  * ql_free_phys
16829  *      Function used to free physical memory.
16830  *
16831  * Input:
16832  *      ha:     adapter state pointer.
16833  *      mem:    pointer to dma memory object.
16834  *
16835  * Context:
16836  *      Kernel context.
16837  */
16838 void
16839 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
16840 {
16841         QL_PRINT_3(ha, "started\n");
16842 
16843         if (mem != NULL) {
16844                 if (mem->memflags == DDI_DMA_MAPPED) {
16845                         ql_unbind_dma_buffer(ha, mem);
16846                 }
16847 
16848                 switch (mem->type) {
16849                 case KERNEL_MEM:
16850                         if (mem->bp != NULL) {
16851                                 kmem_free(mem->bp, mem->size);
16852                                 mem->bp = NULL;
16853                         }
16854                         break;
16855                 case LITTLE_ENDIAN_DMA:
16856                 case BIG_ENDIAN_DMA:
16857                 case NO_SWAP_DMA:
16858                         if (mem->acc_handle != NULL) {
16859                                 ddi_dma_mem_free(&mem->acc_handle);
16860                                 mem->acc_handle = NULL;
16861                                 mem->bp = NULL;
16862                         }
16863                         break;
16864                 default:
16865                         break;
16866                 }
16867                 if (mem->dma_handle != NULL) {
16868                         ddi_dma_free_handle(&mem->dma_handle);
16869                         mem->dma_handle = NULL;
16870                 }
16871         }
16872 
16873         QL_PRINT_3(ha, "done\n");
16874 }
16875 
16876 /*
16877  * ql_bind_dma_buffer
16878  *      Binds DMA buffer.
16879  *
16880  * Input:
16881  *      ha:                     adapter state pointer.
16882  *      mem:                    pointer to dma memory object.
16883  *      kmflags:                KM_SLEEP or KM_NOSLEEP.
16884  *      mem->dma_handle              DMA memory handle.
16885  *      mem->max_cookie_count        number of segments allowed.
16886  *      mem->type            memory allocation type.
16887  *      mem->size            memory size.
16888  *      mem->bp                      pointer to memory or struct buf
16889  *
16890  * Returns:
16891  *      mem->cookies         pointer to list of cookies.
16892  *      mem->cookie_count    number of cookies.
16893  *      status                  success = DDI_DMA_MAPPED
16894  *                              DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
16895  *                              DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
16896  *                              DDI_DMA_TOOBIG
16897  *
16898  * Context:
16899  *      Kernel context.
16900  */
16901 static int
16902 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int kmflags)
16903 {
16904         ddi_dma_cookie_t        *cookiep;
16905         uint32_t                cnt;
16906 
16907         QL_PRINT_3(ha, "started\n");
16908 
16909         mem->memflags = ddi_dma_addr_bind_handle(mem->dma_handle, NULL,
16910             mem->bp, mem->size, mem->flags, (kmflags == KM_SLEEP) ?
16911             DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
16912             &mem->cookie_count);
16913 
16914         if (mem->memflags == DDI_DMA_MAPPED) {
16915                 if (mem->cookie_count > mem->max_cookie_count) {
16916                         (void) ddi_dma_unbind_handle(mem->dma_handle);
16917                         EL(ha, "failed, cookie_count %d > %d\n",
16918                             mem->cookie_count, mem->max_cookie_count);
16919                         mem->memflags = (uint32_t)DDI_DMA_TOOBIG;
16920                 } else {
16921                         if (mem->cookie_count > 1) {
16922                                 if (mem->cookies = kmem_zalloc(
16923                                     sizeof (ddi_dma_cookie_t) *
16924                                     mem->cookie_count, kmflags)) {
16925                                         *mem->cookies = mem->cookie;
16926                                         cookiep = mem->cookies;
16927                                         for (cnt = 1; cnt < mem->cookie_count;
16928                                             cnt++) {
16929                                                 ddi_dma_nextcookie(
16930                                                     mem->dma_handle,
16931                                                     ++cookiep);
16932                                         }
16933                                 } else {
16934                                         (void) ddi_dma_unbind_handle(
16935                                             mem->dma_handle);
16936                                         EL(ha, "failed, kmem_zalloc\n");
16937                                         mem->memflags = (uint32_t)
16938                                             DDI_DMA_NORESOURCES;
16939                                 }
16940                         } else {
16941                                 /*
16942                                  * It has been reported that dmac_size may
16943                                  * at times be incorrect on sparc machines,
16944                                  * so when there is only one segment use
16945                                  * the buffer size instead.
16946                                  */
16947                                 mem->cookies = &mem->cookie;
16948                                 mem->cookies->dmac_size = mem->size;
16949                         }
16950                 }
16951         }
16952 
16953         if (mem->memflags != DDI_DMA_MAPPED) {
16954                 EL(ha, "failed=%xh\n", mem->memflags);
16955         } else {
16956                 /*EMPTY*/
16957                 QL_PRINT_3(ha, "done\n");
16958         }
16959 
16960         return (mem->memflags);
16961 }
16962 
16963 /*
16964  * ql_unbind_dma_buffer
16965  *      Unbinds DMA buffer.
16966  *
16967  * Input:
16968  *      ha:                     adapter state pointer.
16969  *      mem:                    pointer to dma memory object.
16970  *      mem->dma_handle              DMA memory handle.
16971  *      mem->cookies         pointer to cookie list.
16972  *      mem->cookie_count    number of cookies.
16973  *
16974  * Context:
16975  *      Kernel context.
16976  */
16977 /* ARGSUSED */
16978 static void
16979 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
16980 {
16981         QL_PRINT_3(ha, "started\n");
16982 
16983         if (mem->dma_handle != NULL && mem->memflags == DDI_DMA_MAPPED) {
16984                 (void) ddi_dma_unbind_handle(mem->dma_handle);
16985         }
16986         if (mem->cookie_count > 1) {
16987                 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
16988                     mem->cookie_count);
16989                 mem->cookies = NULL;
16990         }
16991         mem->cookie_count = 0;
16992         mem->memflags = (uint32_t)DDI_DMA_NORESOURCES;
16993 
16994         QL_PRINT_3(ha, "done\n");
16995 }
16996 
16997 static int
16998 ql_suspend_adapter(ql_adapter_state_t *ha)
16999 {
17000         clock_t timer = (clock_t)(32 * drv_usectohz(1000000));
17001 
17002         QL_PRINT_3(ha, "started\n");
17003 
17004         (void) ql_wait_outstanding(ha);
17005 
17006         /*
17007          * At this point no mailbox interrupt can occur, so return all
17008          * of the outstanding commands as well as any internally
17009          * queued commands.
17010          */
17011         ql_halt(ha, PM_LEVEL_D0);
17012 
17013         /*
17014          * Claim mailbox ownership first so that no thread
17015          * using the mailbox hangs when the interrupt is
17016          * disabled in the middle of a command.
17017          */
17018         MBX_REGISTER_LOCK(ha);
17019 
17020         /* Check for mailbox available, if not wait for signal. */
17021         while (ha->mailbox_flags & MBX_BUSY_FLG) {
17022                 ha->mailbox_flags = (uint8_t)
17023                     (ha->mailbox_flags | MBX_WANT_FLG);
17024 
17025                 /* Wait up to 32 seconds from now. */
17026                 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
17027                     timer, TR_CLOCK_TICK) == -1) {
17028 
17029                         /* Release mailbox register lock. */
17030                         MBX_REGISTER_UNLOCK(ha);
17031                         EL(ha, "failed, Suspend mbox");
17032                         return (QL_FUNCTION_TIMEOUT);
17033                 }
17034         }
17035 
17036         /* Set busy flag. */
17037         ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
17038         MBX_REGISTER_UNLOCK(ha);
17039 
17040         if (ha->power_level != PM_LEVEL_D3) {
17041                 /* Disable ISP interrupts. */
17042                 ql_disable_intr(ha);
17043         }
17044 
17045         MBX_REGISTER_LOCK(ha);
17046         /* Reset busy status. */
17047         ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);
17048 
17049         /* If thread is waiting for mailbox go signal it to start. */
17050         if (ha->mailbox_flags & MBX_WANT_FLG) {
17051                 ha->mailbox_flags = (uint8_t)
17052                     (ha->mailbox_flags & ~MBX_WANT_FLG);
17053                 cv_broadcast(&ha->cv_mbx_wait);
17054         }
17055         /* Release mailbox register lock. */
17056         MBX_REGISTER_UNLOCK(ha);
17057 
17058         QL_PRINT_3(ha, "done\n");
17059 
17060         return (QL_SUCCESS);
17061 }
17062 
17063 /*
17064  * ql_add_link_b
17065  *      Add link to the end of the chain.
17066  *
17067  * Input:
17068  *      head = Head of link list.
17069  *      link = link to be added.
17070  *      LOCK must be already obtained.
17071  *
17072  * Context:
17073  *      Interrupt or Kernel context, no mailbox commands allowed.
17074  */
17075 void
17076 ql_add_link_b(ql_head_t *head, ql_link_t *link)
17077 {
17078         if (link->head != NULL) {
17079                 EL(NULL, "link in use by list=%ph\n", link->head);
17080         }
17081 
17082         /* at the end there isn't a next */
17083         link->next = NULL;
17084 
17085         if ((link->prev = head->last) == NULL) {
17086                 head->first = link;
17087         } else {
17088                 head->last->next = link;
17089         }
17090 
17091         head->last = link;
17092         link->head = head;   /* the queue we're on */
17093 }
17094 
17095 /*
17096  * ql_add_link_t
17097  *      Add link to the beginning of the chain.
17098  *
17099  * Input:
17100  *      head = Head of link list.
17101  *      link = link to be added.
17102  *      LOCK must be already obtained.
17103  *
17104  * Context:
17105  *      Interrupt or Kernel context, no mailbox commands allowed.
17106  */
17107 void
17108 ql_add_link_t(ql_head_t *head, ql_link_t *link)
17109 {
17110         if (link->head != NULL) {
17111                 EL(NULL, "link in use by list=%ph\n", link->head);
17112         }
17113         link->prev = NULL;
17114 
17115         if ((link->next = head->first) == NULL)   {
17116                 head->last = link;
17117         } else {
17118                 head->first->prev = link;
17119         }
17120 
17121         head->first = link;
17122         link->head = head;   /* the queue we're on */
17123 }
17124 
17125 /*
17126  * ql_remove_link
17127  *      Remove a link from the chain.
17128  *
17129  * Input:
17130  *      head = Head of link list.
17131  *      link = link to be removed.
17132  *      associated proper LOCK must be already obtained.
17133  *
17134  * Context:
17135  *      Interrupt or Kernel context, no mailbox commands allowed.
17136  */
17137 void
17138 ql_remove_link(ql_head_t *head, ql_link_t *link)
17139 {
17140         if (head != NULL) {
17141                 if (link->prev != NULL) {
17142                         if ((link->prev->next = link->next) == NULL) {
17143                                 head->last = link->prev;
17144                         } else {
17145                                 link->next->prev = link->prev;
17146                         }
17147                 } else if ((head->first = link->next) == NULL) {
17148                         head->last = NULL;
17149                 } else {
17150                         head->first->prev = NULL;
17151                 }
17152 
17153                 /* not on a queue any more */
17154                 link->prev = link->next = NULL;
17155                 link->head = NULL;
17156         }
17157 }
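
/*
 * Illustrative sketch (not part of the driver): typical use of the
 * ql_link_t list primitives above.  The list head, the protecting
 * mutex and the element are hypothetical; as the headers note, the
 * caller must already hold the lock that protects the list.
 *
 *      ql_head_t       pending = {NULL, NULL};
 *
 *      mutex_enter(&list_mutex);
 *      ql_add_link_b(&pending, &sp->cmd);      add at the tail
 *      ...
 *      ql_remove_link(&pending, &sp->cmd);     take it back off
 *      mutex_exit(&list_mutex);
 */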
17158 
17159 /*
17160  * ql_chg_endian
17161  *      Change endianness of byte array.
17162  *
17163  * Input:
17164  *      buf = array pointer.
17165  *      size = size of array in bytes.
17166  *
17167  * Context:
17168  *      Interrupt or Kernel context, no mailbox commands allowed.
17169  */
17170 void
17171 ql_chg_endian(uint8_t buf[], size_t size)
17172 {
17173         uint8_t byte;
17174         size_t  cnt1;
17175         size_t  cnt;
17176 
 
17225                 *ans += num * mul;
17226         }
17227 
17228         return (cnt);
17229 }
17230 
17231 /*
17232  * ql_delay
17233  *      Calls the delay routine if threads are not suspended;
17234  *      otherwise, busy waits.  Minimum = 1 tick = 10ms.
17235  *
17236  * Input:
17237  *      usecs = delay time in microseconds.
17238  *
17239  * Context:
17240  *      Kernel or Interrupt context, no mailbox commands allowed.
17241  */
17242 void
17243 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
17244 {
17245         if (ha->flags & ADAPTER_SUSPENDED || ddi_in_panic() ||
17246             curthread->t_flag & T_INTR_THREAD) {
17247                 drv_usecwait(usecs);
17248         } else {
17249                 delay(drv_usectohz(usecs));
17250         }
17251 }
17252 
17253 /*
17254  * ql_stall_driver
17255  *      Stalls one or all driver instances, waiting up to 30 seconds.
17256  *
17257  * Input:
17258  *      ha:             adapter state pointer or NULL for all.
17259  *      options:        BIT_0 --> leave driver stalled on exit if
17260  *                                failed.
17261  *
17262  * Returns:
17263  *      ql local function return status code.
17264  *
17265  * Context:
17266  *      Kernel context.
17267  */
17268 int
17269 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
17270 {
17271         ql_link_t               *link;
17272         ql_adapter_state_t      *ha2 = NULL;
17273         uint32_t                timer;
17274 
17275         QL_PRINT_3(ha, "started\n");
17276 
17277         /* Tell all daemons to stall. */
17278         link = ha == NULL ? ql_hba.first : &ha->hba;
17279         while (link != NULL) {
17280                 ha2 = link->base_address;
17281 
17282                 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
17283 
17284                 link = ha == NULL ? link->next : NULL;
17285         }
17286 
17287         /* Wait up to 30 seconds for the daemons to stall. */
17288         timer = 3000;
17289         link = ha == NULL ? ql_hba.first : &ha->hba;
17290         while (link != NULL && timer) {
17291                 ha2 = link->base_address;
17292 
17293                 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
17294                     (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
17295                     (ha2->task_daemon_flags & FIRMWARE_UP) == 0 ||
17296                     (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
17297                     ql_wait_outstanding(ha2) == ha2->pha->osc_max_cnt)) {
17298                         link = ha == NULL ? link->next : NULL;
17299                         continue;
17300                 }
17301 
17302                 QL_PRINT_2(ha2, "status, dtf=%xh, stf=%xh\n",
17303                     ha2->task_daemon_flags, ha2->flags);
17304 
17305                 ql_delay(ha2, 10000);
17306                 timer--;
17307                 link = ha == NULL ? ql_hba.first : &ha->hba;
17308         }
17309 
17310         if (ha2 != NULL && timer == 0) {
17311                 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
17312                     ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
17313                     "unstalled"));
17314                 if (options & BIT_0) {
17315                         ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
17316                 }
17317                 return (QL_FUNCTION_TIMEOUT);
17318         }
17319 
17320         QL_PRINT_3(ha, "done\n");
17321 
17322         return (QL_SUCCESS);
17323 }
17324 
17325 /*
17326  * ql_restart_driver
17327  *      Restarts one or all driver instances.
17328  *
17329  * Input:
17330  *      ha:     adapter state pointer or NULL for all.
17331  *
17332  * Context:
17333  *      Kernel context.
17334  */
17335 void
17336 ql_restart_driver(ql_adapter_state_t *ha)
17337 {
17338         ql_link_t               *link;
17339         ql_adapter_state_t      *ha2;
17340         uint32_t                timer;
17341 
17342         QL_PRINT_3(ha, "started\n");
17343 
17344         /* Tell all daemons to unstall. */
17345         link = ha == NULL ? ql_hba.first : &ha->hba;
17346         while (link != NULL) {
17347                 ha2 = link->base_address;
17348 
17349                 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
17350 
17351                 link = ha == NULL ? link->next : NULL;
17352         }
17353 
17354         /* Wait up to 30 seconds for all daemons to unstall. */
17355         timer = 3000;
17356         link = ha == NULL ? ql_hba.first : &ha->hba;
17357         while (link != NULL && timer) {
17358                 ha2 = link->base_address;
17359 
17360                 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
17361                     (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
17362                     (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
17363                         QL_PRINT_2(ha2, "restarted\n");
17364                         ql_restart_queues(ha2);
17365                         link = ha == NULL ? link->next : NULL;
17366                         continue;
17367                 }
17368 
17369                 QL_PRINT_2(ha2, "status, tdf=%xh\n", ha2->task_daemon_flags);
17370 
17371                 ql_delay(ha2, 10000);
17372                 timer--;
17373                 link = ha == NULL ? ql_hba.first : &ha->hba;
17374         }
17375 
17376         QL_PRINT_3(ha, "done\n");
17377 }
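
/*
 * Illustrative sketch (not part of the driver): stalling every
 * instance around a global operation and restarting them afterwards
 * with the two routines above.  The maintenance step itself is
 * hypothetical.
 *
 *      if (ql_stall_driver(NULL, 0) != QL_SUCCESS) {
 *              return (QL_FUNCTION_TIMEOUT);
 *      }
 *      ... perform the global maintenance operation ...
 *      ql_restart_driver(NULL);
 */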
17378 
17379 /*
17380  * ql_setup_interrupts
17381  *      Sets up interrupts based on the HBA's and platform's
17382  *      capabilities (e.g., legacy / MSI-X / MSI / FIXED).
17383  *
17384  * Input:
17385  *      ha = adapter state pointer.
17386  *
17387  * Returns:
17388  *      DDI_SUCCESS or DDI_FAILURE.
17389  *
17390  * Context:
17391  *      Kernel context.
17392  */
17393 static int
17394 ql_setup_interrupts(ql_adapter_state_t *ha)
17395 {
17396         int32_t         rval = DDI_FAILURE;
17397         int32_t         i;
17398         int32_t         itypes = 0;
17399 
17400         QL_PRINT_3(ha, "started\n");
17401 
17402         /*
17403          * The Solaris Advanced Interrupt Functions (aif) are only
17404          * supported on s10U1 or greater.
17405          */
17406         if (ql_os_release_level < 10 || ql_disable_aif != 0) {
17407                 EL(ha, "interrupt framework is not supported or is "
17408                     "disabled, using legacy\n");
17409                 return (ql_legacy_intr(ha));
17410         } else if (ql_os_release_level == 10) {
17411                 /*
17412                  * See if the advanced interrupt functions (aif) are
17413                  * in the kernel
17414                  */
17415                 void    *fptr = (void *)&ddi_intr_get_supported_types;
17416 
17417                 if (fptr == NULL) {
17418                         EL(ha, "aif is not supported, using legacy "
17419                             "interrupts (rev)\n");
17420                         return (ql_legacy_intr(ha));
 
17427                 EL(ha, "get supported types failed, rval=%xh, "
17428                     "assuming FIXED\n", i);
17429                 itypes = DDI_INTR_TYPE_FIXED;
17430         }
17431 
17432         EL(ha, "supported types are: %xh\n", itypes);
17433 
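              /* Prefer MSI-X, then MSI, then fall back to FIXED interrupts. */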
17434         if ((itypes & DDI_INTR_TYPE_MSIX) &&
17435             (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
17436                 EL(ha, "successful MSI-X setup\n");
17437         } else if ((itypes & DDI_INTR_TYPE_MSI) &&
17438             (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
17439                 EL(ha, "successful MSI setup\n");
17440         } else {
17441                 rval = ql_setup_fixed(ha);
17442         }
17443 
17444         if (rval != DDI_SUCCESS) {
17445                 EL(ha, "failed, aif, rval=%xh\n", rval);
17446         } else {
17447                 /* Setup mutexes */
17448                 if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
17449                         EL(ha, "failed, mutex init ret=%xh\n", rval);
17450                         ql_release_intr(ha);
17451                 }
17452                 QL_PRINT_3(ha, "done\n");
17453         }
17454 
17455         return (rval);
17456 }
17457 
17458 /*
17459  * ql_setup_msi
17460  *      Set up aif MSI interrupts
17461  *
17462  * Input:
17463  *      ha = adapter state pointer.
17464  *
17465  * Returns:
17466  *      DDI_SUCCESS or DDI_FAILURE.
17467  *
17468  * Context:
17469  *      Kernel context.
17470  */
17471 static int
17472 ql_setup_msi(ql_adapter_state_t *ha)
17473 {
17474         uint_t          i;
17475         int32_t         count = 0;
17476         int32_t         avail = 0;
17477         int32_t         actual = 0;
17478         int32_t         msitype = DDI_INTR_TYPE_MSI;
17479         int32_t         ret;
17480 
17481         QL_PRINT_3(ha, "started\n");
17482 
17483         if (ql_disable_msi != 0) {
17484                 EL(ha, "MSI is disabled by user\n");
17485                 return (DDI_FAILURE);
17486         }
17487 
17488         /* MSI is only supported on 24xx HBAs. */
17489         if (!CFG_IST(ha, CFG_MSI_SUPPORT)) {
17490                 EL(ha, "HBA does not support MSI\n");
17491                 return (DDI_FAILURE);
17492         }
17493 
17494         /* Get number of MSI interrupts the system supports */
17495         if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
17496             DDI_SUCCESS) || count == 0) {
17497                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
17498                 return (DDI_FAILURE);
17499         }
17500 
17501         /* Get number of available MSI interrupts */
17502         if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
17503             DDI_SUCCESS) || avail == 0) {
17504                 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
17505                 return (DDI_FAILURE);
17506         }
17507 
17508         /* MSI requires only 1.  */
17509         count = 1;
17510 
17511         /* Allocate space for interrupt handles */
17512         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
17513         ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
17514 
17515         ha->iflags |= IFLG_INTR_MSI;
17516 
17517         /* Allocate the interrupts */
17518         if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
17519             &actual, 0)) != DDI_SUCCESS || actual < count) {
17520                 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
17521                     "actual=%xh\n", ret, count, actual);
17522                 ql_release_intr(ha);
17523                 return (DDI_FAILURE);
17524         }
17525         ha->intr_cnt = actual;
17526 
17527         /* Get interrupt priority */
17528         if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
17529                 EL(ha, "failed, get_pri ret=%xh\n", ret);
17530                 ql_release_intr(ha);
17531                 return (ret);
17532         }
17533         ha->intr_pri = DDI_INTR_PRI(i);
17534 
17535         /* Add the interrupt handler */
17536         if ((ret = ddi_intr_add_handler(ha->htable[0], ql_isr_aif,
17537             (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
17538                 EL(ha, "failed, intr_add ret=%xh\n", ret);
17539                 ql_release_intr(ha);
17540                 return (ret);
17541         }
17542 
17543         /* Get the capabilities */
17544         (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
17545 
17546         /* Enable interrupts */
17547         if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
17548                 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
17549                     DDI_SUCCESS) {
17550                         EL(ha, "failed, block enable, ret=%xh\n", ret);
17551                         ql_release_intr(ha);
17552                         return (ret);
17553                 }
17554         } else {
17555                 for (i = 0; i < actual; i++) {
17556                         if ((ret = ddi_intr_enable(ha->htable[i])) !=
17557                             DDI_SUCCESS) {
17558                                 EL(ha, "failed, intr enable, ret=%xh\n", ret);
17559                                 ql_release_intr(ha);
17560                                 return (ret);
17561                         }
17562                 }
17563         }
17564 
17565         QL_PRINT_3(ha, "done\n");
17566 
17567         return (DDI_SUCCESS);
17568 }
17569 
17570 /*
17571  * ql_setup_msix
17572  *      Set up aif MSI-X interrupts
17573  *
17574  * Input:
17575  *      ha = adapter state pointer.
17576  *
17577  * Returns:
17578  *      DDI_SUCCESS or DDI_FAILURE.
17579  *
17580  * Context:
17581  *      Kernel context.
17582  */
17583 static int
17584 ql_setup_msix(ql_adapter_state_t *ha)
17585 {
17586         int             hwvect;
17587         int32_t         count = 0;
17588         int32_t         avail = 0;
17589         int32_t         actual = 0;
17590         int32_t         msitype = DDI_INTR_TYPE_MSIX;
17591         int32_t         ret;
17592         uint_t          i;
17593 
17594         QL_PRINT_3(ha, "started\n");
17595 
17596         if (ql_disable_msix != 0) {
17597                 EL(ha, "MSI-X is disabled by user\n");
17598                 return (DDI_FAILURE);
17599         }
17600 
17601 #ifdef __x86
17602         if (get_hwenv() == HW_VMWARE) {
17603                 EL(ha, "running under hypervisor, disabling MSI-X\n");
17604                 return (DDI_FAILURE);
17605         }
17606 #endif
17607 
17608         /*
17609          * MSI-X support is only available on 24xx HBAs that have
17610          * rev A2 parts (revid = 3) or greater.
17611          */
17612         if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) ||
17613             (CFG_IST(ha, CFG_CTRL_24XX) && ha->rev_id < 3)) {
17614                 EL(ha, "HBA does not support MSI-X\n");
17615                 return (DDI_FAILURE);
17616         }
17617 
17618         /* Per HP, these HP-branded HBAs are not supported with MSI-X */
17619         if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
17620             ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
17621                 EL(ha, "HBA does not support MSI-X (subdevid)\n");
17622                 return (DDI_FAILURE);
17623         }
17624 
17625         /* Get number of MSI-X interrupts the platform h/w supports */
17626         if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &hwvect)) !=
17627             DDI_SUCCESS) || hwvect == 0) {
17628                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, hwvect);
17629                 return (DDI_FAILURE);
17630         }
17631         QL_PRINT_10(ha, "ddi_intr_get_nintrs, hwvect=%d\n", hwvect);
17632 
17633         /* Get number of available system interrupts */
17634         if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
17635             DDI_SUCCESS) || avail == 0) {
17636                 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
17637                 return (DDI_FAILURE);
17638         }
17639         QL_PRINT_10(ha, "ddi_intr_get_navail, avail=%d\n", avail);
17640 
17641         /* Fill out the intr table */
17642         count = ha->interrupt_count;
17643         if (ha->flags & MULTI_QUEUE && count < ha->mq_msix_vectors) {
17644                 count = ha->mq_msix_vectors;
17645                 /* don't exceed the h/w capability */
17646                 if (count > hwvect) {
17647                         count = hwvect;
17648                 }
17649         }
17650 
17651         /* Allocate space for interrupt handles */
17652         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
17653         ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
17654 
17655         ha->iflags |= IFLG_INTR_MSIX;
17656 
17657         /* Allocate the interrupts */
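              /* A non-strict alloc may grant fewer; >= interrupt_count is OK. */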
17658         if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
17659             DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
17660             actual < ha->interrupt_count) {
17661                 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
17662                     "actual=%xh\n", ret, count, actual);
17663                 ql_release_intr(ha);
17664                 return (DDI_FAILURE);
17665         }
17666         ha->intr_cnt = actual;
17667         EL(ha, "min=%d, multi-q=%d, req=%d, rcv=%d\n",
17668             ha->interrupt_count, ha->mq_msix_vectors, count,
17669             ha->intr_cnt);
17670 
17671         /* Get interrupt priority */
17672         if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
17673                 EL(ha, "failed, get_pri ret=%xh\n", ret);
17674                 ql_release_intr(ha);
17675                 return (ret);
17676         }
17677         ha->intr_pri = DDI_INTR_PRI(i);
17678 
17679         /* Add the interrupt handlers */
17680         for (i = 0; i < actual; i++) {
17681                 if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
17682                     (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
17683                         EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
17684                             actual, ret);
17685                         ql_release_intr(ha);
17686                         return (ret);
17687                 }
17688         }
17689 
17690         /*
17691          * duplicate the rest of the interrupts
17692          * ddi_intr_dup_handler() isn't working on x86 just yet...
17693          */
17694 #ifdef __sparc
17695         for (i = actual; i < hwvect; i++) {
17696                 if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
17697                     &ha->htable[i])) != DDI_SUCCESS) {
17698                         EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
17699                             i, actual, ret);
17700                         ql_release_intr(ha);
17701                         return (ret);
17702                 }
17703                 if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
17704                         EL(ha, "failed, intr enable, ret=%xh\n", ret);
17705                         ql_release_intr(ha);
17706                         return (ret);
17707                 }
17708         }
17709 #endif
17710 
17711         /* Get the capabilities */
17712         (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
17713 
17714         /* Enable interrupts */
17715         if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
17716                 if ((ret = ddi_intr_block_enable(ha->htable, actual)) !=
17717                     DDI_SUCCESS) {
17718                         EL(ha, "failed, block enable, ret=%xh\n", ret);
17719                         ql_release_intr(ha);
17720                         return (ret);
17721                 }
17722                 QL_PRINT_10(ha, "intr_block_enable %d\n", actual);
17723         } else {
17724                 for (i = 0; i < actual; i++) {
17725                         if ((ret = ddi_intr_enable(ha->htable[i])) !=
17726                             DDI_SUCCESS) {
17727                                 EL(ha, "failed, intr enable, ret=%xh\n", ret);
17728                                 ql_release_intr(ha);
17729                                 return (ret);
17730                         }
17731                         QL_PRINT_10(ha, "intr_enable %d\n", i);
17732                 }
17733         }
17734 
17735         QL_PRINT_3(ha, "done\n");
17736 
17737         return (DDI_SUCCESS);
17738 }
17739 
17740 /*
17741  * ql_setup_fixed
17742  *      Sets up aif FIXED interrupts
17743  *
17744  * Input:
17745  *      ha = adapter state pointer.
17746  *
17747  * Returns:
17748  *      DDI_SUCCESS or DDI_FAILURE.
17749  *
17750  * Context:
17751  *      Kernel context.
17752  */
17753 static int
17754 ql_setup_fixed(ql_adapter_state_t *ha)
17755 {
17756         int32_t         count = 0;
17757         int32_t         actual = 0;
17758         int32_t         ret;
17759         uint_t          i;
17760 
17761         QL_PRINT_3(ha, "started\n");
17762 
17763         if (ql_disable_intx != 0) {
17764                 EL(ha, "INT-X is disabled by user\n");
17765                 return (DDI_FAILURE);
17766         }
17767 
17768         /* Get number of fixed interrupts the system supports */
17769         if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
17770             &count)) != DDI_SUCCESS) || count == 0) {
17771                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
17772                 return (DDI_FAILURE);
17773         }
17774 
17775         /* Allocate space for interrupt handles */
17776         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
17777         ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
17778 
17779         ha->iflags |= IFLG_INTR_FIXED;
17780 
17781         /* Allocate the interrupts */
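              /* With DDI_INTR_ALLOC_STRICT, anything less than count fails. */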
17782         if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
17783             0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
17784             actual < count) {
17785                 EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
17786                     "actual=%xh\n", ret, count, actual);
17787                 ql_release_intr(ha);
17788                 return (DDI_FAILURE);
17789         }
17790         ha->intr_cnt = actual;
17791 
17792         /* Get interrupt priority */
17793         if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
17794                 EL(ha, "failed, get_pri ret=%xh\n", ret);
17795                 ql_release_intr(ha);
17796                 return (ret);
17797         }
17798         ha->intr_pri = DDI_INTR_PRI(i);
17799 
17800         /* Add the interrupt handlers */
17801         for (i = 0; i < actual; i++) {
17802                 if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
17803                     (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
17804                         EL(ha, "failed, intr_add ret=%xh\n", ret);
17805                         ql_release_intr(ha);
17806                         return (ret);
17807                 }
17808         }
17809 
17810         /* Enable interrupts */
17811         for (i = 0; i < actual; i++) {
17812                 if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
17813                         EL(ha, "failed, intr enable, ret=%xh\n", ret);
17814                         ql_release_intr(ha);
17815                         return (ret);
17816                 }
17817         }
17818 
17819         EL(ha, "using FIXED interrupts\n");
17820 
17821         QL_PRINT_3(ha, "done\n");
17822 
17823         return (DDI_SUCCESS);
17824 }
17825 
17826 /*
17827  * ql_release_intr
17828  *      Releases interrupt resources (legacy or aif)
17829  *
17830  * Input:
17831  *      ha = adapter state pointer.
17832  *
17833  * Returns:
17834  *
17835  * Context:
17836  *      Kernel context.
17837  */
17838 static void
17839 ql_release_intr(ql_adapter_state_t *ha)
17840 {
17841         int32_t i, x;
17842 
17843         QL_PRINT_3(ha, "started\n");
17844 
17845         if (!(ha->iflags & IFLG_INTR_AIF)) {
17846                 ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
17847         } else {
17848                 ha->iflags &= ~(IFLG_INTR_AIF);
17849                 if (ha->htable != NULL && ha->hsize > 0) {
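                              /* Derive the handle slot count from the table size. */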
17850                         i = x = (int32_t)ha->hsize /
17851                             (int32_t)sizeof (ddi_intr_handle_t);
17852                         if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
17853                                 (void) ddi_intr_block_disable(ha->htable,
17854                                     ha->intr_cnt);
17855                         } else {
17856                                 while (i-- > 0) {
17857                                         if (ha->htable[i] == 0) {
17858                                                 EL(ha, "htable[%x]=0h\n", i);
17859                                                 continue;
17860                                         }
17861 
17862                                         (void) ddi_intr_disable(ha->htable[i]);
17863                                 }
17864                         }
17865 
17866                         i = x;
17867                         while (i-- > 0) {
17868                                 if (i < ha->intr_cnt) {
17869                                         (void) ddi_intr_remove_handler(
17870                                             ha->htable[i]);
17871                                 }
17872                                 (void) ddi_intr_free(ha->htable[i]);
17873                         }
17874 
17875                         ha->intr_cnt = 0;
17876                         ha->intr_cap = 0;
17877 
17878                         kmem_free(ha->htable, ha->hsize);
17879                         ha->htable = NULL;
17880                         ha->hsize = 0;
17881                 }
17882         }
17883 
17884         ha->intr_pri = NULL;
17885 
17886         QL_PRINT_3(ha, "done\n");
17887 }
17888 
17889 /*
17890  * ql_legacy_intr
17891  *      Sets up legacy interrupts.
17892  *
17893  *      NB: Only to be used if AIF (Advanced Interrupt Framework)
17894  *          is NOT in the kernel.
17895  *
17896  * Input:
17897  *      ha = adapter state pointer.
17898  *
17899  * Returns:
17900  *      DDI_SUCCESS or DDI_FAILURE.
17901  *
17902  * Context:
17903  *      Kernel context.
17904  */
17905 static int
17906 ql_legacy_intr(ql_adapter_state_t *ha)
17907 {
17908         int     rval;
17909 
17910         QL_PRINT_3(ha, "started\n");
17911 
17912         /* Get iblock cookies to initialize mutexes */
17913         if ((rval = ddi_get_iblock_cookie(ha->dip, 0, &ha->iblock_cookie)) !=
17914             DDI_SUCCESS) {
17915                 EL(ha, "failed, get_iblock: %xh\n", rval);
17916                 return (rval);
17917         }
17918         ha->intr_pri = (void *)ha->iblock_cookie;
17919 
17920         /* Setup standard/legacy interrupt handler */
17921         if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
17922             (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
17923                 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
17924                     QL_NAME, ha->instance);
17925                 return (DDI_FAILURE);
17926         }
17927         ha->iflags |= IFLG_INTR_LEGACY;
17928 
17929         /* Setup mutexes */
17930         if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
17931                 EL(ha, "failed, mutex init ret=%xh\n", rval);
17932                 ql_release_intr(ha);
17933         } else {
17934                 EL(ha, "using legacy interrupts\n");
17935         }
17936         return (rval);
17937 }
17938 
17939 /*
17940  * ql_init_mutex
17941  *      Initializes mutexes
17942  *
17943  * Input:
17944  *      ha = adapter state pointer.
17945  *
17946  * Returns:
17947  *      DDI_SUCCESS or DDI_FAILURE.
17948  *
17949  * Context:
17950  *      Kernel context.
17951  */
17952 static int
17953 ql_init_mutex(ql_adapter_state_t *ha)
17954 {
17955         QL_PRINT_3(ha, "started\n");
17956 
17957         /* mutexes to protect the adapter state structure. */
17958         mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17959 
17960         /* mutex to protect the ISP request ring. */
17961         mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17962 
17963         /* I/O completion queue protection. */
17964         mutex_init(&ha->comp_q_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17965         cv_init(&ha->cv_comp_thread, NULL, CV_DRIVER, NULL);
17966 
17967         /* mutex to protect the mailbox registers. */
17968         mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17969 
17970         /* Mailbox wait and interrupt conditional variable. */
17971         cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
17972         cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
17973 
17974         /* power management protection */
17975         mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17976 
17977         /* Unsolicited buffer conditional variable. */
17978         mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17979         cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
17980 
17981         /* mutex to protect task daemon context. */
17982         mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17983         cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
17984 
17985         /* Suspended conditional variable. */
17986         cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
17987 
17988         /* mutex to protect per instance f/w dump flags and buffer */
17989         mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17990 
17991         QL_PRINT_3(ha, "done\n");
17992 
17993         return (DDI_SUCCESS);
17994 }
17995 
17996 /*
17997  * ql_destroy_mutex
17998  *      Destroys mutexes
17999  *
18000  * Input:
18001  *      ha = adapter state pointer.
18002  *
18003  * Returns:
18004  *
18005  * Context:
18006  *      Kernel context.
18007  */
18008 static void
18009 ql_destroy_mutex(ql_adapter_state_t *ha)
18010 {
18011         QL_PRINT_3(ha, "started\n");
18012 
18013         mutex_destroy(&ha->dump_mutex);
18014         cv_destroy(&ha->cv_dr_suspended);
18015         cv_destroy(&ha->cv_task_daemon);
18016         mutex_destroy(&ha->task_daemon_mutex);
18017         cv_destroy(&ha->cv_ub);
18018         mutex_destroy(&ha->ub_mutex);
18019         mutex_destroy(&ha->pm_mutex);
18020         cv_destroy(&ha->cv_mbx_intr);
18021         cv_destroy(&ha->cv_mbx_wait);
18022         mutex_destroy(&ha->mbx_mutex);
18023         cv_destroy(&ha->cv_comp_thread);
18024         mutex_destroy(&ha->comp_q_mutex);
18025         mutex_destroy(&ha->req_ring_mutex);
18026         mutex_destroy(&ha->mutex);
18027 
18028         QL_PRINT_3(ha, "done\n");
18029 }
18030 
18031 /*
18032  * ql_fwmodule_resolve
18033  *      Loads and resolves external firmware module and symbols
18034  *
18035  * Input:
18036  *      ha:             adapter state pointer.
18037  *
18038  * Returns:
18039  *      ql local function return status code:
18040  *              QL_SUCCESS - external f/w module and symbols resolved
18041  *              QL_FW_NOT_SUPPORTED - Driver does not support ISP type
18042  *              QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
18043  *              QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
18044  * Context:
18045  *      Kernel context.
18046  *
18047  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
18048  * could switch to a tighter scope around the actual download (and add an extra
18049  * ddi_modopen for module opens that occur before root is mounted).
18050  *
18051  */
18052 uint32_t
18053 ql_fwmodule_resolve(ql_adapter_state_t *ha)
18054 {
18055         int8_t                  module[128];
18056         int8_t                  fw_version[128];
18057         uint32_t                rval = QL_SUCCESS;
18058         caddr_t                 code, code02, code03;
18059         uint8_t                 *p_ucfw;
18060         uint16_t                *p_usaddr, *p_uslen;
18061         uint32_t                *p_uiaddr, *p_uilen, *p_uifw;
18062         uint32_t                *p_uiaddr02, *p_uilen02, *p_uilen03;
18063         struct fw_table         *fwt;
18064         extern struct fw_table  fw_table[];
18065 
18066         QL_PRINT_3(ha, "started\n");
18067 
18068         if (ha->fw_module != NULL) {
18069                 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
18070                     ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
18071                     ha->fw_subminor_version);
18072                 return (rval);
18073         }
18074 
18075         /* make sure the fw_class is in the fw_table of supported classes */
18076         for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
18077                 if (fwt->fw_class == ha->fw_class)
18078                         break;                  /* match */
18079         }
18080         if (fwt->fw_version == NULL) {
18081                 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
18082                     "in driver's fw_table", QL_NAME, ha->instance,
18083                     ha->fw_class);
18084                 return (QL_FW_NOT_SUPPORTED);
18085         }
18086 
 
18119                         rval = QL_FWSYM_NOT_FOUND;
18120                         EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
18121                 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
18122                     "firmware_version", NULL)) == NULL) {
18123                         rval = QL_FWSYM_NOT_FOUND;
18124                         EL(ha, "failed, f/w module %s fwver symbol\n", module);
18125                 }
18126 
18127                 if (rval == QL_SUCCESS) {
18128                         ha->risc_fw[0].code = code;
18129                         ha->risc_fw[0].addr = *p_usaddr;
18130                         ha->risc_fw[0].length = *p_uslen;
18131 
18132                         (void) snprintf(fw_version, sizeof (fw_version),
18133                             "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
18134                 }
18135                 break;
18136 
18137         case 0x2400:
18138         case 0x2500:
18139         case 0x2700:
18140         case 0x8100:
18141         case 0x8301fc:
18142 
18143                 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
18144                     NULL)) == NULL) {
18145                         rval = QL_FWSYM_NOT_FOUND;
18146                         EL(ha, "failed, f/w module %s rc01 symbol\n", module);
18147                 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
18148                     "risc_code_addr01", NULL)) == NULL) {
18149                         rval = QL_FWSYM_NOT_FOUND;
18150                         EL(ha, "failed, f/w module %s rca01 symbol\n", module);
18151                 } else if ((p_uilen = ddi_modsym(ha->fw_module,
18152                     "risc_code_length01", NULL)) == NULL) {
18153                         rval = QL_FWSYM_NOT_FOUND;
18154                         EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
18155                 } else if ((p_uifw = ddi_modsym(ha->fw_module,
18156                     "firmware_version", NULL)) == NULL) {
18157                         rval = QL_FWSYM_NOT_FOUND;
18158                         EL(ha, "failed, f/w module %s fwver symbol\n", module);
18159                 }
18160 
18161                 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
18162                     NULL)) == NULL) {
18163                         rval = QL_FWSYM_NOT_FOUND;
18164                         EL(ha, "failed, f/w module %s rc02 symbol\n", module);
18165                 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
18166                     "risc_code_addr02", NULL)) == NULL) {
18167                         rval = QL_FWSYM_NOT_FOUND;
18168                         EL(ha, "failed, f/w module %s rca02 symbol\n", module);
18169                 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
18170                     "risc_code_length02", NULL)) == NULL) {
18171                         rval = QL_FWSYM_NOT_FOUND;
18172                         EL(ha, "failed, f/w module %s rcl02 symbol\n", module);
18173                 }
18174 
18175                 if (rval == QL_SUCCESS) {
18176                         if (ha->fw_class == 0x2700) {
18177                                 if ((code03 = ddi_modsym(ha->fw_module,
18178                                     "tmplt_code01", NULL)) == NULL) {
18179                                         EL(ha, "failed, f/w module %s "
18180                                             "tmplt_code01 symbol\n", module);
18181                                 } else if ((p_uilen03 = ddi_modsym(
18182                                     ha->fw_module, "tmplt_code_length01",
18183                                     NULL)) == NULL) {
18184                                         code03 = NULL;
18185                                         EL(ha, "failed, f/w module %s "
18186                                             "tmplt_code_length01 symbol\n",
18187                                             module);
18188                                 }
18189                                 ha->risc_fw[2].code = code03;
18190                                 if (ha->risc_fw[2].code != NULL) {
18191                                         ha->risc_fw[2].length = *p_uilen03;
18192                                 }
18193                         }
18194                         ha->risc_fw[0].code = code;
18195                         ha->risc_fw[0].addr = *p_uiaddr;
18196                         ha->risc_fw[0].length = *p_uilen;
18197                         ha->risc_fw[1].code = code02;
18198                         ha->risc_fw[1].addr = *p_uiaddr02;
18199                         ha->risc_fw[1].length = *p_uilen02;
18200 
18201                         (void) snprintf(fw_version, sizeof (fw_version),
18202                             "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
18203                 }
18204                 break;
18205 
18206         default:
18207                 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
18208                 rval = QL_FW_NOT_SUPPORTED;
18209         }
18210 
18211         if (rval != QL_SUCCESS) {
18212                 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
18213                     "module %s (%x)", QL_NAME, ha->instance, module, rval);
 
18216                         ha->fw_module = NULL;
18217                 }
18218         } else {
18219                 /*
18220                  * check for firmware version mismatch between module and
18221                  * compiled in fw_table version.
18222                  */
18223 
18224                 if (strcmp(fwt->fw_version, fw_version) != 0) {
18225 
18226                         /*
18227                          * If f/w / driver version mismatches then
18228                          * return a successful status -- however warn
18229                          * the user that this is NOT recommended.
18230                          */
18231 
18232                         cmn_err(CE_WARN, "%s(%d): driver / f/w version "
18233                             "mismatch for %x: driver-%s module-%s", QL_NAME,
18234                             ha->instance, ha->fw_class, fwt->fw_version,
18235                             fw_version);
18236                 }
18237         }
18238 
18239         QL_PRINT_3(ha, "done\n");
18240 
18241         return (rval);
18242 }
18243 
18244 /*
18245  * ql_port_state
18246  *      Set the state on all adapter ports.
18247  *
18248  * Input:
18249  *      ha:     parent adapter state pointer.
18250  *      state:  port state.
18251  *      flags:  task daemon flags to set.
18252  *
18253  * Context:
18254  *      Interrupt or Kernel context, no mailbox commands allowed.
18255  */
18256 void
18257 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
18258 {
18259         ql_adapter_state_t      *vha;
18260 
18261         QL_PRINT_3(ha, "started\n");
18262 
18263         TASK_DAEMON_LOCK(ha);
18264         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
18265                 if (FC_PORT_STATE_MASK(vha->state) != state) {
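                              /* Keep the port speed bits unless going offline. */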
18266                         vha->state = state != FC_STATE_OFFLINE ?
18267                             (FC_PORT_SPEED_MASK(vha->state) | state) : state;
18268                         vha->task_daemon_flags |= flags;
18269                 }
18270         }
18271         ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
18272         TASK_DAEMON_UNLOCK(ha);
18273 
18274         QL_PRINT_3(ha, "done\n");
18275 }
18276 
18277 /*
18278  * ql_el_trace_alloc - Construct an extended logging trace descriptor.
18279  *
18280  * Input:       Pointer to the adapter state structure.
18281  * Context:     Kernel context.
18282  */
18283 void
18284 ql_el_trace_alloc(ql_adapter_state_t *ha)
18285 {
18286         ql_trace_entry_t        *entry;
18287         size_t                  maxsize;
18288 
18289         ha->ql_trace_desc =
18290             (ql_trace_desc_t *)kmem_zalloc(
18291             sizeof (ql_trace_desc_t), KM_SLEEP);
18292 
18293         /* ql_log_entries could be adjusted in /etc/system */
18294         maxsize = ql_log_entries * sizeof (ql_trace_entry_t);
18295         entry = kmem_zalloc(maxsize, KM_SLEEP);
18296 
18297         mutex_init(&ha->ql_trace_desc->mutex, NULL,
18298             MUTEX_DRIVER, NULL);
18299 
18300         ha->ql_trace_desc->trace_buffer = entry;
18301         ha->ql_trace_desc->trace_buffer_size = maxsize;
18302         ha->ql_trace_desc->nindex = 0;
18303 
18304         ha->ql_trace_desc->nentries = ql_log_entries;
18305         ha->ql_trace_desc->start = ha->ql_trace_desc->end = 0;
18306         ha->ql_trace_desc->csize = 0;
18307         ha->ql_trace_desc->count = 0;
18308 }
18309 
18310 /*
18311  * ql_el_trace_dealloc - Destroy an extended logging trace descriptor.
18312  *
18313  * Input:       Pointer to the adapter state structure.
18314  * Context:     Kernel context.
18315  */
18316 void
18317 ql_el_trace_dealloc(ql_adapter_state_t *ha)
18318 {
18319         if (ha->ql_trace_desc != NULL) {
18320                 if (ha->ql_trace_desc->trace_buffer != NULL) {
18321                         kmem_free(ha->ql_trace_desc->trace_buffer,
18322                             ha->ql_trace_desc->trace_buffer_size);
18323                 }
18324                 mutex_destroy(&ha->ql_trace_desc->mutex);
18325                 kmem_free(ha->ql_trace_desc,
18326                     sizeof (ql_trace_desc_t));
18327         }
18328 }
18329 
18330 /*
18331  * els_cmd_text - Return a pointer to a string describing the command
18332  *
18333  * Input:       els_cmd = the els command opcode.
18334  * Returns:     pointer to a string.
18335  * Context:     Kernel context.
18336  */
18337 char *
18338 els_cmd_text(int els_cmd)
18339 {
18340         cmd_table_t *entry = &els_cmd_tbl[0];
18341 
18342         return (cmd_text(entry, els_cmd));
18343 }
18344 
18345 /*
18346  * mbx_cmd_text - Return a pointer to a string describing the command
18347  *
 
18360 /*
18361  * cmd_text     Return a pointer to a string describing the command
18362  *
18363  * Input:       entry = the command table
18364  *              cmd = the command.
18365  * Returns:     pointer to a string.
18366  * Context:     Kernel context.
18367  */
18368 char *
18369 cmd_text(cmd_table_t *entry, int cmd)
18370 {
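              /* An unmatched cmd returns the table's terminating entry string. */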
18371         for (; entry->cmd != 0; entry++) {
18372                 if (entry->cmd == cmd) {
18373                         break;
18374                 }
18375         }
18376         return (entry->string);
18377 }
18378 
18379 /*
18380  * ql_els_24xx_iocb
18381  *      els request indication.
18382  *
18383  * Input:
18384  *      ha:     adapter state pointer.
18385  *      req_q:  request queue structure pointer.
18386  *      srb:    scsi request block pointer.
18387  *      arg:    els passthru entry iocb pointer.
18388  *
18389  * Returns:
18390  *
18391  * Context:     Kernel context.
18392  */
18393 void
18394 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *srb,
18395     void *arg)
18396 {
18397         els_descriptor_t        els_desc;
18398 
18399         /* Extract the ELS information */
18400         ql_fca_isp_els_request(ha, req_q, (fc_packet_t *)srb->pkt,
18401             &els_desc);
18402 
18403         /* Construct the passthru entry */
18404         ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
18405 
18406         /* Ensure correct endianness */
18407         ql_isp_els_handle_cmd_endian(ha, srb);
18408 }
18409 
18410 /*
18411  * ql_fca_isp_els_request
18412  *      Extract into an els descriptor the info required
18413  *      to build an els_passthru iocb from an fc packet.
18414  *
18415  * Input:
18416  *      ha:             adapter state pointer.
18417  *      req_q:          request queue structure pointer.
18418  *      pkt:            fc packet pointer
18419  *      els_desc:       els descriptor pointer
18420  *
18421  * Context:
18422  *      Kernel context.
18423  */
18424 static void
18425 ql_fca_isp_els_request(ql_adapter_state_t *ha, ql_request_q_t *req_q,
18426     fc_packet_t *pkt, els_descriptor_t *els_desc)
18427 {
18428         ls_code_t       els;
18429 
18430         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
18431             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
18432 
18433         els_desc->els = els.ls_code;
18434 
18435         els_desc->els_handle = req_q->req_ring.acc_handle;
18436         els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
18437         els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
18438         /* Use n_port_handle only if it's a local loop ID (< 0x7d), else 0. */
18439         if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
18440                 els_desc->n_port_handle = ha->n_port->n_port_handle;
18441         } else {
18442                 els_desc->n_port_handle = 0;
18443         }
18444         els_desc->control_flags = 0;
18445         els_desc->cmd_byte_count = pkt->pkt_cmdlen;
18446         /*
18447          * Transmit DSD. This field defines the Fibre Channel Frame payload
18448          * (without the frame header) in system memory.
18449          */
18450         els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
18451         els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
18452         els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
18453 
18454         els_desc->rsp_byte_count = pkt->pkt_rsplen;
18455         /*
 
18492             els_desc->els);
18493         ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
18494             els_desc->d_id.b.al_pa);
18495         ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
18496             els_desc->d_id.b.area);
18497         ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
18498             els_desc->d_id.b.domain);
18499         ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
18500             els_desc->s_id.b.al_pa);
18501         ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
18502             els_desc->s_id.b.area);
18503         ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
18504             els_desc->s_id.b.domain);
18505         ddi_put16(els_desc->els_handle, &els_entry->control_flags,
18506             els_desc->control_flags);
18507         ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
18508             els_desc->rsp_byte_count);
18509         ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
18510             els_desc->cmd_byte_count);
18511         /* Load transmit data segments and count. */
18512         ptr32 = (uint32_t *)&els_entry->dseg;
18513         ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
18514         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
18515         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
18516         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
18517         ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
18518         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
18519         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
18520         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
18521 }
18522 
18523 /*
18524  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
18525  *                                in host memory.
18526  *
18527  * Input:       ha = adapter state pointer.
18528  *              srb = scsi request block
18529  * Returns:
18530  * Context:     Kernel context.
18531  */
18532 void
 
 
18639         }
18640 }
18641 
18642 /*
18643  * ql_n_port_plogi
18644  *      In an N_Port-to-N_Port (point-to-point) topology where an N_Port has
18645  *      logged in with the firmware because it held the N_Port login initiative,
18646  *      send up a PLOGI by proxy to stimulate the login procedure to continue.
18647  *
18648  * Input:
18649  *      ha = adapter state pointer.
18650  * Returns:
18651  *
18652  * Context:
18653  *      Kernel context.
18654  */
18655 static int
18656 ql_n_port_plogi(ql_adapter_state_t *ha)
18657 {
18658         int             rval;
18659         ql_tgt_t        *tq = NULL;
18660         ql_head_t done_q = { NULL, NULL };
18661 
18662         rval = QL_SUCCESS;
18663 
18664         if (ha->topology & QL_N_PORT) {
18665                 /* if we're doing this the n_port_handle must be good */
18666                 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
18667                         tq = ql_loop_id_to_queue(ha,
18668                             ha->n_port->n_port_handle);
18669                         if (tq != NULL) {
18670                                 (void) ql_send_plogi(ha, tq, &done_q);
18671                         } else {
18672                                 EL(ha, "n_port_handle = %x, tq = %x\n",
18673                                     ha->n_port->n_port_handle, tq);
18674                         }
18675                 } else {
18676                         EL(ha, "n_port_handle = %x, tq = %x\n",
18677                             ha->n_port->n_port_handle, tq);
18678                 }
18679                 if (done_q.first != NULL) {
18680                         ql_done(done_q.first, B_FALSE);
18681                 }
18682         }
18683         return (rval);
18684 }
18685 
18686 /*
18687  * Compare two WWNs. The NAA is omitted for comparison.
18688  *
18689  * Note particularly that the indentation used in this
18690  * function isn't according to Sun recommendations. It
18691  * is indented to make reading a bit easier.
18692  *
18693  * Return Values:
18694  *   if first == second return  0
18695  *   if first > second  return  1
18696  *   if first < second  return -1
18697  */
18698 /* ARGSUSED */
18699 int
18700 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
18701 {
18702         la_wwn_t t1, t2;
18703         int rval;
18704 
18705         /*
18706          * Fibre Channel protocol is big endian, so compare
18707          * as big endian values
18708          */
18709         t1.i_wwn[0] = BE_32(first->i_wwn[0]);
18710         t1.i_wwn[1] = BE_32(first->i_wwn[1]);
18711 
18712         t2.i_wwn[0] = BE_32(second->i_wwn[0]);
18713         t2.i_wwn[1] = BE_32(second->i_wwn[1]);
18714 
18715         if (t1.i_wwn[0] == t2.i_wwn[0]) {
18716                 if (t1.i_wwn[1] == t2.i_wwn[1]) {
18717                         rval = 0;
18718                 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
18719                         rval = 1;
18720                 } else {
18721                         rval = -1;
18722                 }
18723         } else {
18724                 if (t1.i_wwn[0] > t2.i_wwn[0]) {
18725                         rval = 1;
18726                 } else {
18727                         rval = -1;
18728                 }
18729         }
18730         return (rval);
18731 }
18732 
18733 /*
18734  * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
18735  *
18736  * Input:       Pointer to the adapter state structure.
18737  * Returns:     Success or Failure.
18738  * Context:     Kernel context.
18739  */
18740 int
18741 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
18742 {
18743         int     rval = DDI_SUCCESS;
18744 
18745         QL_PRINT_3(ha, "started\n");
18746 
18747         ha->nvram_cache =
18748             (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
18749             KM_SLEEP);
18750 
18751         if (ha->nvram_cache == NULL) {
18752                 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
18753                     " descriptor", QL_NAME, ha->instance);
18754                 rval = DDI_FAILURE;
18755         } else {
18756                 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
18757                         ha->nvram_cache->size = sizeof (nvram_24xx_t);
18758                 } else {
18759                         ha->nvram_cache->size = sizeof (nvram_t);
18760                 }
18761                 ha->nvram_cache->cache =
18762                     (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
18763                 if (ha->nvram_cache->cache == NULL) {
18764                         cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
18765                             QL_NAME, ha->instance);
18766                         kmem_free(ha->nvram_cache,
18767                             sizeof (nvram_cache_desc_t));
18768                         ha->nvram_cache = 0;
18769                         rval = DDI_FAILURE;
18770                 } else {
18771                         ha->nvram_cache->valid = 0;
18772                 }
18773         }
18774 
18775         QL_PRINT_3(ha, "done\n");
18776 
18777         return (rval);
18778 }
18779 
18780 /*
18781  * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
18782  *
18783  * Input:       Pointer to the adapter state structure.
18784  * Returns:     Success or Failure.
18785  * Context:     Kernel context.
18786  */
18787 int
18788 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
18789 {
18790         int     rval = DDI_SUCCESS;
18791 
18792         QL_PRINT_3(ha, "started\n");
18793 
18794         if (ha->nvram_cache == NULL) {
18795                 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
18796                     QL_NAME, ha->instance);
18797                 rval = DDI_FAILURE;
18798         } else {
18799                 if (ha->nvram_cache->cache != NULL) {
18800                         kmem_free(ha->nvram_cache->cache,
18801                             ha->nvram_cache->size);
18802                 }
18803                 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
18804         }
18805 
18806         QL_PRINT_3(ha, "done\n");
18807 
18808         return (rval);
18809 }
18810 
18811 /*
18812  * ql_plogi_params_desc_ctor - Construct a plogi retry params descriptor.
18813  *
18814  * Input:       Pointer to the adapter state structure.
18815  * Returns:     Success or Failure.
18816  * Context:     Kernel context.
18817  */
18818 int
18819 ql_plogi_params_desc_ctor(ql_adapter_state_t *ha)
18820 {
18821         int     rval = DDI_SUCCESS;
18822 
18823         QL_PRINT_3(ha, "started\n");
18824 
18825         ha->plogi_params =
18826             (plogi_params_desc_t *)kmem_zalloc(sizeof (plogi_params_desc_t),
18827             KM_SLEEP);
18828 
18829         if (ha->plogi_params == NULL) {
18830                 cmn_err(CE_WARN, "%s(%d): can't construct plogi params"
18831                     " descriptor", QL_NAME, ha->instance);
18832                 rval = DDI_FAILURE;
18833         } else {
18834                 /* default initializers. */
18835                 ha->plogi_params->retry_cnt = QL_PLOGI_RETRY_CNT;
18836                 ha->plogi_params->retry_dly_usec = QL_PLOGI_RETRY_DLY_USEC;
18837         }
18838 
18839         QL_PRINT_3(ha, "done\n");
18840 
18841         return (rval);
18842 }
18843 
18844 /*
18845  * ql_plogi_params_desc_dtor - Destroy a plogi retry params descriptor.
18846  *
18847  * Input:       Pointer to the adapter state structure.
18848  * Returns:     Success or Failure.
18849  * Context:     Kernel context.
18850  */
18851 int
18852 ql_plogi_params_desc_dtor(ql_adapter_state_t *ha)
18853 {
18854         int     rval = DDI_SUCCESS;
18855 
18856         QL_PRINT_3(ha, "started\n");
18857 
18858         if (ha->plogi_params == NULL) {
18859                 cmn_err(CE_WARN, "%s(%d): can't destroy plogi params"
18860                     " descriptor", QL_NAME, ha->instance);
18861                 rval = DDI_FAILURE;
18862         } else {
18863                 kmem_free(ha->plogi_params, sizeof (plogi_params_desc_t));
18864         }
18865 
18866         QL_PRINT_3(ha, "done\n");
18867 
18868         return (rval);
18869 }
18870 
18871 /*
18872  * ql_toggle_loop_state
18873  *      Changes loop state to offline and then online.
18874  *
18875  * Input:
18876  *      ha:     adapter state pointer.
18877  *
18878  * Context:
18879  *      Kernel context.
18880  */
18881 void
18882 ql_toggle_loop_state(ql_adapter_state_t *ha)
18883 {
18884         uint32_t        timer;
18885 
18886         if (LOOP_READY(ha)) {
18887                 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
18888                 ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
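                      /* Wait for the task daemon to process the state change. */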
18889                 for (timer = 30; timer; timer--) {
18890                         if (!(ha->task_daemon_flags & FC_STATE_CHANGE)) {
18891                                 break;
18892                         }
18893                         delay(100);
18894                 }
18895                 ql_loop_online(ha);
18896         }
18897 }
18898 
18899 /*
18900  * ql_create_queues
18901  *      Allocate request/response queues.
18902  *
18903  * Input:
18904  *      ha:     adapter state pointer.
18905  *
18906  * Returns:
18907  *      ql driver local function return status codes
18908  *
18909  * Context:
18910  *      Kernel context.
18911  */
18912 static int
18913 ql_create_queues(ql_adapter_state_t *ha)
18914 {
18915         int             rval;
18916         uint16_t        cnt;
18917 
18918         QL_PRINT_10(ha, "started\n");
18919 
18920         if (ha->req_q[0] != NULL) {
18921                 QL_PRINT_10(ha, "done, queues already exist\n");
18922                 return (QL_SUCCESS);
18923         }
18924         if (ha->vp_index != 0) {
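                      /* Virtual ports share the physical adapter's queues. */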
18925                 QL_PRINT_10(ha, "done, no multi-req-q \n");
18926                 ha->req_q[0] = ha->pha->req_q[0];
18927                 ha->req_q[1] = ha->pha->req_q[1];
18928                 ha->rsp_queues = ha->pha->rsp_queues;
18929                 return (QL_SUCCESS);
18930         }
18931 
18932         /* Setup request queue buffer pointers. */
18933         ha->req_q[0] = kmem_zalloc(sizeof (ql_request_q_t), KM_SLEEP);
18934 
18935         /* Allocate request queue. */
18936         ha->req_q[0]->req_entry_cnt = REQUEST_ENTRY_CNT;
18937         ha->req_q[0]->req_ring.size = ha->req_q[0]->req_entry_cnt *
18938             REQUEST_ENTRY_SIZE;
18939         if (ha->flags & QUEUE_SHADOW_PTRS) {
18940                 ha->req_q[0]->req_ring.size += SHADOW_ENTRY_SIZE;
18941         }
18942         ha->req_q[0]->req_ring.type = LITTLE_ENDIAN_DMA;
18943         ha->req_q[0]->req_ring.max_cookie_count = 1;
18944         ha->req_q[0]->req_ring.alignment = 64;
18945         if ((rval = ql_alloc_phys(ha, &ha->req_q[0]->req_ring, KM_SLEEP)) !=
18946             QL_SUCCESS) {
18947                 EL(ha, "request queue status=%xh", rval);
18948                 ql_delete_queues(ha);
18949                 return (rval);
18950         }
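              /* The request-out shadow word follows the last ring entry. */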
18951         if (ha->flags & QUEUE_SHADOW_PTRS) {
18952                 ha->req_q[0]->req_out_shadow_ofst =
18953                     ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18954                 ha->req_q[0]->req_out_shadow_ptr = (uint32_t *)
18955                     ((caddr_t)ha->req_q[0]->req_ring.bp +
18956                     ha->req_q[0]->req_out_shadow_ofst);
18957         }
18958         ha->fw_transfer_size = ha->req_q[0]->req_ring.size;
18959         if (ha->flags & MULTI_QUEUE) {
18960                 ha->req_q[0]->mbar_req_in = MBAR2_REQ_IN;
18961                 ha->req_q[0]->mbar_req_out = MBAR2_REQ_OUT;
18962                 if (ha->req_q[0]->mbar_req_in >= ha->mbar_size) {
18963                         EL(ha, "req_q index=0 exceeds mbar size=%xh",
18964                             ha->mbar_size);
18965                         ql_delete_queues(ha);
18966                         return (QL_FUNCTION_PARAMETER_ERROR);
18967                 }
18968         }
18969 
18970         /* Allocate response queues. */
18971         if (ha->rsp_queues == NULL) {
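                      /* One response queue per interrupt vector beyond the first. */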
18972                 if (ha->intr_cnt > 1) {
18973                         ha->rsp_queues_cnt = (uint8_t)(ha->intr_cnt - 1);
18974                 } else {
18975                         ha->rsp_queues_cnt = 1;
18976                 }
18977                 ha->io_min_rsp_q_number = 0;
18978                 if (ha->rsp_queues_cnt > 1) {
18979                         /* Setup request queue buffer pointers. */
18980                         ha->req_q[1] = kmem_zalloc(sizeof (ql_request_q_t),
18981                             KM_SLEEP);
18982 
18983                         /* Allocate request queue. */
18984                         ha->req_q[1]->req_entry_cnt = REQUEST_ENTRY_CNT;
18985                         ha->req_q[1]->req_ring.size =
18986                             ha->req_q[1]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18987                         if (ha->flags & QUEUE_SHADOW_PTRS) {
18988                                 ha->req_q[1]->req_ring.size +=
18989                                     SHADOW_ENTRY_SIZE;
18990                         }
18991                         ha->req_q[1]->req_ring.type = LITTLE_ENDIAN_DMA;
18992                         ha->req_q[1]->req_ring.max_cookie_count = 1;
18993                         ha->req_q[1]->req_ring.alignment = 64;
18994                         if ((rval = ql_alloc_phys(ha, &ha->req_q[1]->req_ring,
18995                             KM_SLEEP)) != QL_SUCCESS) {
18996                                 EL(ha, "ha request queue status=%xh", rval);
18997                                 ql_delete_queues(ha);
18998                                 return (rval);
18999                         }
19000                         if (ha->flags & QUEUE_SHADOW_PTRS) {
19001                                 ha->req_q[1]->req_out_shadow_ofst =
19002                                     ha->req_q[1]->req_entry_cnt *
19003                                     REQUEST_ENTRY_SIZE;
19004                                 ha->req_q[1]->req_out_shadow_ptr = (uint32_t *)
19005                                     ((caddr_t)ha->req_q[1]->req_ring.bp +
19006                                     ha->req_q[1]->req_out_shadow_ofst);
19007                         }
19008                         ha->req_q[1]->req_q_number = 1;
19009                         if (ha->flags & MULTI_QUEUE) {
19010                                 ha->req_q[1]->mbar_req_in =
19011                                     ha->mbar_queue_offset + MBAR2_REQ_IN;
19012                                 ha->req_q[1]->mbar_req_out =
19013                                     ha->mbar_queue_offset + MBAR2_REQ_OUT;
19014                                 if (ha->req_q[1]->mbar_req_in >=
19015                                     ha->mbar_size) {
19016                                         EL(ha, "ha req_q index=1 exceeds mbar "
19017                                             "size=%xh", ha->mbar_size);
19018                                         ql_delete_queues(ha);
19019                                         return (QL_FUNCTION_PARAMETER_ERROR);
19020                                 }
19021                         }
19022                 }
19023 
19024                 /* Allocate enough rsp_queue descriptors for IRM */
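                      /*
                       * hsize is the interrupt handle table size in bytes, so
                       * this reserves one descriptor slot per possible vector
                       * in case IRM raises the vector count later.
                       */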
19025                 ha->rsp_queues_size = (ha->hsize / sizeof (ddi_intr_handle_t)) *
19026                     sizeof (ql_response_q_t *);
19027                 ha->rsp_queues = kmem_zalloc(ha->rsp_queues_size, KM_SLEEP);
19028 
19029                 /* Create rsp_queues for the current rsp_queue_cnt */
19030                 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19031                         rval = ql_create_rsp_queue(ha, cnt);
19032                         if (rval != QL_SUCCESS) {
19033                                 ql_delete_queues(ha);
19034                                 return (rval);
19035                         }
19036                 }
19037         }
19038 
19039         if (CFG_IST(ha, CFG_FCIP_TYPE_1)) {
19040                 /* Allocate IP receive queue. */
19041                 ha->rcv_ring.size = RCVBUF_QUEUE_SIZE;
19042                 ha->rcv_ring.type = LITTLE_ENDIAN_DMA;
19043                 ha->rcv_ring.max_cookie_count = 1;
19044                 ha->rcv_ring.alignment = 64;
19045                 if ((rval = ql_alloc_phys(ha, &ha->rcv_ring, KM_SLEEP)) !=
19046                     QL_SUCCESS) {
19047                         EL(ha, "receive queue status=%xh", rval);
19048                         ql_delete_queues(ha);
19049                         return (rval);
19050                 }
19051         }
19052 
19053         QL_PRINT_10(ha, "done\n");
19054 
19055         return (rval);
19056 }
19057 
19058 /*
19059  * ql_create_rsp_queue
19060  *      Allocates a response queue.
19061  *
19062  * Input:
19063  *      ha:             adapter state pointer.
       *      rsp_q_indx:     response queue index.
19064  *
19065  * Returns:
19066  *      ql driver local function return status codes
19067  *
19068  * Context:
19069  *      Kernel context.
19070  */
19071 static int
19072 ql_create_rsp_queue(ql_adapter_state_t *ha, uint16_t rsp_q_indx)
19073 {
19074         ql_response_q_t *rsp_q;
19075         int             rval = QL_SUCCESS;
19076 
19077         QL_PRINT_3(ha, "started\n");
19078 
19079         ha->rsp_queues[rsp_q_indx] = rsp_q =
19080             kmem_zalloc(sizeof (ql_response_q_t), KM_SLEEP);
19081         /* ISP response ring and interrupt protection. */
19082         mutex_init(&rsp_q->intr_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
19083         rsp_q->rsp_q_number = rsp_q_indx;
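              /* Response queue N is serviced by MSI-X vector N + 1. */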
19084         rsp_q->msi_x_vector = (uint16_t)(rsp_q_indx + 1);
19085         if (ha->flags & MULTI_QUEUE) {
19086                 rsp_q->mbar_rsp_in = rsp_q->rsp_q_number *
19087                     ha->mbar_queue_offset + MBAR2_RESP_IN;
19088                 rsp_q->mbar_rsp_out = rsp_q->rsp_q_number *
19089                     ha->mbar_queue_offset + MBAR2_RESP_OUT;
19090                 if (rsp_q->mbar_rsp_in >= ha->mbar_size) {
19091                         EL(ha, "rsp_q index=%xh exceeds mbar size=%xh",
19092                             rsp_q_indx, ha->mbar_size);
19093                         return (QL_FUNCTION_PARAMETER_ERROR);
19094                 }
19095         }
19096 
19097         rsp_q->rsp_entry_cnt = RESPONSE_ENTRY_CNT;
19098         rsp_q->rsp_ring.size = rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19099         if (ha->flags & QUEUE_SHADOW_PTRS) {
19100                 rsp_q->rsp_ring.size += SHADOW_ENTRY_SIZE;
19101         }
19102         rsp_q->rsp_ring.type = LITTLE_ENDIAN_DMA;
19103         rsp_q->rsp_ring.max_cookie_count = 1;
19104         rsp_q->rsp_ring.alignment = 64;
19105         rval = ql_alloc_phys(ha, &rsp_q->rsp_ring, KM_SLEEP);
19106         if (rval != QL_SUCCESS) {
19107                 EL(ha, "response queue status=%xh", rval);
19108         }
19109         if (ha->flags & QUEUE_SHADOW_PTRS) {
19110                 rsp_q->rsp_in_shadow_ofst =
19111                     rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19112                 rsp_q->rsp_in_shadow_ptr = (uint32_t *)
19113                     ((caddr_t)rsp_q->rsp_ring.bp +
19114                     rsp_q->rsp_in_shadow_ofst);
19115         }
19116 
19117         QL_PRINT_3(ha, "done\n");
19118         return (rval);
19119 }
19120 
19121 /*
19122  * ql_delete_queues
19123  *      Deletes request/response queues.
19124  *
19125  * Input:
19126  *      ha = adapter state pointer.
19127  *
19128  * Context:
19129  *      Kernel context.
19130  */
19131 static void
19132 ql_delete_queues(ql_adapter_state_t *ha)
19133 {
19134         uint32_t        cnt;
19135 
19136         QL_PRINT_10(ha, "started\n");
19137 
19138         if (ha->vp_index != 0) {
19139                 QL_PRINT_10(ha, "done, no multi-req-q \n");
19140                 ha->req_q[0] = ha->req_q[1] = NULL;
19141                 return;
19142         }
19143         if (ha->req_q[0] != NULL) {
19144                 ql_free_phys(ha, &ha->req_q[0]->req_ring);
19145                 kmem_free(ha->req_q[0], sizeof (ql_request_q_t));
19146                 ha->req_q[0] = NULL;
19147         }
19148         if (ha->req_q[1] != NULL) {
19149                 ql_free_phys(ha, &ha->req_q[1]->req_ring);
19150                 kmem_free(ha->req_q[1], sizeof (ql_request_q_t));
19151                 ha->req_q[1] = NULL;
19152         }
19153 
19154         if (ha->rsp_queues != NULL) {
19155                 ql_response_q_t *rsp_q;
19156 
19157                 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19158                         if ((rsp_q = ha->rsp_queues[cnt]) == NULL) {
19159                                 continue;
19160                         }
19161 
19162                         mutex_destroy(&rsp_q->intr_mutex);
19163                         ql_free_phys(ha, &rsp_q->rsp_ring);
19164                         kmem_free(rsp_q, sizeof (ql_response_q_t));
19165                         ha->rsp_queues[cnt] = NULL;
19166                 }
19167                 kmem_free(ha->rsp_queues, ha->rsp_queues_size);
19168                 ha->rsp_queues = NULL;
19169         }
19170 
19171         QL_PRINT_10(ha, "done\n");
19172 }
19173 
19174 /*
19175  * ql_multi_queue_support
19176  *      Tests 2500 or 8100 adapters for multi-queue support.
19177  *
19178  * Input:
19179  *      ha:     adapter state pointer.
19180  *
19181  * Returns:
19182  *      ql local function return status code.
19183  *
19184  * Context:
19185  *      Kernel context.
19186  */
19187 static int
19188 ql_multi_queue_support(ql_adapter_state_t *ha)
19189 {
19190         uint32_t        data;
19191         int             rval;
19192 
19193         data = ql_get_cap_ofst(ha, PCI_CAP_ID_MSI_X);
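              /*
               * The MSI-X table size field encodes (vector count - 1); only
               * enable multi-queue when more than three vectors are available.
               */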
19194         if ((ql_pci_config_get16(ha, data + PCI_MSIX_CTRL) &
19195             PCI_MSIX_TBL_SIZE_MASK) > 2) {
19196                 ha->mbar_size = MBAR2_MULTI_Q_MAX * MBAR2_REG_OFFSET;
19197 
19198                 if (ql_map_mem_bar(ha, &ha->mbar_dev_handle, &ha->mbar,
19199                     PCI_CONF_BASE3, ha->mbar_size) != DDI_SUCCESS) {
19200                         return (QL_FUNCTION_FAILED);
19201                 }
19202                 if ((rval = qlc_fm_check_acc_handle(ha,
19203                     ha->mbar_dev_handle)) != DDI_FM_OK) {
19204                         qlc_fm_report_err_impact(ha,
19205                             QL_FM_EREPORT_ACC_HANDLE_CHECK);
19206                         EL(ha, "fm_check_acc_handle mbar_dev_handle "
19207                             "status=%xh\n", rval);
19208                         return (QL_FUNCTION_FAILED);
19209                 }
19210                 return (QL_SUCCESS);
19211         }
19212         return (QL_FUNCTION_FAILED);
19213 }
19214 
19215 /*
19216  * ql_get_cap_ofst
19217  *      Locates PCI configuration space capability pointer
19218  *
19219  * Input:
19220  *      ha:     adapter state pointer.
19221  *      cap_id: Capability ID.
19222  *
19223  * Returns:
19224  *      capability offset
19225  *
19226  * Context:
19227  *      Kernel context.
19228  */
19229 int
19230 ql_get_cap_ofst(ql_adapter_state_t *ha, uint8_t cap_id)
19231 {
19232         int     cptr = PCI_CAP_NEXT_PTR_NULL;
19233 
19234         QL_PRINT_3(ha, "started\n");
19235 
19236         if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
19237                 cptr = ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
19238 
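                      /*
                       * Walk the capability list until the requested ID is
                       * found or the next pointer is NULL.
                       */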
19239                 while (cptr != PCI_CAP_NEXT_PTR_NULL) {
19240                         if (ql_pci_config_get8(ha, cptr) == cap_id) {
19241                                 break;
19242                         }
19243                         cptr = ql_pci_config_get8(ha, cptr + PCI_CAP_NEXT_PTR);
19244                 }
19245         }
19246 
19247         QL_PRINT_3(ha, "done\n");
19248         return (cptr);
19249 }
19250 
19251 /*
19252  * ql_map_mem_bar
19253  *      Map Mem BAR
19254  *
19255  * Input:
19256  *      ha:             adapter state pointer.
19257  *      handlep:        access handle pointer.
19258  *      addrp:          mapped address pointer.
19259  *      ofst:           BAR config space offset.
19260  *      len:            address space length.
19261  *
19262  * Returns:
19263  *      DDI_SUCCESS or DDI_FAILURE.
19264  *
19265  * Context:
19266  *      Kernel context.
19267  */
19268 static int
19269 ql_map_mem_bar(ql_adapter_state_t *ha, ddi_acc_handle_t *handlep,
19270     caddr_t *addrp, uint32_t ofst, uint32_t len)
19271 {
19272         caddr_t         nreg;
19273         pci_regspec_t   *reg, *reg2;
19274         int             rval;
19275         uint_t          rlen;
19276         uint32_t        rcnt, w32, nreg_size;
19277 
19278         QL_PRINT_10(ha, "started\n");
19279 
19280         /* Check for Mem BAR */
19281         w32 = ql_pci_config_get32(ha, ofst);
19282         if (w32 == 0) {
19283                 EL(ha, "no Mem BAR %xh\n", ofst);
19284                 return (DDI_FAILURE);
19285         }
19286 
19287         /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
19288         if ((rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ha->dip,
19289             DDI_PROP_DONTPASS, "reg", (int **)&reg, &rlen)) !=
19290             DDI_PROP_SUCCESS) {
19291                 EL(ha, "ddi_prop_lookup_int_array status=%xh\n", rval);
19292                 return (DDI_FAILURE);
19293         }
19294         rlen = (uint_t)(rlen * sizeof (int));   /* in bytes */
19295         rcnt = (uint32_t)(rlen / sizeof (pci_regspec_t));
19296 
19297         /* Check if register already added. */
19298         reg2 = reg;
19299         for (w32 = 0; w32 < rcnt; w32++) {
19300                 if ((reg2->pci_phys_hi & PCI_REG_REG_M) == ofst) {
19301                         EL(ha, "already mapped\n");
19302                         break;
19303                 }
19304                 reg2++;
19305         }
19306         if (w32 == rcnt) {
19307                 /*
19308                  * Allocate memory for the existing reg(s) plus one and then
19309                  * build it.
19310                  */
19311                 nreg_size = (uint32_t)(rlen + sizeof (pci_regspec_t));
19312                 nreg = kmem_zalloc(nreg_size, KM_SLEEP);
19313 
19314                 /*
19315                  * Find a current map memory reg to copy.
19316                  */
19317                 reg2 = reg;
19318                 while ((reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19319                     PCI_ADDR_MEM32 && (reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19320                     PCI_ADDR_MEM64) {
19321                         reg2++;
19322                         if ((caddr_t)reg2 >= (caddr_t)reg + rlen) {
19323                                 reg2 = reg;
19324                                 break;
19325                         }
19326                 }
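                      /*
                       * Build the phys_hi word for the new entry: keep
                       * everything but the register number field from the
                       * copied memory entry and substitute this BAR's config
                       * space offset.
                       */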
19327                 w32 = (reg2->pci_phys_hi & ~PCI_REG_REG_M) | ofst;
19328 
19329                 bcopy(reg, nreg, rlen);
19330                 reg2 = (pci_regspec_t *)(nreg + rlen);
19331 
19332                 reg2->pci_phys_hi = w32;
19333                 reg2->pci_phys_mid = 0;
19334                 reg2->pci_phys_low = 0;
19335                 reg2->pci_size_hi = 0;
19336                 reg2->pci_size_low = len;
19337 
19338                 /*
19339                  * Write out the new "reg" property
19340                  */
19341                 /*LINTED [Solaris DDI_DEV_T_NONE Lint error]*/
19342                 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, ha->dip,
19343                     "reg", (int *)nreg, (uint_t)(nreg_size / sizeof (int)));
19344 
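                      /*
                       * The new entry was appended last; its index is the
                       * register number passed to ddi_regs_map_setup() below.
                       */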
19345                 w32 = (uint_t)(nreg_size / sizeof (pci_regspec_t) - 1);
19346                 kmem_free((caddr_t)nreg, nreg_size);
19347         }
19348 
19349         ddi_prop_free(reg);
19350 
19351         /* Map register */
19352         rval = ddi_regs_map_setup(ha->dip, w32, addrp, 0, len,
19353             &ql_dev_acc_attr, handlep);
19354         if (rval != DDI_SUCCESS || *addrp == NULL || *handlep == NULL) {
19355                 EL(ha, "regs_map status=%xh, base=%xh, handle=%xh\n",
19356                     rval, *addrp, *handlep);
19357                 if (*handlep != NULL) {
19358                         ddi_regs_map_free(handlep);
19359                         *handlep = NULL;
19360                 }
19361         }
19362 
19363         QL_PRINT_10(ha, "done\n");
19364 
19365         return (rval);
19366 }
19367 
19368 /*
19369  * ql_intr_lock
19370  *      Acquires all interrupt locks.
19371  *
19372  * Input:
19373  *      ha:     adapter state pointer.
19374  *
19375  * Context:
19376  *      Kernel/Interrupt context.
19377  */
19378 void
19379 ql_intr_lock(ql_adapter_state_t *ha)
19380 {
19381         uint16_t        cnt;
19382 
19383         QL_PRINT_3(ha, "started\n");
19384 
19385         if (ha->rsp_queues != NULL) {
19386                 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19387                         if (ha->rsp_queues[cnt] != NULL) {
19388                                 INDX_INTR_LOCK(ha, cnt);
19389                         }
19390                 }
19391         }
19392         QL_PRINT_3(ha, "done\n");
19393 }
19394 
19395 /*
19396  * ql_intr_unlock
19397  *      Releases all interrupt locks.
19398  *
19399  * Input:
19400  *      ha:     adapter state pointer.
19401  *
19402  * Context:
19403  *      Kernel/Interrupt context.
19404  */
19405 void
19406 ql_intr_unlock(ql_adapter_state_t *ha)
19407 {
19408         uint16_t        cnt;
19409 
19410         QL_PRINT_3(ha, "started\n");
19411 
19412         if (ha->rsp_queues != NULL) {
19413                 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19414                         if (ha->rsp_queues[cnt] != NULL) {
19415                                 INDX_INTR_UNLOCK(ha, cnt);
19416                         }
19417                 }
19418         }
19419         QL_PRINT_3(ha, "done\n");
19420 }
19421 
19422 /*
19423  * ql_completion_thread
19424  *      I/O completion thread.
19425  *
19426  * Input:
19427  *      arg:    adapter state pointer.
19428  *      COMP_Q_LOCK is acquired and released within this thread.
19429  *
19430  * Context:
19431  *      Kernel context.
19432  */
19433 static void
19434 ql_completion_thread(void *arg)
19435 {
19436         ql_srb_t                *sp;
19437         ql_adapter_state_t      *ha = arg;
19438 
19439         QL_PRINT_3(ha, "started, hsp=%p\n", (void *)&sp);
19440 
19441         COMP_Q_LOCK(ha);
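              /*
               * comp_thds_active counts threads running this loop;
               * comp_thds_awake counts those not blocked in cv_wait().
               */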
19442         ha->comp_thds_active++;
19443         ha->comp_thds_awake++;
19444         while (!(ha->flags & COMP_THD_TERMINATE)) {
19445                 /* process completion queue items */
19446                 while (ha->comp_q.first != NULL) {
19447                         sp = (ha->comp_q.first)->base_address;
19448                         /* Remove command from completion queue */
19449                         ql_remove_link(&ha->comp_q, &sp->cmd);
19450                         COMP_Q_UNLOCK(ha);
19451                         QL_PRINT_3(ha, "pkt_comp, sp=%p, pkt_state=%xh, "
19452                             "hsp=%p\n", (void*)sp, sp->pkt->pkt_state,
19453                             (void *)&sp);
19454                         (sp->pkt->pkt_comp)(sp->pkt);
19455                         COMP_Q_LOCK(ha);
19456                 }
19457                 ha->comp_thds_awake--;
19458                 QL_PRINT_3(ha, "sleep, hsp=%p\n", (void *)&sp);
19459                 cv_wait(&ha->cv_comp_thread, &ha->comp_q_mutex);
19460                 QL_PRINT_3(ha, "awoke, hsp=%p\n", (void *)&sp);
19461         }
19462         ha->comp_thds_awake--;
19463         ha->comp_thds_active--;
19464         COMP_Q_UNLOCK(ha);
19465 
19466         QL_PRINT_3(ha, "done\n");
19467 }
19468 
19469 /*
19470  * ql_io_comp
19471  *      Transport I/O completion
19472  *
19473  * Input:
19474  *      sp:     SRB structure pointer
19475  *
19476  * Context:
19477  *      Kernel context.
19478  */
19479 void
19480 ql_io_comp(ql_srb_t *sp)
19481 {
19482         ql_adapter_state_t      *ha = sp->ha->pha;
19483 
19484         QL_PRINT_3(ha, "started, sp=%ph, d_id=%xh\n", (void*)sp,
19485             sp->pkt->pkt_cmd_fhdr.d_id);
19486 
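              /*
               * Queue the srb for the completion thread when a callback
               * exists and the system is not panicking.
               */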
19487         if (sp->pkt->pkt_comp && !ddi_in_panic()) {
19488                 QL_PRINT_3(ha, "added to comp_q\n");
19489                 COMP_Q_LOCK(ha);
19490                 ql_add_link_b(&ha->comp_q, &sp->cmd);
19491                 if (ha->comp_thds_awake < ha->comp_thds_active) {
19492                         ha->comp_thds_awake++;
19493                         QL_PRINT_3(ha, "signal\n");
19494                         cv_signal(&ha->cv_comp_thread);
19495                 }
19496                 COMP_Q_UNLOCK(ha);
19497         }
19498 
19499         QL_PRINT_3(ha, "done\n");
19500 }
19501 
19502 /*
19503  * ql_process_comp_queue
19504  *      Process completion queue entries.
19505  *
19506  * Input:
19507  *      arg:    adapter state pointer.
19508  *
19509  * Context:
19510  *      Kernel context.
19511  */
19512 static void
19513 ql_process_comp_queue(void *arg)
19514 {
19515         ql_srb_t                *sp;
19516         ql_adapter_state_t      *ha = arg;
19517 
19518         QL_PRINT_3(ha, "started\n");
19519 
19520         COMP_Q_LOCK(ha);
19521 
19522         /* process completion queue items */
19523         while (ha->comp_q.first != NULL) {
19524                 sp = (ha->comp_q.first)->base_address;
19525                 QL_PRINT_3(ha, "sending comp=0x%p\n", (void *)sp);
19526                 /* Remove command from completion queue */
19527                 ql_remove_link(&ha->comp_q, &sp->cmd);
19528                 COMP_Q_UNLOCK(ha);
19529                 (sp->pkt->pkt_comp)(sp->pkt);
19530                 COMP_Q_LOCK(ha);
19531         }
19532 
19533         COMP_Q_UNLOCK(ha);
19534 
19535         QL_PRINT_3(ha, "done\n");
19536 }
19537 
19538 /*
19539  * ql_abort_io
19540  *      Abort I/O.
19541  *
19542  * Input:
19543  *      ha:     adapter state pointer.
19544  *      sp:     SRB pointer.
19545  *
19546  * Returns:
19547  *      ql local function return status code.
19548  *
19549  * Context:
19550  *      Kernel context.
19551  */
19552 static int
19553 ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *sp)
19554 {
19555         ql_link_t               *link;
19556         ql_srb_t                *sp2;
19557         ql_tgt_t                *tq;
19558         ql_lun_t                *lq;
19559         int                     rval = QL_FUNCTION_FAILED;
19560         ql_adapter_state_t      *ha = vha->pha;
19561 
19562         QL_PRINT_10(ha, "started, sp=%ph, handle=%xh\n", (void *)sp,
19563             sp->handle);
19564 
19565         if ((lq = sp->lun_queue) != NULL) {
19566                 tq = lq->target_queue;
19567         } else {
19568                 tq = NULL;
19569         }
19570 
19571         /* Acquire target queue lock. */
19572         if (tq) {
19573                 DEVICE_QUEUE_LOCK(tq);
19574         }
19575         REQUEST_RING_LOCK(ha);
19576 
19577         /* If command not already started. */
19578         if (!(sp->flags & SRB_ISP_STARTED)) {
19579                 rval = QL_FUNCTION_PARAMETER_ERROR;
19580 
19581                 /* Check pending queue for command. */
19582                 for (link = ha->pending_cmds.first; link != NULL;
19583                     link = link->next) {
19584                         sp2 = link->base_address;
19585                         if (sp2 == sp) {
19586                                 rval = QL_SUCCESS;
19587                                 /* Remove srb from pending command queue */
19588                                 ql_remove_link(&ha->pending_cmds, &sp->cmd);
19589                                 break;
19590                         }
19591                 }
19592 
19593                 if (link == NULL && lq) {
19594                         /* Check for cmd on device queue. */
19595                         for (link = lq->cmd.first; link != NULL;
19596                             link = link->next) {
19597                                 sp2 = link->base_address;
19598                                 if (sp2 == sp) {
19599                                         rval = QL_SUCCESS;
19600                                         /* Remove srb from device queue. */
19601                                         ql_remove_link(&lq->cmd, &sp->cmd);
19602                                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
19603                                         break;
19604                                 }
19605                         }
19606                 }
19607         }
19608 
19609         REQUEST_RING_UNLOCK(ha);
19610         if (tq) {
19611                 DEVICE_QUEUE_UNLOCK(tq);
19612         }
19613 
19614         if (sp->flags & SRB_ISP_COMPLETED || rval == QL_SUCCESS) {
19615                 rval = QL_SUCCESS;
19616         } else {
19617                 uint32_t        index;
19618 
19619                 INTR_LOCK(ha);
19620                 sp->flags |= SRB_ABORTING;
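                      /*
                       * If the command is still outstanding on the ISP, mark
                       * its handle slot aborted, adjust per-target/per-lun
                       * counts, and issue an abort of the command to the ISP.
                       */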
19621                 if (sp->handle != 0) {
19622                         index = sp->handle & OSC_INDEX_MASK;
19623                         if (ha->outstanding_cmds[index] == sp) {
19624                                 ha->outstanding_cmds[index] =
19625                                     QL_ABORTED_SRB(ha);
19626                         }
19627                         /* Decrement outstanding commands on device. */
19628                         if (tq != NULL && tq->outcnt != 0) {
19629                                 tq->outcnt--;
19630                         }
19631                         if (lq != NULL && sp->flags & SRB_FCP_CMD_PKT &&
19632                             lq->lun_outcnt != 0) {
19633                                 lq->lun_outcnt--;
19634                         }
19635                         /* Remove command from watchdog queue. */
19636                         if (sp->flags & SRB_WATCHDOG_ENABLED) {
19637                                 if (tq != NULL) {
19638                                         ql_remove_link(&tq->wdg, &sp->wdg);
19639                                 }
19640                                 sp->flags &= ~SRB_WATCHDOG_ENABLED;
19641                         }
19642                         INTR_UNLOCK(ha);
19643                         (void) ql_abort_command(ha, sp);
19644                         sp->handle = 0;
19645                 } else {
19646                         INTR_UNLOCK(ha);
19647                 }
19648                 rval = QL_SUCCESS;
19649         }
19650 
19651         if (rval != QL_SUCCESS) {
19652                 EL(ha, "sp=%p not aborted=%xh\n", (void *)sp, rval);
19653         } else {
19654                 /*EMPTY*/
19655                 QL_PRINT_10(ha, "done\n");
19656         }
19657         return (rval);
19658 }
19659 
19660 /*
19661  * ql_idc
19662  *      Inter driver communication thread.
19663  *
19664  * Input:
19665  *      ha = adapter state pointer.
19666  *
19667  * Context:
19668  *      Kernel context.
19669  */
19670 static void
19671 ql_idc(ql_adapter_state_t *ha)
19672 {
19673         int             rval;
19674         uint32_t        timer = 300;
19675 
19676         QL_PRINT_10(ha, "started\n");
19677 
19678         for (;;) {
19679                 /* IDC Stall needed. */
19680                 if (ha->flags & IDC_STALL_NEEDED) {
19681                         ADAPTER_STATE_LOCK(ha);
19682                         ha->flags &= ~IDC_STALL_NEEDED;
19683                         ADAPTER_STATE_UNLOCK(ha);
19684                         TASK_DAEMON_LOCK(ha);
19685                         ha->task_daemon_flags |= DRIVER_STALL;
19686                         TASK_DAEMON_UNLOCK(ha);
19687                         if (LOOP_READY(ha)) {
19688                                 if ((ha->idc_mb[1] & IDC_TIMEOUT_MASK) <
19689                                     IDC_TIMEOUT_MASK) {
19690                                         ha->idc_mb[1] = (uint16_t)
19691                                             (ha->idc_mb[1] | IDC_TIMEOUT_MASK);
19692                                         rval = ql_idc_time_extend(ha);
19693                                         if (rval != QL_SUCCESS) {
19694                                                 EL(ha, "idc_time_extend status"
19695                                                     "=%xh\n", rval);
19696                                         }
19697                                 }
19698                                 (void) ql_wait_outstanding(ha);
19699                         }
19700                 }
19701 
19702                 /* IDC ACK needed. */
19703                 if (ha->flags & IDC_ACK_NEEDED) {
19704                         ADAPTER_STATE_LOCK(ha);
19705                         ha->flags &= ~IDC_ACK_NEEDED;
19706                         ADAPTER_STATE_UNLOCK(ha);
19707                         rval = ql_idc_ack(ha);
19708                         if (rval != QL_SUCCESS) {
19709                                 EL(ha, "idc_ack status=%xh\n", rval);
19710                                 ADAPTER_STATE_LOCK(ha);
19711                                 ha->flags |= IDC_RESTART_NEEDED;
19712                                 ADAPTER_STATE_UNLOCK(ha);
19713                         }
19714                 }
19715 
19716                 /* IDC Restart needed. */
19717                 if (timer-- == 0 || ha->flags & ADAPTER_SUSPENDED ||
19718                     (ha->flags & IDC_RESTART_NEEDED &&
19719                     !(ha->flags & LOOPBACK_ACTIVE))) {
19720                         ADAPTER_STATE_LOCK(ha);
19721                         ha->flags &= ~(IDC_RESTART_NEEDED | IDC_STALL_NEEDED |
19722                             IDC_ACK_NEEDED);
19723                         ADAPTER_STATE_UNLOCK(ha);
19724                         TASK_DAEMON_LOCK(ha);
19725                         ha->task_daemon_flags &= ~DRIVER_STALL;
19726                         TASK_DAEMON_UNLOCK(ha);
19727                         if (LOOP_READY(ha)) {
19728                                 ql_restart_queues(ha);
19729                         }
19730                         break;
19731                 }
19732                 delay(10);
19733         }
19734 
19735         QL_PRINT_10(ha, "done\n");
19736 }
19737 
19738 /*
19739  * ql_get_lun_addr
19740  *      Gets the lun address.
19741  *
19742  * Input:
19743  *      tq:     target queue pointer.
19744  *      lun:    the lun number.
19745  *
19746  * Returns:
19747  *      the lun address.
19748  *
19749  * Context:
19750  *      Interrupt or Kernel context, no mailbox commands allowed.
19751  */
19752 uint64_t
19753 ql_get_lun_addr(ql_tgt_t *tq, uint16_t lun)
19754 {
19755         ql_lun_t                *lq;
19756         ql_link_t               *link = NULL;
19757         uint64_t                lun_addr = 0;
19758         fcp_ent_addr_t          *fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
19759 
19760         /* If the lun queue exists */
19761         if (tq) {
19762                 for (link = tq->lun_queues.first; link != NULL;
19763                     link = link->next) {
19764                         lq = link->base_address;
19765                         if (lq->lun_no == lun) {
19766                                 break;
19767                         }
19768                 }
19769         }
19770         if (link == NULL) {
19771                 /* create an fcp_ent_addr from the lun number */
19772                 if (MSB(lun)) {
19773                         fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19774                             (hibyte(lun) | QL_LUN_AM_FLAT));
19775                 } else {
19776                         fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19777                             hibyte(lun));
19778                 }
19779         } else {
19780                 lun_addr = lq->lun_addr;
19781         }
19782 
19783         return (lun_addr);
19784 }
19785 
19786 
19787 /*
19788  * ql_83xx_binary_fw_dump
19789  *      Retrieves ISP83xx firmware/RISC state for a binary dump.
       *
19790  * Input:
19791  *      ha:     adapter state pointer.
19792  *      fw:     firmware dump context pointer.
19793  *
19794  * Returns:
19795  *      ql local function return status code.
19796  *
19797  * Context:
19798  *      Interrupt or Kernel context, no mailbox commands allowed.
19799  */
19800 static int
19801 ql_83xx_binary_fw_dump(ql_adapter_state_t *ha, ql_83xx_fw_dump_t *fw)
19802 {
19803         uint32_t        *reg32, cnt, *w32ptr, index, *dp;
19804         void            *bp;
19805         clock_t         timer;
19806         int             rv, rval = QL_SUCCESS;
19807 
19808         QL_PRINT_3(ha, "started\n");
19809 
19810         fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
19811         if (ha->req_q[1] != NULL) {
19812                 fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
19813         }
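              /*
               * All response rings are created with the same size, so one
               * ring size times the queue count covers them all.
               */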
19814         fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
19815 
19816         fw->hccr = RD32_IO_REG(ha, hccr);
19817         fw->r2h_status = RD32_IO_REG(ha, risc2host);
19818         fw->aer_ues = ql_pci_config_get32(ha, 0x104);
19819 
19820         /* Disable ISP interrupts. */
19821         ql_disable_intr(ha);
19822 
19823         /* Pause RISC. */
19824         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
19825                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
19826                 for (timer = 30000;
19827                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
19828                     rval == QL_SUCCESS; timer--) {
19829                         if (timer) {
19830                                 drv_usecwait(100);
19831                                 if (timer % 10000 == 0) {
19832                                         EL(ha, "risc pause %d\n", timer);
19833                                 }
19834                         } else {
19835                                 EL(ha, "risc pause timeout\n");
19836                                 rval = QL_FUNCTION_TIMEOUT;
19837                         }
19838                 }
19839         }
19840 
19841         WRT32_IO_REG(ha, io_base_addr, 0x6000);
19842         WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0);
19843         WRT_REG_DWORD(ha, ha->iobase + 0xcc, 0);
19844 
19845         WRT32_IO_REG(ha, io_base_addr, 0x6010);
19846         WRT_REG_DWORD(ha, ha->iobase + 0xd4, 0);
19847 
19848         WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19849         WRT_REG_DWORD(ha, ha->iobase + 0xf0, 0x60000000);
19850 
19851         /* Host Interface registers */
19852 
19853         /* HostRisc registers. */
19854         WRT32_IO_REG(ha, io_base_addr, 0x7000);
19855         bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
19856             16, 32);
19857         WRT32_IO_REG(ha, io_base_addr, 0x7010);
19858         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19859         WRT32_IO_REG(ha, io_base_addr, 0x7040);
19860         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19861 
19862         /* PCIe registers. */
19863         WRT32_IO_REG(ha, io_base_addr, 0x7c00);
19864         WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
19865         bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
19866             3, 32);
19867         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
19868         WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
19869 
19870         /* Host interface registers. */
19871         (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
19872             sizeof (fw->host_reg) / 4, 32);
19873 
19874         /* Shadow registers. */
19875 
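              /*
               * Each shadow register is captured by writing a selector value
               * to I/O offset 0xF0 and reading the result back from offset
               * 0xFC.
               */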
19876         WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19877         RD32_IO_REG(ha, io_base_addr);
19878 
19879         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19880         WRT_REG_DWORD(ha, reg32, 0xB0000000);
19881         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19882         fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
19883 
19884         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19885         WRT_REG_DWORD(ha, reg32, 0xB0100000);
19886         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19887         fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
19888 
19889         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19890         WRT_REG_DWORD(ha, reg32, 0xB0200000);
19891         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19892         fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
19893 
19894         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19895         WRT_REG_DWORD(ha, reg32, 0xB0300000);
19896         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19897         fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
19898 
19899         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19900         WRT_REG_DWORD(ha, reg32, 0xB0400000);
19901         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19902         fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
19903 
19904         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19905         WRT_REG_DWORD(ha, reg32, 0xB0500000);
19906         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19907         fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
19908 
19909         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19910         WRT_REG_DWORD(ha, reg32, 0xB0600000);
19911         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19912         fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
19913 
19914         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19915         WRT_REG_DWORD(ha, reg32, 0xB0700000);
19916         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19917         fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
19918 
19919         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19920         WRT_REG_DWORD(ha, reg32, 0xB0800000);
19921         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19922         fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
19923 
19924         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19925         WRT_REG_DWORD(ha, reg32, 0xB0900000);
19926         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19927         fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
19928 
19929         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19930         WRT_REG_DWORD(ha, reg32, 0xB0A00000);
19931         reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19932         fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
19933 
19934         /* RISC I/O register. */
19935 
19936         WRT32_IO_REG(ha, io_base_addr, 0x0010);
19937         (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
19938             1, 32);
19939 
19940         /* Mailbox registers. */
19941 
19942         (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
19943             sizeof (fw->mailbox_reg) / 2, 16);
19944 
19945         /* Transfer sequence registers. */
19946 
19947         /* XSEQ GP */
19948         WRT32_IO_REG(ha, io_base_addr, 0xBE00);
19949         bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
19950             16, 32);
19951         WRT32_IO_REG(ha, io_base_addr, 0xBE10);
19952         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19953         WRT32_IO_REG(ha, io_base_addr, 0xBE20);
19954         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19955         WRT32_IO_REG(ha, io_base_addr, 0xBE30);
19956         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19957         WRT32_IO_REG(ha, io_base_addr, 0xBE40);
19958         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19959         WRT32_IO_REG(ha, io_base_addr, 0xBE50);
19960         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19961         WRT32_IO_REG(ha, io_base_addr, 0xBE60);
19962         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19963         WRT32_IO_REG(ha, io_base_addr, 0xBE70);
19964         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19965         WRT32_IO_REG(ha, io_base_addr, 0xBF00);
19966         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19967         WRT32_IO_REG(ha, io_base_addr, 0xBF10);
19968         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19969         WRT32_IO_REG(ha, io_base_addr, 0xBF20);
19970         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19971         WRT32_IO_REG(ha, io_base_addr, 0xBF30);
19972         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19973         WRT32_IO_REG(ha, io_base_addr, 0xBF40);
19974         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19975         WRT32_IO_REG(ha, io_base_addr, 0xBF50);
19976         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19977         WRT32_IO_REG(ha, io_base_addr, 0xBF60);
19978         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19979         WRT32_IO_REG(ha, io_base_addr, 0xBF70);
19980         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19981 
19982         /* XSEQ-0 */
19983         WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
19984         bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0, 16, 32);
19985         WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
19986         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19987         WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
19988         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19989 
19990         /* XSEQ-1 */
19991         WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
19992         (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
19993             16, 32);
19994 
19995         /* XSEQ-2 */
19996         WRT32_IO_REG(ha, io_base_addr, 0xBEF0);
19997         (void) ql_read_regs(ha, fw->xseq_2_reg, ha->iobase + 0xC0,
19998             16, 32);
19999 
20000         /* Receive sequence registers. */
20001 
20002         /* RSEQ GP */
20003         WRT32_IO_REG(ha, io_base_addr, 0xFE00);
20004         bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0, 16, 32);
20005         WRT32_IO_REG(ha, io_base_addr, 0xFE10);
20006         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20007         WRT32_IO_REG(ha, io_base_addr, 0xFE20);
20008         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20009         WRT32_IO_REG(ha, io_base_addr, 0xFE30);
20010         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20011         WRT32_IO_REG(ha, io_base_addr, 0xFE40);
20012         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20013         WRT32_IO_REG(ha, io_base_addr, 0xFE50);
20014         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20015         WRT32_IO_REG(ha, io_base_addr, 0xFE60);
20016         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20017         WRT32_IO_REG(ha, io_base_addr, 0xFE70);
20018         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20019         WRT32_IO_REG(ha, io_base_addr, 0xFF00);
20020         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20021         WRT32_IO_REG(ha, io_base_addr, 0xFF10);
20022         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20023         WRT32_IO_REG(ha, io_base_addr, 0xFF20);
20024         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20025         WRT32_IO_REG(ha, io_base_addr, 0xFF30);
20026         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20027         WRT32_IO_REG(ha, io_base_addr, 0xFF40);
20028         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20029         WRT32_IO_REG(ha, io_base_addr, 0xFF50);
20030         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20031         WRT32_IO_REG(ha, io_base_addr, 0xFF60);
20032         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20033         WRT32_IO_REG(ha, io_base_addr, 0xFF70);
20034         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20035 
20036         /* RSEQ-0 */
20037         WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
20038         bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
20039             16, 32);
20040         WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
20041         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20042 
20043         /* RSEQ-1 */
20044         WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
20045         (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
20046             sizeof (fw->rseq_1_reg) / 4, 32);
20047 
20048         /* RSEQ-2 */
20049         WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
20050         (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
20051             sizeof (fw->rseq_2_reg) / 4, 32);
20052 
20053         /* RSEQ-3 */
20054         WRT32_IO_REG(ha, io_base_addr, 0xFEF0);
20055         (void) ql_read_regs(ha, fw->rseq_3_reg, ha->iobase + 0xC0,
20056             sizeof (fw->rseq_3_reg) / 4, 32);
20057 
20058         /* Auxiliary sequencer registers. */
20059 
20060         /* ASEQ GP */
20061         WRT32_IO_REG(ha, io_base_addr, 0xB000);
20062         bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0, 16, 32);
20063         WRT32_IO_REG(ha, io_base_addr, 0xB010);
20064         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20065         WRT32_IO_REG(ha, io_base_addr, 0xB020);
20066         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20067         WRT32_IO_REG(ha, io_base_addr, 0xB030);
20068         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20069         WRT32_IO_REG(ha, io_base_addr, 0xB040);
20070         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20071         WRT32_IO_REG(ha, io_base_addr, 0xB050);
20072         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20073         WRT32_IO_REG(ha, io_base_addr, 0xB060);
20074         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20075         WRT32_IO_REG(ha, io_base_addr, 0xB070);
20076         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20077         WRT32_IO_REG(ha, io_base_addr, 0xB100);
20078         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20079         WRT32_IO_REG(ha, io_base_addr, 0xB110);
20080         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20081         WRT32_IO_REG(ha, io_base_addr, 0xB120);
20082         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20083         WRT32_IO_REG(ha, io_base_addr, 0xB130);
20084         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20085         WRT32_IO_REG(ha, io_base_addr, 0xB140);
20086         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20087         WRT32_IO_REG(ha, io_base_addr, 0xB150);
20088         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20089         WRT32_IO_REG(ha, io_base_addr, 0xB160);
20090         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20091         WRT32_IO_REG(ha, io_base_addr, 0xB170);
20092         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20093 
20094         /* ASEQ-0 */
20095         WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
20096         bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
20097             16, 32);
20098         WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
20099         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20100 
20101         /* ASEQ-1 */
20102         WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
20103         (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
20104             16, 32);
20105 
20106         /* ASEQ-2 */
20107         WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
20108         (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
20109             16, 32);
20110 
20111         /* ASEQ-3 */
20112         WRT32_IO_REG(ha, io_base_addr, 0xB1F0);
20113         (void) ql_read_regs(ha, fw->aseq_3_reg, ha->iobase + 0xC0,
20114             16, 32);
20115 
20116         /* Command DMA registers. */
20117 
20118         WRT32_IO_REG(ha, io_base_addr, 0x7100);
20119         bp = ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
20120             16, 32);
20121         WRT32_IO_REG(ha, io_base_addr, 0x7120);
20122         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20123         WRT32_IO_REG(ha, io_base_addr, 0x7130);
20124         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20125         WRT32_IO_REG(ha, io_base_addr, 0x71f0);
20126         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20127 
20128         /* Queues. */
20129 
20130         /* RequestQ0 */
20131         WRT32_IO_REG(ha, io_base_addr, 0x7200);
20132         bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
20133             8, 32);
20134         (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20135 
20136         /* ResponseQ0 */
20137         WRT32_IO_REG(ha, io_base_addr, 0x7300);
20138         bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
20139             8, 32);
20140         (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20141 
20142         /* RequestQ1 */
20143         WRT32_IO_REG(ha, io_base_addr, 0x7400);
20144         bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
20145             8, 32);
20146         (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20147 
20148         /* Transmit DMA registers. */
20149 
20150         /* XMT0 */
20151         WRT32_IO_REG(ha, io_base_addr, 0x7600);
20152         bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
20153             16, 32);
20154         WRT32_IO_REG(ha, io_base_addr, 0x7610);
20155         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20156 
20157         /* XMT1 */
20158         WRT32_IO_REG(ha, io_base_addr, 0x7620);
20159         bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
20160             16, 32);
20161         WRT32_IO_REG(ha, io_base_addr, 0x7630);
20162         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20163 
20164         /* XMT2 */
20165         WRT32_IO_REG(ha, io_base_addr, 0x7640);
20166         bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
20167             16, 32);
20168         WRT32_IO_REG(ha, io_base_addr, 0x7650);
20169         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20170 
20171         /* XMT3 */
20172         WRT32_IO_REG(ha, io_base_addr, 0x7660);
20173         bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
20174             16, 32);
20175         WRT32_IO_REG(ha, io_base_addr, 0x7670);
20176         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20177 
20178         /* XMT4 */
20179         WRT32_IO_REG(ha, io_base_addr, 0x7680);
20180         bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
20181             16, 32);
20182         WRT32_IO_REG(ha, io_base_addr, 0x7690);
20183         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20184 
20185         /* XMT Common */
20186         WRT32_IO_REG(ha, io_base_addr, 0x76A0);
20187         (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
20188             ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
20189 
20190         /* Receive DMA registers. */
20191 
20192         /* RCVThread0 */
20193         WRT32_IO_REG(ha, io_base_addr, 0x7700);
20194         bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
20195             ha->iobase + 0xC0, 16, 32);
20196         WRT32_IO_REG(ha, io_base_addr, 0x7710);
20197         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20198 
20199         /* RCVThread1 */
20200         WRT32_IO_REG(ha, io_base_addr, 0x7720);
20201         bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
20202             ha->iobase + 0xC0, 16, 32);
20203         WRT32_IO_REG(ha, io_base_addr, 0x7730);
20204         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20205 
20206         /* RISC registers. */
20207 
20208         /* RISC GP */
20209         WRT32_IO_REG(ha, io_base_addr, 0x0F00);
20210         bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0, 16, 32);
20211         WRT32_IO_REG(ha, io_base_addr, 0x0F10);
20212         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20213         WRT32_IO_REG(ha, io_base_addr, 0x0F20);
20214         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20215         WRT32_IO_REG(ha, io_base_addr, 0x0F30);
20216         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20217         WRT32_IO_REG(ha, io_base_addr, 0x0F40);
20218         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20219         WRT32_IO_REG(ha, io_base_addr, 0x0F50);
20220         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20221         WRT32_IO_REG(ha, io_base_addr, 0x0F60);
20222         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20223         WRT32_IO_REG(ha, io_base_addr, 0x0F70);
20224         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20225 
20226         /* Local memory controller (LMC) registers. */
20227 
20228         /* LMC */
20229         WRT32_IO_REG(ha, io_base_addr, 0x3000);
20230         bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0, 16, 32);
20231         WRT32_IO_REG(ha, io_base_addr, 0x3010);
20232         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20233         WRT32_IO_REG(ha, io_base_addr, 0x3020);
20234         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20235         WRT32_IO_REG(ha, io_base_addr, 0x3030);
20236         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20237         WRT32_IO_REG(ha, io_base_addr, 0x3040);
20238         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20239         WRT32_IO_REG(ha, io_base_addr, 0x3050);
20240         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20241         WRT32_IO_REG(ha, io_base_addr, 0x3060);
20242         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20243         WRT32_IO_REG(ha, io_base_addr, 0x3070);
20244         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20245 
20246         /* Fibre Protocol Module registers. */
20247 
20248         /* FPM hardware */
20249         WRT32_IO_REG(ha, io_base_addr, 0x4000);
20250         bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0, 16, 32);
20251         WRT32_IO_REG(ha, io_base_addr, 0x4010);
20252         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20253         WRT32_IO_REG(ha, io_base_addr, 0x4020);
20254         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20255         WRT32_IO_REG(ha, io_base_addr, 0x4030);
20256         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20257         WRT32_IO_REG(ha, io_base_addr, 0x4040);
20258         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20259         WRT32_IO_REG(ha, io_base_addr, 0x4050);
20260         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20261         WRT32_IO_REG(ha, io_base_addr, 0x4060);
20262         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20263         WRT32_IO_REG(ha, io_base_addr, 0x4070);
20264         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20265         WRT32_IO_REG(ha, io_base_addr, 0x4080);
20266         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20267         WRT32_IO_REG(ha, io_base_addr, 0x4090);
20268         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20269         WRT32_IO_REG(ha, io_base_addr, 0x40A0);
20270         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20271         WRT32_IO_REG(ha, io_base_addr, 0x40B0);
20272         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20273         WRT32_IO_REG(ha, io_base_addr, 0x40C0);
20274         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20275         WRT32_IO_REG(ha, io_base_addr, 0x40D0);
20276         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20277         WRT32_IO_REG(ha, io_base_addr, 0x40E0);
20278         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20279         WRT32_IO_REG(ha, io_base_addr, 0x40F0);
20280         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20281 
20282         /* Pointer arrays registers */
20283 
20284         /* RQ0 Array registers. */
20285         WRT32_IO_REG(ha, io_base_addr, 0x5C00);
20286         bp = ql_read_regs(ha, fw->rq0_array_reg, ha->iobase + 0xC0,
20287             16, 32);
20288         WRT32_IO_REG(ha, io_base_addr, 0x5C10);
20289         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20290         WRT32_IO_REG(ha, io_base_addr, 0x5C20);
20291         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20292         WRT32_IO_REG(ha, io_base_addr, 0x5C30);
20293         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20294         WRT32_IO_REG(ha, io_base_addr, 0x5C40);
20295         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20296         WRT32_IO_REG(ha, io_base_addr, 0x5C50);
20297         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20298         WRT32_IO_REG(ha, io_base_addr, 0x5C60);
20299         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20300         WRT32_IO_REG(ha, io_base_addr, 0x5C70);
20301         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20302         WRT32_IO_REG(ha, io_base_addr, 0x5C80);
20303         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20304         WRT32_IO_REG(ha, io_base_addr, 0x5C90);
20305         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20306         WRT32_IO_REG(ha, io_base_addr, 0x5CA0);
20307         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20308         WRT32_IO_REG(ha, io_base_addr, 0x5CB0);
20309         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20310         WRT32_IO_REG(ha, io_base_addr, 0x5CC0);
20311         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20312         WRT32_IO_REG(ha, io_base_addr, 0x5CD0);
20313         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20314         WRT32_IO_REG(ha, io_base_addr, 0x5CE0);
20315         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20316         WRT32_IO_REG(ha, io_base_addr, 0x5CF0);
20317         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20318 
20319         /* RQ1 Array registers. */
20320         WRT32_IO_REG(ha, io_base_addr, 0x5D00);
20321         bp = ql_read_regs(ha, fw->rq1_array_reg, ha->iobase + 0xC0, 16, 32);
20322         WRT32_IO_REG(ha, io_base_addr, 0x5D10);
20323         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20324         WRT32_IO_REG(ha, io_base_addr, 0x5D20);
20325         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20326         WRT32_IO_REG(ha, io_base_addr, 0x5D30);
20327         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20328         WRT32_IO_REG(ha, io_base_addr, 0x5D40);
20329         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20330         WRT32_IO_REG(ha, io_base_addr, 0x5D50);
20331         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20332         WRT32_IO_REG(ha, io_base_addr, 0x5D60);
20333         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20334         WRT32_IO_REG(ha, io_base_addr, 0x5D70);
20335         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20336         WRT32_IO_REG(ha, io_base_addr, 0x5D80);
20337         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20338         WRT32_IO_REG(ha, io_base_addr, 0x5D90);
20339         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20340         WRT32_IO_REG(ha, io_base_addr, 0x5DA0);
20341         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20342         WRT32_IO_REG(ha, io_base_addr, 0x5DB0);
20343         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20344         WRT32_IO_REG(ha, io_base_addr, 0x5DC0);
20345         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20346         WRT32_IO_REG(ha, io_base_addr, 0x5DD0);
20347         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20348         WRT32_IO_REG(ha, io_base_addr, 0x5DE0);
20349         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20350         WRT32_IO_REG(ha, io_base_addr, 0x5DF0);
20351         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20352 
20353         /* RP0 Array registers. */
20354         WRT32_IO_REG(ha, io_base_addr, 0x5E00);
20355         bp = ql_read_regs(ha, fw->rp0_array_reg, ha->iobase + 0xC0, 16, 32);
20356         WRT32_IO_REG(ha, io_base_addr, 0x5E10);
20357         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20358         WRT32_IO_REG(ha, io_base_addr, 0x5E20);
20359         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20360         WRT32_IO_REG(ha, io_base_addr, 0x5E30);
20361         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20362         WRT32_IO_REG(ha, io_base_addr, 0x5E40);
20363         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20364         WRT32_IO_REG(ha, io_base_addr, 0x5E50);
20365         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20366         WRT32_IO_REG(ha, io_base_addr, 0x5E60);
20367         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20368         WRT32_IO_REG(ha, io_base_addr, 0x5E70);
20369         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20370         WRT32_IO_REG(ha, io_base_addr, 0x5E80);
20371         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20372         WRT32_IO_REG(ha, io_base_addr, 0x5E90);
20373         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20374         WRT32_IO_REG(ha, io_base_addr, 0x5EA0);
20375         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20376         WRT32_IO_REG(ha, io_base_addr, 0x5EB0);
20377         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20378         WRT32_IO_REG(ha, io_base_addr, 0x5EC0);
20379         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20380         WRT32_IO_REG(ha, io_base_addr, 0x5ED0);
20381         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20382         WRT32_IO_REG(ha, io_base_addr, 0x5EE0);
20383         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20384         WRT32_IO_REG(ha, io_base_addr, 0x5EF0);
20385         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20386 
20387         /* RP1 Array registers. */
20388         WRT32_IO_REG(ha, io_base_addr, 0x5F00);
20389         bp = ql_read_regs(ha, fw->rp1_array_reg, ha->iobase + 0xC0, 16, 32);
20390         WRT32_IO_REG(ha, io_base_addr, 0x5F10);
20391         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20392         WRT32_IO_REG(ha, io_base_addr, 0x5F20);
20393         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20394         WRT32_IO_REG(ha, io_base_addr, 0x5F30);
20395         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20396         WRT32_IO_REG(ha, io_base_addr, 0x5F40);
20397         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20398         WRT32_IO_REG(ha, io_base_addr, 0x5F50);
20399         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20400         WRT32_IO_REG(ha, io_base_addr, 0x5F60);
20401         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20402         WRT32_IO_REG(ha, io_base_addr, 0x5F70);
20403         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20404         WRT32_IO_REG(ha, io_base_addr, 0x5F80);
20405         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20406         WRT32_IO_REG(ha, io_base_addr, 0x5F90);
20407         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20408         WRT32_IO_REG(ha, io_base_addr, 0x5FA0);
20409         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20410         WRT32_IO_REG(ha, io_base_addr, 0x5FB0);
20411         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20412         WRT32_IO_REG(ha, io_base_addr, 0x5FC0);
20413         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20414         WRT32_IO_REG(ha, io_base_addr, 0x5FD0);
20415         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20416         WRT32_IO_REG(ha, io_base_addr, 0x5FE0);
20417         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20418         WRT32_IO_REG(ha, io_base_addr, 0x5FF0);
20419         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20420 
20421         /* AT0 Array Registers */
20422         WRT32_IO_REG(ha, io_base_addr, 0x7080);
20423         bp = ql_read_regs(ha, fw->ato_array_reg, ha->iobase + 0xC0, 16, 32);
20424         WRT32_IO_REG(ha, io_base_addr, 0x7090);
20425         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20426         WRT32_IO_REG(ha, io_base_addr, 0x70A0);
20427         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20428         WRT32_IO_REG(ha, io_base_addr, 0x70B0);
20429         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20430         WRT32_IO_REG(ha, io_base_addr, 0x70C0);
20431         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20432         WRT32_IO_REG(ha, io_base_addr, 0x70D0);
20433         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20434         WRT32_IO_REG(ha, io_base_addr, 0x70E0);
20435         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20436         WRT32_IO_REG(ha, io_base_addr, 0x70F0);
20437         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20438 
20439         /* I/O queue control registers */
20440 
20441         /* Queue Control Registers. */
20442         WRT32_IO_REG(ha, io_base_addr, 0x7800);
20443         (void) ql_read_regs(ha, fw->queue_control_reg, ha->iobase + 0xC0,
20444             16, 32);
20445 
20446         /* Frame Buffer registers. */
20447 
20448         /* FB hardware */
20449         WRT32_IO_REG(ha, io_base_addr, 0x6000);
20450         bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0, 16, 32);
20451         WRT32_IO_REG(ha, io_base_addr, 0x6010);
20452         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20453         WRT32_IO_REG(ha, io_base_addr, 0x6020);
20454         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20455         WRT32_IO_REG(ha, io_base_addr, 0x6030);
20456         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20457         WRT32_IO_REG(ha, io_base_addr, 0x6040);
20458         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20459         WRT32_IO_REG(ha, io_base_addr, 0x6060);
20460         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20461         WRT32_IO_REG(ha, io_base_addr, 0x6070);
20462         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20463         WRT32_IO_REG(ha, io_base_addr, 0x6100);
20464         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20465         WRT32_IO_REG(ha, io_base_addr, 0x6130);
20466         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20467         WRT32_IO_REG(ha, io_base_addr, 0x6150);
20468         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20469         WRT32_IO_REG(ha, io_base_addr, 0x6170);
20470         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20471         WRT32_IO_REG(ha, io_base_addr, 0x6190);
20472         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20473         WRT32_IO_REG(ha, io_base_addr, 0x61B0);
20474         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20475         WRT32_IO_REG(ha, io_base_addr, 0x61C0);
20476         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20477         WRT32_IO_REG(ha, io_base_addr, 0x6530);
20478         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20479         WRT32_IO_REG(ha, io_base_addr, 0x6540);
20480         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20481         WRT32_IO_REG(ha, io_base_addr, 0x6550);
20482         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20483         WRT32_IO_REG(ha, io_base_addr, 0x6560);
20484         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20485         WRT32_IO_REG(ha, io_base_addr, 0x6570);
20486         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20487         WRT32_IO_REG(ha, io_base_addr, 0x6580);
20488         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20489         WRT32_IO_REG(ha, io_base_addr, 0x6590);
20490         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20491         WRT32_IO_REG(ha, io_base_addr, 0x65A0);
20492         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20493         WRT32_IO_REG(ha, io_base_addr, 0x65B0);
20494         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20495         WRT32_IO_REG(ha, io_base_addr, 0x65C0);
20496         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20497         WRT32_IO_REG(ha, io_base_addr, 0x65D0);
20498         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20499         WRT32_IO_REG(ha, io_base_addr, 0x65E0);
20500         bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20501         WRT32_IO_REG(ha, io_base_addr, 0x6F00);
20502         (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20503 
20504         /* Get the Queue Pointers */
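        /*
         * Four 32-bit words are captured per response queue: the paired
         * request queue in/out pointers (zero when there is no matching
         * request queue) followed by the response queue in/out pointers,
         * each converted to little-endian for the dump image.
         */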
20505         dp = fw->req_rsp_ext_mem;
20506         for (index = 0; index < ha->rsp_queues_cnt; index++) {
20507                 if (index == 0) {
20508                         *dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_in);
20509                         LITTLE_ENDIAN_32(dp);
20510                         dp++;
20511                         *dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_out);
20512                         LITTLE_ENDIAN_32(dp);
20513                         dp++;
20514                 } else if (index == 1) {
20515                         *dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_in);
20516                         LITTLE_ENDIAN_32(dp);
20517                         dp++;
20518                         *dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_out);
20519                         LITTLE_ENDIAN_32(dp);
20520                         dp++;
20521                 } else {
20522                         *dp++ = 0;
20523                         *dp++ = 0;
20524                 }
20525                 *dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_in);
20526                 LITTLE_ENDIAN_32(dp);
20527                 dp++;
20528                 *dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_out);
20529                 LITTLE_ENDIAN_32(dp);
20530                 dp++;
20531         }
20532 
20533         /* Get the request queue */
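        /*
         * The request and response rings live in host DMA memory; sync
         * each ring for CPU access and copy its contents into the dump
         * one little-endian 32-bit word at a time.
         */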
20534         (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
20535             DDI_DMA_SYNC_FORCPU);
20536         w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
20537         for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
20538                 *dp = *w32ptr++;
20539                 LITTLE_ENDIAN_32(dp);
20540                 dp++;
20541         }
20542         if (ha->req_q[1] != NULL) {
20543                 (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle, 0, 0,
20544                     DDI_DMA_SYNC_FORCPU);
20545                 w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
20546                 for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
20547                         *dp = *w32ptr++;
20548                         LITTLE_ENDIAN_32(dp);
20549                         dp++;
20550                 }
20551         }
20552 
20553         /* Get the response queues */
20554         for (index = 0; index < ha->rsp_queues_cnt; index++) {
20555                 (void) ddi_dma_sync(ha->rsp_queues[index]->rsp_ring.dma_handle,
20556                     0, 0, DDI_DMA_SYNC_FORCPU);
20557                 w32ptr = (uint32_t *)ha->rsp_queues[index]->rsp_ring.bp;
20558                 for (cnt = 0; cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
20559                     cnt++) {
20560                         *dp = *w32ptr++;
20561                         LITTLE_ENDIAN_32(dp);
20562                         dp++;
20563                 }
20564         }
20565 
20566         /* Reset RISC. */
20567         ql_reset_chip(ha);
20568 
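        /*
         * Code RAM (RISC RAM address 0x20000) and external memory
         * (0x100000) are read back with ql_read_risc_ram(); the first
         * failure, if any, is kept in rval for the log message below.
         */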
20569         /* Code RAM. */
20570         rv = ql_read_risc_ram(ha, 0x20000, sizeof (fw->code_ram) / 4,
20571             fw->code_ram);
20572         if (rval == QL_SUCCESS) {
20573                 rval = rv;
20574         }
20575         rv = ql_read_risc_ram(ha, 0x100000,
20576             ha->fw_ext_memory_size / 4, dp);
20577         if (rval == QL_SUCCESS) {
20578                 rval = rv;
20579         }
20580 
20581         /* Get the extended trace buffer */
20582         if (ha->fwexttracebuf.dma_handle != NULL) {
20583                 /* Sync DMA buffer. */
20584                 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
20585                     FWEXTSIZE, DDI_DMA_SYNC_FORCPU);
20586 
20587                 w32ptr = ha->fwexttracebuf.bp;
20588                 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
20589                         fw->ext_trace_buf[cnt] = *w32ptr++;
20590                 }
20591         }
20592 
20593         /* Get the FC event trace buffer */
20594         if (ha->fwfcetracebuf.dma_handle != NULL) {
20595                 /* Sync DMA buffer. */
20596                 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
20597                     FWFCESIZE, DDI_DMA_SYNC_FORCPU);
20598 
20599                 w32ptr = ha->fwfcetracebuf.bp;
20600                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
20601                         fw->fce_trace_buf[cnt] = *w32ptr++;
20602                 }
20603         }
20604 
20605         if (rval != QL_SUCCESS) {
20606                 EL(ha, "failed, rval = %xh\n", rval);
20607         } else {
20608                 /*EMPTY*/
20609                 QL_PRINT_10(ha, "done\n");
20610         }
20611         return (QL_SUCCESS);
20612 }
20613 
20614 /*
20615  * ql_83xx_ascii_fw_dump
20616  *      Converts ISP83xx firmware binary dump to ASCII.
20617  *
20618  * Input:
20619  *      ha = adapter state pointer.
20620  *      bufp = buffer pointer.
20621  *
20622  * Returns:
20623  *      Amount of data buffer used.
20624  *
20625  * Context:
20626  *      Kernel context.
20627  */
20628 static size_t
20629 ql_83xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
20630 {
20631         uint32_t                cnt, cnt1, len, *dp, *dp2;
20632         caddr_t                 bp = bufp;
20633         ql_83xx_fw_dump_t       *fw = ha->ql_dump_ptr;
20634 
20635         QL_PRINT_3(ha, "started\n");
20636 
20637         if ((len = ha->risc_dump_size) == 0) {
20638                 QL_PRINT_10(ha, "no buffer\n");
20639                 return (0);
20640         }
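        /*
         * Each section below formats into the remaining buffer space with
         * snprintf() and then advances bp past the new text via
         * ql_str_ptr(); a NULL return means the buffer is full and the
         * ASCII dump is truncated at its current length.
         */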
20641         (void) snprintf(bp, len, "\nISP FW Version %d.%02d.%02d Attributes "
20642             "%X\n", ha->fw_major_version, ha->fw_minor_version,
20643             ha->fw_subminor_version, ha->fw_attributes);
20644         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20645                 return (strlen(bufp));
20646         }
20647 
20648         (void) snprintf(bp, len, "\nHCCR Register\n%08x\n", fw->hccr);
20649         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20650                 return (strlen(bufp));
20651         }
20652 
20653         (void) snprintf(bp, len, "\nR2H Status Register\n%08x\n",
20654             fw->r2h_status);
20655         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20656                 return (strlen(bufp));
20657         }
20658 
20659         (void) snprintf(bp, len,
20660             "\nAER Uncorrectable Error Status Register\n%08x\n", fw->aer_ues);
20661         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20662                 return (strlen(bufp));
20663         }
20664 
20665         (void) snprintf(bp, len, "\nHostRisc Registers");
20666         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20667                 return (strlen(bufp));
20668         }
20669         for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
20670                 if (cnt % 8 == 0) {
20671                         (void) snprintf(bp, len, "\n");
20672                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20673                                 return (strlen(bufp));
20674                         }
20675                 }
20676                 (void) snprintf(bp, len, "%08x ", fw->hostrisc_reg[cnt]);
20677                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20678                         return (strlen(bufp));
20679                 }
20680         }
20681 
20682         (void) snprintf(bp, len, "\n\nPCIe Registers");
20683         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20684                 return (strlen(bufp));
20685         }
20686         for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
20687                 if (cnt % 8 == 0) {
20688                         (void) snprintf(bp, len, "\n");
20689                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20690                                 return (strlen(bufp));
20691                         }
20692                 }
20693                 (void) snprintf(bp, len, "%08x ", fw->pcie_reg[cnt]);
20694                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20695                         return (strlen(bufp));
20696                 }
20697         }
20698 
20699         dp = fw->req_rsp_ext_mem;
20700         for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
20701                 (void) snprintf(bp, len, "\n\nQueue Pointers #%d:\n", cnt);
20702                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20703                         return (strlen(bufp));
20704                 }
20705                 for (cnt1 = 0; cnt1 < 4; cnt1++) {
20706                         (void) snprintf(bp, len, "%08x ", *dp++);
20707                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20708                                 return (strlen(bufp));
20709                         }
20710                 }
20711         }
20712 
20713         (void) snprintf(bp, len, "\n\nHost Interface Registers");
20714         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20715                 return (strlen(bufp));
20716         }
20717         for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
20718                 if (cnt % 8 == 0) {
20719                         (void) snprintf(bp, len, "\n");
20720                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20721                                 return (strlen(bufp));
20722                         }
20723                 }
20724                 (void) snprintf(bp, len, "%08x ", fw->host_reg[cnt]);
20725                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20726                         return (strlen(bufp));
20727                 }
20728         }
20729 
20730         (void) snprintf(bp, len, "\n\nShadow Registers");
20731         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20732                 return (strlen(bufp));
20733         }
20734         for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
20735                 if (cnt % 8 == 0) {
20736                         (void) snprintf(bp, len, "\n");
20737                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20738                                 return (strlen(bufp));
20739                         }
20740                 }
20741                 (void) snprintf(bp, len, "%08x ", fw->shadow_reg[cnt]);
20742                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20743                         return (strlen(bufp));
20744                 }
20745         }
20746 
20747         (void) snprintf(bp, len, "\n\nRISC IO Register\n%08x", fw->risc_io);
20748         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20749                 return (strlen(bufp));
20750         }
20751 
20752         (void) snprintf(bp, len, "\n\nMailbox Registers");
20753         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20754                 return (strlen(bufp));
20755         }
20756         for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
20757                 if (cnt % 16 == 0) {
20758                         (void) snprintf(bp, len, "\n");
20759                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20760                                 return (strlen(bufp));
20761                         }
20762                 }
20763                 (void) snprintf(bp, len, "%04x ", fw->mailbox_reg[cnt]);
20764                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20765                         return (strlen(bufp));
20766                 }
20767         }
20768 
20769         (void) snprintf(bp, len, "\n\nXSEQ GP Registers");
20770         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20771                 return (strlen(bufp));
20772         }
20773         for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
20774                 if (cnt % 8 == 0) {
20775                         (void) snprintf(bp, len, "\n");
20776                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20777                                 return (strlen(bufp));
20778                         }
20779                 }
20780                 (void) snprintf(bp, len, "%08x ", fw->xseq_gp_reg[cnt]);
20781                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20782                         return (strlen(bufp));
20783                 }
20784         }
20785 
20786         (void) snprintf(bp, len, "\n\nXSEQ-0 Registers");
20787         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20788                 return (strlen(bufp));
20789         }
20790         for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
20791                 if (cnt % 8 == 0) {
20792                         (void) snprintf(bp, len, "\n");
20793                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20794                                 return (strlen(bufp));
20795                         }
20796                 }
20797                 (void) snprintf(bp, len, "%08x ", fw->xseq_0_reg[cnt]);
20798                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20799                         return (strlen(bufp));
20800                 }
20801         }
20802 
20803         (void) snprintf(bp, len, "\n\nXSEQ-1 Registers");
20804         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20805                 return (strlen(bufp));
20806         }
20807         for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
20808                 if (cnt % 8 == 0) {
20809                         (void) snprintf(bp, len, "\n");
20810                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20811                                 return (strlen(bufp));
20812                         }
20813                 }
20814                 (void) snprintf(bp, len, "%08x ", fw->xseq_1_reg[cnt]);
20815                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20816                         return (strlen(bufp));
20817                 }
20818         }
20819 
20820         (void) snprintf(bp, len, "\n\nXSEQ-2 Registers");
20821         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20822                 return (strlen(bufp));
20823         }
20824         for (cnt = 0; cnt < sizeof (fw->xseq_2_reg) / 4; cnt++) {
20825                 if (cnt % 8 == 0) {
20826                         (void) snprintf(bp, len, "\n");
20827                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20828                                 return (strlen(bufp));
20829                         }
20830                 }
20831                 (void) snprintf(bp, len, "%08x ", fw->xseq_2_reg[cnt]);
20832                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20833                         return (strlen(bufp));
20834                 }
20835         }
20836 
20837         (void) snprintf(bp, len, "\n\nRSEQ GP Registers");
20838         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20839                 return (strlen(bufp));
20840         }
20841         for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
20842                 if (cnt % 8 == 0) {
20843                         (void) snprintf(bp, len, "\n");
20844                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20845                                 return (strlen(bufp));
20846                         }
20847                 }
20848                 (void) snprintf(bp, len, "%08x ", fw->rseq_gp_reg[cnt]);
20849                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20850                         return (strlen(bufp));
20851                 }
20852         }
20853 
20854         (void) snprintf(bp, len, "\n\nRSEQ-0 Registers");
20855         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20856                 return (strlen(bufp));
20857         }
20858         for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
20859                 if (cnt % 8 == 0) {
20860                         (void) snprintf(bp, len, "\n");
20861                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20862                                 return (strlen(bufp));
20863                         }
20864                 }
20865                 (void) snprintf(bp, len, "%08x ", fw->rseq_0_reg[cnt]);
20866                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20867                         return (strlen(bufp));
20868                 }
20869         }
20870 
20871         (void) snprintf(bp, len, "\n\nRSEQ-1 Registers");
20872         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20873                 return (strlen(bufp));
20874         }
20875         for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
20876                 if (cnt % 8 == 0) {
20877                         (void) snprintf(bp, len, "\n");
20878                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20879                                 return (strlen(bufp));
20880                         }
20881                 }
20882                 (void) snprintf(bp, len, "%08x ", fw->rseq_1_reg[cnt]);
20883                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20884                         return (strlen(bufp));
20885                 }
20886         }
20887 
20888         (void) snprintf(bp, len, "\n\nRSEQ-2 Registers");
20889         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20890                 return (strlen(bufp));
20891         }
20892         for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
20893                 if (cnt % 8 == 0) {
20894                         (void) snprintf(bp, len, "\n");
20895                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20896                                 return (strlen(bufp));
20897                         }
20898                 }
20899                 (void) snprintf(bp, len, "%08x ", fw->rseq_2_reg[cnt]);
20900                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20901                         return (strlen(bufp));
20902                 }
20903         }
20904 
20905         (void) snprintf(bp, len, "\n\nRSEQ-3 Registers");
20906         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20907                 return (strlen(bufp));
20908         }
20909         for (cnt = 0; cnt < sizeof (fw->rseq_3_reg) / 4; cnt++) {
20910                 if (cnt % 8 == 0) {
20911                         (void) snprintf(bp, len, "\n");
20912                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20913                                 return (strlen(bufp));
20914                         }
20915                 }
20916                 (void) snprintf(bp, len, "%08x ", fw->rseq_3_reg[cnt]);
20917                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20918                         return (strlen(bufp));
20919                 }
20920         }
20921 
20922         (void) snprintf(bp, len, "\n\nASEQ GP Registers");
20923         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20924                 return (strlen(bufp));
20925         }
20926         for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
20927                 if (cnt % 8 == 0) {
20928                         (void) snprintf(bp, len, "\n");
20929                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20930                                 return (strlen(bufp));
20931                         }
20932                 }
20933                 (void) snprintf(bp, len, "%08x ", fw->aseq_gp_reg[cnt]);
20934                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20935                         return (strlen(bufp));
20936                 }
20937         }
20938 
20939         (void) snprintf(bp, len, "\n\nASEQ-0 Registers");
20940         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20941                 return (strlen(bufp));
20942         }
20943         for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
20944                 if (cnt % 8 == 0) {
20945                         (void) snprintf(bp, len, "\n");
20946                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20947                                 return (strlen(bufp));
20948                         }
20949                 }
20950                 (void) snprintf(bp, len, "%08x ", fw->aseq_0_reg[cnt]);
20951                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20952                         return (strlen(bufp));
20953                 }
20954         }
20955 
20956         (void) snprintf(bp, len, "\n\nASEQ-1 Registers");
20957         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20958                 return (strlen(bufp));
20959         }
20960         for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
20961                 if (cnt % 8 == 0) {
20962                         (void) snprintf(bp, len, "\n");
20963                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20964                                 return (strlen(bufp));
20965                         }
20966                 }
20967                 (void) snprintf(bp, len, "%08x ", fw->aseq_1_reg[cnt]);
20968                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20969                         return (strlen(bufp));
20970                 }
20971         }
20972 
20973         (void) snprintf(bp, len, "\n\nASEQ-2 Registers");
20974         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20975                 return (strlen(bufp));
20976         }
20977         for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
20978                 if (cnt % 8 == 0) {
20979                         (void) snprintf(bp, len, "\n");
20980                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20981                                 return (strlen(bufp));
20982                         }
20983                 }
20984                 (void) snprintf(bp, len, "%08x ", fw->aseq_2_reg[cnt]);
20985                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20986                         return (strlen(bufp));
20987                 }
20988         }
20989 
20990         (void) snprintf(bp, len, "\n\nASEQ-3 Registers");
20991         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20992                 return (strlen(bufp));
20993         }
20994         for (cnt = 0; cnt < sizeof (fw->aseq_3_reg) / 4; cnt++) {
20995                 if (cnt % 8 == 0) {
20996                         (void) snprintf(bp, len, "\n");
20997                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20998                                 return (strlen(bufp));
20999                         }
21000                 }
21001                 (void) snprintf(bp, len, "%08x ", fw->aseq_3_reg[cnt]);
21002                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21003                         return (strlen(bufp));
21004                 }
21005         }
21006 
21007         (void) snprintf(bp, len, "\n\nCommand DMA Registers");
21008         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21009                 return (strlen(bufp));
21010         }
21011         for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
21012                 if (cnt % 8 == 0) {
21013                         (void) snprintf(bp, len, "\n");
21014                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21015                                 return (strlen(bufp));
21016                         }
21017                 }
21018                 (void) snprintf(bp, len, "%08x ", fw->cmd_dma_reg[cnt]);
21019                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21020                         return (strlen(bufp));
21021                 }
21022         }
21023 
21024         (void) snprintf(bp, len, "\n\nRequest0 Queue DMA Channel Registers");
21025         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21026                 return (strlen(bufp));
21027         }
21028         for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
21029                 if (cnt % 8 == 0) {
21030                         (void) snprintf(bp, len, "\n");
21031                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21032                                 return (strlen(bufp));
21033                         }
21034                 }
21035                 (void) snprintf(bp, len, "%08x ", fw->req0_dma_reg[cnt]);
21036                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21037                         return (strlen(bufp));
21038                 }
21039         }
21040 
21041         (void) snprintf(bp, len, "\n\nResponse0 Queue DMA Channel Registers");
21042         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21043                 return (strlen(bufp));
21044         }
21045         for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
21046                 if (cnt % 8 == 0) {
21047                         (void) snprintf(bp, len, "\n");
21048                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21049                                 return (strlen(bufp));
21050                         }
21051                 }
21052                 (void) snprintf(bp, len, "%08x ", fw->resp0_dma_reg[cnt]);
21053                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21054                         return (strlen(bufp));
21055                 }
21056         }
21057 
21058         (void) snprintf(bp, len, "\n\nRequest1 Queue DMA Channel Registers");
21059         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21060                 return (strlen(bufp));
21061         }
21062         for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
21063                 if (cnt % 8 == 0) {
21064                         (void) snprintf(bp, len, "\n");
21065                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21066                                 return (strlen(bufp));
21067                         }
21068                 }
21069                 (void) snprintf(bp, len, "%08x ", fw->req1_dma_reg[cnt]);
21070                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21071                         return (strlen(bufp));
21072                 }
21073         }
21074 
21075         (void) snprintf(bp, len, "\n\nXMT0 Data DMA Registers");
21076         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21077                 return (strlen(bufp));
21078         }
21079         for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
21080                 if (cnt % 8 == 0) {
21081                         (void) snprintf(bp, len, "\n");
21082                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21083                                 return (strlen(bufp));
21084                         }
21085                 }
21086                 (void) snprintf(bp, len, "%08x ", fw->xmt0_dma_reg[cnt]);
21087                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21088                         return (strlen(bufp));
21089                 }
21090         }
21091 
21092         (void) snprintf(bp, len, "\n\nXMT1 Data DMA Registers");
21093         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21094                 return (strlen(bufp));
21095         }
21096         for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
21097                 if (cnt % 8 == 0) {
21098                         (void) snprintf(bp, len, "\n");
21099                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21100                                 return (strlen(bufp));
21101                         }
21102                 }
21103                 (void) snprintf(bp, len, "%08x ", fw->xmt1_dma_reg[cnt]);
21104                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21105                         return (strlen(bufp));
21106                 }
21107         }
21108 
21109         (void) snprintf(bp, len, "\n\nXMT2 Data DMA Registers");
21110         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21111                 return (strlen(bufp));
21112         }
21113         for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
21114                 if (cnt % 8 == 0) {
21115                         (void) snprintf(bp, len, "\n");
21116                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21117                                 return (strlen(bufp));
21118                         }
21119                 }
21120                 (void) snprintf(bp, len, "%08x ", fw->xmt2_dma_reg[cnt]);
21121                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21122                         return (strlen(bufp));
21123                 }
21124         }
21125 
21126         (void) snprintf(bp, len, "\n\nXMT3 Data DMA Registers");
21127         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21128                 return (strlen(bufp));
21129         }
21130         for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
21131                 if (cnt % 8 == 0) {
21132                         (void) snprintf(bp, len, "\n");
21133                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21134                                 return (strlen(bufp));
21135                         }
21136                 }
21137                 (void) snprintf(bp, len, "%08x ", fw->xmt3_dma_reg[cnt]);
21138                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21139                         return (strlen(bufp));
21140                 }
21141         }
21142 
21143         (void) snprintf(bp, len, "\n\nXMT4 Data DMA Registers");
21144         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21145                 return (strlen(bufp));
21146         }
21147         for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
21148                 if (cnt % 8 == 0) {
21149                         (void) snprintf(bp, len, "\n");
21150                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21151                                 return (strlen(bufp));
21152                         }
21153                 }
21154                 (void) snprintf(bp, len, "%08x ", fw->xmt4_dma_reg[cnt]);
21155                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21156                         return (strlen(bufp));
21157                 }
21158         }
21159 
21160         (void) snprintf(bp, len, "\n\nXMT Data DMA Common Registers");
21161         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21162                 return (strlen(bufp));
21163         }
21164         for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
21165                 if (cnt % 8 == 0) {
21166                         (void) snprintf(bp, len, "\n");
21167                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21168                                 return (strlen(bufp));
21169                         }
21170                 }
21171                 (void) snprintf(bp, len, "%08x ", fw->xmt_data_dma_reg[cnt]);
21172                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21173                         return (strlen(bufp));
21174                 }
21175         }
21176 
21177         (void) snprintf(bp, len, "\n\nRCV Thread 0 Data DMA Registers");
21178         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21179                 return (strlen(bufp));
21180         }
21181         for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
21182                 if (cnt % 8 == 0) {
21183                         (void) snprintf(bp, len, "\n");
21184                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21185                                 return (strlen(bufp));
21186                         }
21187                 }
21188                 (void) snprintf(bp, len, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
21189                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21190                         return (strlen(bufp));
21191                 }
21192         }
21193 
21194         (void) snprintf(bp, len, "\n\nRCV Thread 1 Data DMA Registers");
21195         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21196                 return (strlen(bufp));
21197         }
21198         for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
21199                 if (cnt % 8 == 0) {
21200                         (void) snprintf(bp, len, "\n");
21201                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21202                                 return (strlen(bufp));
21203                         }
21204                 }
21205                 (void) snprintf(bp, len, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
21206                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21207                         return (strlen(bufp));
21208                 }
21209         }
21210 
21211         (void) snprintf(bp, len, "\n\nRISC GP Registers");
21212         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21213                 return (strlen(bufp));
21214         }
21215         for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
21216                 if (cnt % 8 == 0) {
21217                         (void) snprintf(bp, len, "\n");
21218                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21219                                 return (strlen(bufp));
21220                         }
21221                 }
21222                 (void) snprintf(bp, len, "%08x ", fw->risc_gp_reg[cnt]);
21223                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21224                         return (strlen(bufp));
21225                 }
21226         }
21227 
21228         (void) snprintf(bp, len, "\n\nLMC Registers");
21229         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21230                 return (strlen(bufp));
21231         }
21232         for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
21233                 if (cnt % 8 == 0) {
21234                         (void) snprintf(bp, len, "\n");
21235                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21236                                 return (strlen(bufp));
21237                         }
21238                 }
21239                 (void) snprintf(bp, len, "%08x ", fw->lmc_reg[cnt]);
21240                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21241                         return (strlen(bufp));
21242                 }
21243         }
21244 
21245         (void) snprintf(bp, len, "\n\nFPM Hardware Registers");
21246         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21247                 return (strlen(bufp));
21248         }
21249         cnt1 = (uint32_t)(sizeof (fw->fpm_hdw_reg));
21250         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21251                 if (cnt % 8 == 0) {
21252                         (void) snprintf(bp, len, "\n");
21253                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21254                                 return (strlen(bufp));
21255                         }
21256                 }
21257                 (void) snprintf(bp, len, "%08x ", fw->fpm_hdw_reg[cnt]);
21258                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21259                         return (strlen(bufp));
21260                 }
21261         }
21262 
21263         (void) snprintf(bp, len, "\n\nRQ0 Array Registers");
21264         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21265                 return (strlen(bufp));
21266         }
21267         cnt1 = (uint32_t)(sizeof (fw->rq0_array_reg));
21268         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21269                 if (cnt % 8 == 0) {
21270                         (void) snprintf(bp, len, "\n");
21271                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21272                                 return (strlen(bufp));
21273                         }
21274                 }
21275                 (void) snprintf(bp, len, "%08x ", fw->rq0_array_reg[cnt]);
21276                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21277                         return (strlen(bufp));
21278                 }
21279         }
21280 
21281         (void) snprintf(bp, len, "\n\nRQ1 Array Registers");
21282         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21283                 return (strlen(bufp));
21284         }
21285         cnt1 = (uint32_t)(sizeof (fw->rq1_array_reg));
21286         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21287                 if (cnt % 8 == 0) {
21288                         (void) snprintf(bp, len, "\n");
21289                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21290                                 return (strlen(bufp));
21291                         }
21292                 }
21293                 (void) snprintf(bp, len, "%08x ", fw->rq1_array_reg[cnt]);
21294                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21295                         return (strlen(bufp));
21296                 }
21297         }
21298 
21299         (void) snprintf(bp, len, "\n\nRP0 Array Registers");
21300         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21301                 return (strlen(bufp));
21302         }
21303         cnt1 = (uint32_t)(sizeof (fw->rp0_array_reg));
21304         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21305                 if (cnt % 8 == 0) {
21306                         (void) snprintf(bp, len, "\n");
21307                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21308                                 return (strlen(bufp));
21309                         }
21310                 }
21311                 (void) snprintf(bp, len, "%08x ", fw->rp0_array_reg[cnt]);
21312                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21313                         return (strlen(bufp));
21314                 }
21315         }
21316 
21317         (void) snprintf(bp, len, "\n\nRP1 Array Registers");
21318         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21319                 return (strlen(bufp));
21320         }
21321         cnt1 = (uint32_t)(sizeof (fw->rp1_array_reg));
21322         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21323                 if (cnt % 8 == 0) {
21324                         (void) snprintf(bp, len, "\n");
21325                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21326                                 return (strlen(bufp));
21327                         }
21328                 }
21329                 (void) snprintf(bp, len, "%08x ", fw->rp1_array_reg[cnt]);
21330                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21331                         return (strlen(bufp));
21332                 }
21333         }
21334 
21335         (void) snprintf(bp, len, "\n\nAT0 Array Registers");
21336         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21337                 return (strlen(bufp));
21338         }
21339         cnt1 = (uint32_t)(sizeof (fw->ato_array_reg));
21340         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21341                 if (cnt % 8 == 0) {
21342                         (void) snprintf(bp, len, "\n");
21343                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21344                                 return (strlen(bufp));
21345                         }
21346                 }
21347                 (void) snprintf(bp, len, "%08x ", fw->ato_array_reg[cnt]);
21348                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21349                         return (strlen(bufp));
21350                 }
21351         }
21352 
21353         (void) snprintf(bp, len, "\n\nQueue Control Registers");
21354         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21355                 return (strlen(bufp));
21356         }
21357         cnt1 = (uint32_t)(sizeof (fw->queue_control_reg));
21358         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21359                 if (cnt % 8 == 0) {
21360                         (void) snprintf(bp, len, "\n");
21361                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21362                                 return (strlen(bufp));
21363                         }
21364                 }
21365                 (void) snprintf(bp, len, "%08x ", fw->queue_control_reg[cnt]);
21366                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21367                         return (strlen(bufp));
21368                 }
21369         }
21370 
21371         (void) snprintf(bp, len, "\n\nFB Hardware Registers");
21372         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21373                 return (strlen(bufp));
21374         }
21375         cnt1 = (uint32_t)(sizeof (fw->fb_hdw_reg));
21376         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21377                 if (cnt % 8 == 0) {
21378                         (void) snprintf(bp, len, "\n");
21379                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21380                                 return (strlen(bufp));
21381                         }
21382                 }
21383                 (void) snprintf(bp, len, "%08x ", fw->fb_hdw_reg[cnt]);
21384                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21385                         return (strlen(bufp));
21386                 }
21387         }
21388 
21389         (void) snprintf(bp, len, "\n\nCode RAM");
21390         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21391                 return (strlen(bufp));
21392         }
21393         for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
21394                 if (cnt % 8 == 0) {
21395                         (void) snprintf(bp, len, "\n%08x: ", cnt + 0x20000);
21396                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21397                                 return (strlen(bufp));
21398                         }
21399                 }
21400                 (void) snprintf(bp, len, "%08x ", fw->code_ram[cnt]);
21401                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21402                         return (strlen(bufp));
21403                 }
21404         }
21405 
21406         (void) snprintf(bp, len, "\n\nExternal Memory");
21407         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21408                 return (strlen(bufp));
21409         }
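        /*
         * The external memory image follows the queue pointer area and the
         * captured request/response queues within req_rsp_ext_mem; skip
         * past those to reach it.
         */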
21410         dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
21411             fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
21412         for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
21413                 if (cnt % 8 == 0) {
21414                         (void) snprintf(bp, len, "\n%08x: ", cnt + 0x100000);
21415                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21416                                 return (strlen(bufp));
21417                         }
21418                 }
21419                 (void) snprintf(bp, len, "%08x ", *dp++);
21420                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21421                         return (strlen(bufp));
21422                 }
21423         }
21424 
21425         (void) snprintf(bp, len, "\n\n[<==END] ISP Debug Dump");
21426         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21427                 return (strlen(bufp));
21428         }
21429 
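        /*
         * Dump the captured request and response queues, skipping any
         * queue whose captured contents are entirely zero.
         */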
21430         dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
21431         for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
21432                 dp2 = dp;
21433                 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21434                         if (*dp2++) {
21435                                 break;
21436                         }
21437                 }
21438                 if (cnt1 == fw->req_q_size[cnt] / 4) {
21439                         dp = dp2;
21440                         continue;
21441                 }
21442                 (void) snprintf(bp, len, "\n\nRequest Queue\nQueue %d:", cnt);
21443                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21444                         return (strlen(bufp));
21445                 }
21446                 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21447                         if (cnt1 % 8 == 0) {
21448                                 (void) snprintf(bp, len, "\n%08x: ", cnt1);
21449                                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21450                                         return (strlen(bufp));
21451                                 }
21452                         }
21453                         (void) snprintf(bp, len, "%08x ", *dp++);
21454                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21455                                 return (strlen(bufp));
21456                         }
21457                 }
21458         }
21459 
21460         for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
21461                 dp2 = dp;
21462                 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21463                     cnt1++) {
21464                         if (*dp2++) {
21465                                 break;
21466                         }
21467                 }
21468                 if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
21469                         dp = dp2;
21470                         continue;
21471                 }
21472                 (void) snprintf(bp, len, "\n\nResponse Queue\nQueue %d:", cnt);
21473                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21474                         return (strlen(bufp));
21475                 }
21476                 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21477                     cnt1++) {
21478                         if (cnt1 % 8 == 0) {
21479                                 (void) snprintf(bp, len, "\n%08x: ", cnt1);
21480                                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21481                                         return (strlen(bufp));
21482                                 }
21483                         }
21484                         (void) snprintf(bp, len, "%08x ", *dp++);
21485                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21486                                 return (strlen(bufp));
21487                         }
21488                 }
21489         }
21490 
21491         if (ha->fwexttracebuf.dma_handle != NULL) {
21492                 uint32_t        cnt_b;
21493                 uint64_t        w64 = (uintptr_t)ha->fwexttracebuf.bp;
21494 
21495                 (void) snprintf(bp, len, "\n\nExtended Trace Buffer Memory");
21496                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21497                         return (strlen(bufp));
21498                 }
21499                 /* show data address as a byte address, data as long words */
21500                 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
21501                         cnt_b = cnt * 4;
21502                         if (cnt_b % 32 == 0) {
21503                                 (void) snprintf(bp, len, "\n%08x: ",
21504                                     (int)(w64 + cnt_b));
21505                                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21506                                         return (strlen(bufp));
21507                                 }
21508                         }
21509                         (void) snprintf(bp, len, "%08x ",
21510                             fw->ext_trace_buf[cnt]);
21511                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21512                                 return (strlen(bufp));
21513                         }
21514                 }
21515         }
21516 
21517         if (ha->fwfcetracebuf.dma_handle != NULL) {
21518                 uint32_t        cnt_b;
21519                 uint64_t        w64 = (uintptr_t)ha->fwfcetracebuf.bp;
21520 
21521                 (void) snprintf(bp, len, "\n\nFC Event Trace Buffer Memory");
21522                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21523                         return (strlen(bufp));
21524                 }
21525                 /* show data address as a byte address, data as long words */
21526                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
21527                         cnt_b = cnt * 4;
21528                         if (cnt_b % 32 == 0) {
21529                                 (void) snprintf(bp, len, "\n%08x: ",
21530                                     (int)(w64 + cnt_b));
21531                                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21532                                         return (strlen(bufp));
21533                                 }
21534                         }
21535                         (void) snprintf(bp, len, "%08x ",
21536                             fw->fce_trace_buf[cnt]);
21537                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21538                                 return (strlen(bufp));
21539                         }
21540                 }
21541         }
21542 
21543         QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21544 
21545         return (strlen(bufp));
21546 }
21547 
21548 
21549 /*
21550  * ql_str_ptr
21551  *      Advances past the current string and verifies the buffer is not full
21552  *
21553  * Input:
21554  *      ha:     adapter state pointer.
21555  *      bp:     string buffer pointer
21556  *      len:    pointer to remaining buffer length, updated on return
21557  *
21558  * Returns:
21559  *      NULL if the buffer is full, else the adjusted buffer pointer
21560  *
21561  * Context:
21562  *      Kernel context.
21563  */
21564 /*ARGSUSED*/
21565 static caddr_t
21566 ql_str_ptr(ql_adapter_state_t *ha, caddr_t bp, uint32_t *len)
21567 {
21568         uint32_t        i;
21569 
21570         i = strlen(bp);
21571         if (i > *len || !(*len -= i)) {
21572                 QL_PRINT_10(ha, "full buffer\n");
21573                 return (NULL);
21574         }
21575         return (bp += i);
21576 }
21577 
21578 /*
21579  * ql_27xx_binary_fw_dump
21580  *      Creates an ISP27xx binary firmware dump using the dump template.
21581  *
21582  * Input:
21583  *      ha:     adapter state pointer.
21584  *
21585  * Returns:
21586  *      ql local function return status code.
21587  *
21588  * Context:
21589  *      Interrupt or Kernel context, no mailbox commands allowed.
21590  */
21591 static int
21592 ql_27xx_binary_fw_dump(ql_adapter_state_t *ha)
21593 {
21594         ql_dmp_template_t       *template_buff;
21595         int                     rval;
21596         uint32_t                cnt, *dp, *bp, tsize;
21597 
21598         QL_PRINT_10(ha, "started\n");
21599 
21600         if (ha->dmp_template.dma_handle == NULL) {
21601                 rval = CFG_IST(ha, CFG_LOAD_FLASH_FW) ?
21602                     ql_2700_get_flash_dmp_template(ha) :
21603                     ql_2700_get_module_dmp_template(ha);
21604                 if (rval != QL_SUCCESS) {
21605                         EL(ha, "no dump template, status=%xh\n", rval);
21606                         return (QL_FUNCTION_PARAMETER_ERROR);
21607                 }
21608         }
21609         template_buff = ha->dmp_template.bp;
21610         tsize = template_buff->hdr.size_of_template;
21611 
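        /* First pass: parse the template with a NULL buffer to determine the capture size. */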
21612         if (ha->md_capture_size == 0) {
21613                 ha->ql_dump_ptr = kmem_zalloc(tsize, KM_NOSLEEP);
21614                 if (ha->ql_dump_ptr == NULL) {
21615                         QL_PRINT_10(ha, "done, failed alloc\n");
21616                         return (QL_MEMORY_ALLOC_FAILED);
21617                 }
21618                 cnt = (uint32_t)(tsize / sizeof (uint32_t));
21619                 dp = (uint32_t *)ha->ql_dump_ptr;
21620                 bp = (uint32_t *)&template_buff->hdr;
21621                 while (cnt--) {
21622                         *dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
21623                 }
21624                 ha->md_capture_size = ql_2700_dmp_parse_template(ha,
21625                     (ql_dt_hdr_t *)ha->ql_dump_ptr, NULL, 0);
21626                 kmem_free(ha->ql_dump_ptr, tsize);
21627                 ha->ql_dump_ptr = NULL;
21628 
21629                 if (ha->md_capture_size == 0) {
21630                         return (QL_MEMORY_ALLOC_FAILED);
21631                 }
21632 
21633                 /*
21634                  * Determine ascii dump file size
21635                  * 2 ascii bytes per binary byte + a space and
21636                  * a newline every 16 binary bytes
21637                  */
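                /* e.g. a 0x100000 byte capture yields 0x300000 + 0x10001 ascii bytes */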
21638                 ha->risc_dump_size = ha->md_capture_size << 1;
21639                 ha->risc_dump_size += ha->md_capture_size;
21640                 ha->risc_dump_size += ha->md_capture_size / 16 + 1;
21641                 QL_PRINT_10(ha, "md_capture_size=%xh, "
21642                     "risc_dump_size=%xh\n", ha->md_capture_size,
21643                     ha->risc_dump_size);
21644         }
21645 
21646         ha->ql_dump_ptr = kmem_zalloc(ha->md_capture_size, KM_NOSLEEP);
21647         if (ha->ql_dump_ptr == NULL) {
21648                 QL_PRINT_10(ha, "done, failed alloc\n");
21649                 return (QL_MEMORY_ALLOC_FAILED);
21650         }
21651         ha->ql_dump_size = ha->md_capture_size;
21652 
21653         /* Disable ISP interrupts. */
21654         ql_disable_intr(ha);
21655 
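        /* Second pass: copy the template into the dump buffer, then capture the data after it. */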
21656         cnt = (uint32_t)(tsize / sizeof (uint32_t));
21657         dp = (uint32_t *)ha->ql_dump_ptr;
21658         bp = (uint32_t *)&template_buff->hdr;
21659         while (cnt--) {
21660                 *dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
21661         }
21662 
21663         (void) ql_2700_dmp_parse_template(ha,
21664             (ql_dt_hdr_t *)ha->ql_dump_ptr,
21665             (uint8_t *)dp, ha->ql_dump_size);
21666 
21667 #ifdef _BIG_ENDIAN
21668         cnt = (uint32_t)(tsize / sizeof (uint32_t));
21669         dp = (uint32_t *)ha->ql_dump_ptr;
21670         while (cnt--) {
21671                 ql_chg_endian((uint8_t *)dp, 4);
21672                 dp++;
21673         }
21674 #endif
21675         QL_PRINT_10(ha, "done\n");
21676         return (QL_SUCCESS);
21677 }
21678 
21679 /*
21680  * ql_27xx_ascii_fw_dump
21681  *      Converts ISP27xx firmware binary dump to ascii.
21682  *
21683  * Input:
21684  *      ha:     adapter state pointer.
21685  *      bufp:   ascii buffer pointer.
21686  *
21687  * Returns:
21688  *      Amount of data buffer used.
21689  *
21690  * Context:
21691  *      Kernel context.
21692  */
21693 static size_t
21694 ql_27xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
21695 {
21696         uint32_t        cnt, len, dsize;
21697         uint8_t         *fw;
21698         caddr_t         bp;
21699 
21700         QL_PRINT_10(ha, "started\n");
21701 
21702         if ((len = ha->risc_dump_size) == 0) {
21703                 QL_PRINT_10(ha, "no buffer\n");
21704                 return (0);
21705         }
21706 
21707         dsize = ha->ql_dump_size;
21708         fw = (uint8_t *)ha->ql_dump_ptr;
21709         bp = bufp;
21710 
21711         QL_PRINT_10(ha, "fw_dump_buffer=%ph, fw_bin_dump_size=%xh\n",
21712             (void *)ha->ql_dump_ptr, ha->ql_dump_size);
21713 
21714         /*
21715          * 2 ascii bytes per binary byte + a space and
21716          * a newline every 16 binary bytes
21717          */
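        /* output format: 16 "xx " byte values per line, newline terminated */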
21718         cnt = 0;
21719         while (cnt < dsize) {
21720                 (void) snprintf(bp, len, "%02x ", *fw++);
21721                 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21722                         return (strlen(bufp));
21723                 }
21724                 if (++cnt % 16 == 0) {
21725                         (void) snprintf(bp, len, "\n");
21726                         if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21727                                 return (strlen(bufp));
21728                         }
21729                 }
21730         }
21731         if (cnt % 16 != 0) {
21732                 (void) snprintf(bp, len, "\n");
21733                 bp = ql_str_ptr(ha, bp, &len);
21734                 if (bp == NULL) {
21735                         return (strlen(bufp));
21736                 }
21737         }
21738 
21739         QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21740 
21741         return (strlen(bufp));
21742 }
21743 
21744 /* ******************************************************************* */
21745 /* ********************* Dump Template Functions ********************* */
21746 /* ******************************************************************* */
21747 
21748 /*
21749  * ql_2700_get_module_dmp_template
21750  *      Get dump template from firmware module
21751  *
21752  * Input:
21753  *      ha:     adapter state pointer.
21754  *
21755  * Returns:
21756  *      ql local function return status code.
21757  *
21758  * Context:
21759  *      Kernel context.
21760  */
21761 int
21762 ql_2700_get_module_dmp_template(ql_adapter_state_t *ha)
21763 {
21764         int             rval;
21765         uint32_t        word_count, cnt, *bp, *dp;
21766 
21767         QL_PRINT_10(ha, "started\n");
21768 
21769         if (ha->dmp_template.dma_handle != NULL) {
21770                 return (QL_SUCCESS);
21771         }
21772 
21773         if ((word_count = ha->risc_fw[2].length) == 0) {
21774                 EL(ha, "no dump template, length=0\n");
21775                 return (QL_FUNCTION_PARAMETER_ERROR);
21776         }
21777 
21778         /* Allocate template buffer. */
21779         ha->dmp_template.size = word_count << 2;
21780         ha->dmp_template.type = LITTLE_ENDIAN_DMA;
21781         ha->dmp_template.max_cookie_count = 1;
21782         ha->dmp_template.alignment = 8;
21783         rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
21784         if (rval != QL_SUCCESS) {
21785                 EL(ha, "unable to allocate template buffer, "
21786                     "status=%xh\n", rval);
21787                 return (rval);
21788         }
21789 
21790         /* Get big endian template. */
21791         bp = ha->dmp_template.bp;
21792         dp = (uint32_t *)ha->risc_fw[2].code;
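        /* Copy the template to DMA memory; all but the first seven header words are byte swapped. */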
21793         for (cnt = 0; cnt < word_count; cnt++) {
21794                 ddi_put32(ha->dmp_template.acc_handle, bp, *dp++);
21795                 if (cnt > 6) {
21796                         ql_chg_endian((uint8_t *)bp, 4);
21797                 }
21798                 bp++;
21799         }
21800 
21801         QL_PRINT_10(ha, "done\n");
21802         return (rval);
21803 }
21804 
21805 /*
21806  * ql_2700_get_flash_dmp_template
21807  *      Get dump template from flash
21808  *
21809  * Input:
21810  *      ha:     adapter state pointer.
21811  *
21812  * Returns:
21813  *      ql local function return status code.
21814  *
21815  * Context:
21816  *      Kernel context.
21817  */
21818 int
21819 ql_2700_get_flash_dmp_template(ql_adapter_state_t *ha)
21820 {
21821         int             rval;
21822         uint32_t        word_count, cnt, *bp;
21823         uint32_t        faddr = ha->flash_data_addr | ha->flash_fw_addr;
21824         uint32_t        fdata = 0;
21825 
21826         QL_PRINT_10(ha, "started, fw_addr=%xh\n", ha->flash_fw_addr);
21827 
21828         if (ha->dmp_template.dma_handle != NULL) {
21829                 ql_free_phys(ha, &ha->dmp_template);
21830         }
21831 
21832         /* First array length */
21833         rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
21834         QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21835             faddr + 3, fdata);
21836         if (rval != QL_SUCCESS) {
21837                 EL(ha, "2700_read_flash status=%xh\n", rval);
21838                 return (rval);
21839         }
21840         if (fdata == 0 || fdata == 0xffffffff) {
21841                 EL(ha, "Invalid first array length = %xh\n", fdata);
21842                 return (QL_FUNCTION_PARAMETER_ERROR);
21843         }
21844         ql_chg_endian((uint8_t *)&fdata, 4);
21845         QL_PRINT_7(ha, "First array length = %xh\n", fdata);
21846         faddr += fdata;
21847 
21848         /* Second array length */
21849         rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
21850         QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21851             faddr + 3, fdata);
21852         if (rval != QL_SUCCESS) {
21853                 EL(ha, "2700_read_flash status=%xh\n", rval);
21854                 return (rval);
21855         }
21856         if (fdata == 0 || fdata == 0xffffffff) {
21857                 EL(ha, "Invalid second array length = %xh\n", fdata);
21858                 return (QL_FUNCTION_PARAMETER_ERROR);
21859         }
21860         ql_chg_endian((uint8_t *)&fdata, 4);
21861         QL_PRINT_7(ha, "Second array length = %xh\n", fdata);
21862         faddr += fdata;
21863 
21864         /* Third array length (dump template) */
21865         rval = ql_24xx_read_flash(ha, faddr + 2, &fdata);
21866         QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21867             faddr + 2, fdata);
21868         if (rval != QL_SUCCESS) {
21869                 EL(ha, "2700_read_flash status=%xh\n", rval);
21870                 return (rval);
21871         }
21872         if (fdata == 0 || fdata == 0xffffffff) {
21873                 EL(ha, "Invalid third array length = %xh\n", fdata);
21874                 return (QL_FUNCTION_PARAMETER_ERROR);
21875         }
21876         ql_chg_endian((uint8_t *)&fdata, 4);
21877         QL_PRINT_7(ha, "Third array length = %xh\n", fdata);
21878         word_count = fdata;
21879 
21880         /* Allocate template buffer. */
21881         ha->dmp_template.size = word_count << 2;
21882         ha->dmp_template.type = LITTLE_ENDIAN_DMA;
21883         ha->dmp_template.max_cookie_count = 1;
21884         ha->dmp_template.alignment = 8;
21885         rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
21886         if (rval != QL_SUCCESS) {
21887                 EL(ha, "unable to allocate template buffer, "
21888                     "status=%xh\n", rval);
21889                 return (rval);
21890         }
21891 
21892         /* Get big endian template. */
21893         bp = ha->dmp_template.bp;
21894         for (cnt = 0; cnt < word_count; cnt++) {
21895                 rval = ql_24xx_read_flash(ha, faddr++, &fdata);
21896                 if (rval != QL_SUCCESS) {
21897                         EL(ha, "2700_read_flash status=%xh\n", rval);
21898                         ql_free_phys(ha, &ha->dmp_template);
21899                         return (rval);
21900                 }
21901                 ddi_put32(ha->dmp_template.acc_handle, bp, fdata);
21902                 bp++;
21903         }
21904 
21905         QL_PRINT_10(ha, "done\n");
21906         return (rval);
21907 }
21908 
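/*
 * ql_2700_dmp_parse_template
 *      Walks the ISP27xx dump template, processing each entry; when
 *      dump_buff is NULL only the required capture size is computed.
 *
 * Input:
 *      ha:             adapter state pointer.
 *      template_hdr:   dump template header pointer.
 *      dump_buff:      dump buffer pointer (NULL = size determination only).
 *      buff_size:      dump buffer size in bytes.
 *
 * Returns:
 *      Total dump size in bytes (template plus captured data), or 0 on error.
 *
 * Context:
 *      Interrupt or Kernel context, no mailbox commands allowed.
 */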
21909 static uint32_t
21910 ql_2700_dmp_parse_template(ql_adapter_state_t *ha, ql_dt_hdr_t *template_hdr,
21911     uint8_t *dump_buff, uint32_t buff_size)
21912 {
21913         int             e_cnt, esize, num_of_entries;
21914         uint32_t        bsize;
21915         time_t          time;
21916         uint8_t         *dbuff, *dbuff_end;
21917         ql_dt_entry_t   *entry;
21918         int             sane_end = 0;
21919 
21920         dbuff = dump_buff;      /* dbuff == NULL means size determination only. */
21921         dbuff_end = dump_buff + buff_size;
21922 
21923         template_hdr->ver_attr[0] = ha->fw_major_version;
21924         template_hdr->ver_attr[1] = ha->fw_minor_version;
21925         template_hdr->ver_attr[2] = ha->fw_subminor_version;
21926         template_hdr->ver_attr[3] = ha->fw_attributes;
21927         template_hdr->ver_attr[4] = ha->fw_ext_attributes;
21928 
21929         QL_PRINT_7(ha, "started, template_hdr=%ph, dump_buff=%ph, "
21930             "buff_size=%xh, buff_end=%ph\n", (void *)template_hdr,
21931             (void *)dbuff, buff_size, (void *)dbuff_end);
21932 
21933         /* Setup parameters */
21934         QL_PRINT_7(ha, "type=%d, first_entry_offset=%xh, "
21935             "num_of_entries=%xh ver_attr=%xh,%xh,%xh,%xh,%xh\n",
21936             template_hdr->type, template_hdr->first_entry_offset,
21937             template_hdr->num_of_entries, template_hdr->ver_attr[0],
21938             template_hdr->ver_attr[1], template_hdr->ver_attr[2],
21939             template_hdr->ver_attr[3], template_hdr->ver_attr[4]);
21940 
21941         if (template_hdr->type != DT_THDR) {
21942                 EL(ha, "Template header not found\n");
21943                 return (0);
21944         }
21945         if (dbuff != NULL) {
21946                 (void) drv_getparm(TIME, &time);
21947                 template_hdr->driver_timestamp = LSD(time);
21948         }
21949 
21950         num_of_entries = template_hdr->num_of_entries;
21951         entry = (ql_dt_entry_t *)((caddr_t)template_hdr +
21952             template_hdr->first_entry_offset);
21953 
21954         bsize = template_hdr->size_of_template;
21955         for (e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
21956                 QL_PRINT_7(ha, "e_cnt=%xh, entry=%ph, type=%d, size=%xh, "
21957                     "capture_flags=%xh, driver_flags=%xh, bofst=%xh\n",
21958                     e_cnt, (void *)entry, entry->h.type, entry->h.size,
21959                     entry->h.capture_flags, entry->h.driver_flags,
21960                     dbuff != NULL ? (uintptr_t)dbuff - (uintptr_t)template_hdr :
21961                     bsize);
21962                 /*
21963                  * Decode the entry type and process it accordingly
21964                  */
21965                 esize = 0;
21966                 switch (entry->h.type) {
21967                 case DT_NOP:
21968                         if (dbuff != NULL) {
21969                                 entry->h.driver_flags = (uint8_t)
21970                                     (entry->h.driver_flags | SKIPPED_FLAG);
21971                         }
21972                         QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
21973                             e_cnt, entry->h.type);
21974                         break;
21975                 case DT_TEND:
21976                         if (dbuff != NULL) {
21977                                 entry->h.driver_flags = (uint8_t)
21978                                     (entry->h.driver_flags | SKIPPED_FLAG);
21979                         }
21980                         QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
21981                             e_cnt, entry->h.type);
21982                         sane_end++;
21983                         break;
21984                 case DT_RIOB1:
21985                         esize = ql_2700_dt_riob1(ha, (ql_dt_riob1_t *)entry,
21986                             dbuff, dbuff_end);
21987                         break;
21988                 case DT_WIOB1:
21989                         ql_2700_dt_wiob1(ha, (ql_dt_wiob1_t *)entry,
21990                             dbuff, dbuff_end);
21991                         break;
21992                 case DT_RIOB2:
21993                         esize = ql_2700_dt_riob2(ha, (ql_dt_riob2_t *)entry,
21994                             dbuff, dbuff_end);
21995                         break;
21996                 case DT_WIOB2:
21997                         ql_2700_dt_wiob2(ha, (ql_dt_wiob2_t *)entry,
21998                             dbuff, dbuff_end);
21999                         break;
22000                 case DT_RPCI:
22001                         esize = ql_2700_dt_rpci(ha, (ql_dt_rpci_t *)entry,
22002                             dbuff, dbuff_end);
22003                         break;
22004                 case DT_WPCI:
22005                         ql_2700_dt_wpci(ha, (ql_dt_wpci_t *)entry,
22006                             dbuff, dbuff_end);
22007                         break;
22008                 case DT_RRAM:
22009                         esize = ql_2700_dt_rram(ha, (ql_dt_rram_t *)entry,
22010                             dbuff, dbuff_end);
22011                         break;
22012                 case DT_GQUE:
22013                         esize = ql_2700_dt_gque(ha, (ql_dt_gque_t *)entry,
22014                             dbuff, dbuff_end);
22015                         break;
22016                 case DT_GFCE:
22017                         esize = ql_2700_dt_gfce(ha, (ql_dt_gfce_t *)entry,
22018                             dbuff, dbuff_end);
22019                         break;
22020                 case DT_PRISC:
22021                         ql_2700_dt_prisc(ha, (ql_dt_prisc_t *)entry,
22022                             dbuff, dbuff_end);
22023                         break;
22024                 case DT_RRISC:
22025                         ql_2700_dt_rrisc(ha, (ql_dt_rrisc_t *)entry,
22026                             dbuff, dbuff_end);
22027                         break;
22028                 case DT_DINT:
22029                         ql_2700_dt_dint(ha, (ql_dt_dint_t *)entry,
22030                             dbuff, dbuff_end);
22031                         break;
22032                 case DT_GHBD:
22033                         esize = ql_2700_dt_ghbd(ha, (ql_dt_ghbd_t *)entry,
22034                             dbuff, dbuff_end);
22035                         break;
22036                 case DT_SCRA:
22037                         esize = ql_2700_dt_scra(ha, (ql_dt_scra_t *)entry,
22038                             dbuff, dbuff_end);
22039                         break;
22040                 case DT_RRREG:
22041                         esize = ql_2700_dt_rrreg(ha, (ql_dt_rrreg_t *)entry,
22042                             dbuff, dbuff_end);
22043                         break;
22044                 case DT_WRREG:
22045                         ql_2700_dt_wrreg(ha, (ql_dt_wrreg_t *)entry,
22046                             dbuff, dbuff_end);
22047                         break;
22048                 case DT_RRRAM:
22049                         esize = ql_2700_dt_rrram(ha, (ql_dt_rrram_t *)entry,
22050                             dbuff, dbuff_end);
22051                         break;
22052                 case DT_RPCIC:
22053                         esize = ql_2700_dt_rpcic(ha, (ql_dt_rpcic_t *)entry,
22054                             dbuff, dbuff_end);
22055                         break;
22056                 case DT_GQUES:
22057                         esize = ql_2700_dt_gques(ha, (ql_dt_gques_t *)entry,
22058                             dbuff, dbuff_end);
22059                         break;
22060                 case DT_WDMP:
22061                         esize = ql_2700_dt_wdmp(ha, (ql_dt_wdmp_t *)entry,
22062                             dbuff, dbuff_end);
22063                         break;
22064                 default:
22065                         entry->h.driver_flags = (uint8_t)
22066                             (entry->h.driver_flags | SKIPPED_FLAG);
22067                         EL(ha, "Entry ID=%d, type=%d unknown\n", e_cnt,
22068                             entry->h.type);
22069                         break;
22070                 }
22071                 if (dbuff != NULL && esize) {
22072                         QL_PRINT_7(ha, "entry=%d, esize=%xh, capture data\n",
22073                             entry->h.type, esize);
22074                         QL_DUMP_3(dbuff, 8, esize);
22075                         dbuff += esize;
22076                 }
22077                 bsize += esize;
22078                 /* next entry in the template */
22079                 entry = (ql_dt_entry_t *)((caddr_t)entry + entry->h.size);
22080         }
22081         if (sane_end > 1) {
22082                 EL(ha, "Template configuration error. Check Template\n");
22083         }
22084 
22085         QL_PRINT_7(ha, "done, num of entries=%xh, size=%xh\n",
22086             template_hdr->num_of_entries, bsize);
22087         return (bsize);
22088 }
22089 
22090 static int
22091 ql_2700_dt_riob1(ql_adapter_state_t *ha, ql_dt_riob1_t *entry,
22092     uint8_t *dbuff, uint8_t *dbuff_end)
22093 {
22094         int             esize;
22095         uint32_t        i, cnt;
22096         uint8_t         *bp = dbuff;
22097         uint32_t        addr = entry->addr;
22098         uint8_t         *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22099 
22100         QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
22101             "reg_count=%x%02xh, pci_offset=%xh\n", (void *)dbuff, entry->addr,
22102             entry->reg_size, entry->reg_count_h, entry->reg_count_l,
22103             entry->pci_offset);
22104 
22105         cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
22106         esize = cnt * 4;                /* addr */
22107         esize += cnt * entry->reg_size;      /* data */
22108 
22109         if (dbuff == NULL) {
22110                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22111                 return (esize);
22112         }
22113         if (esize + dbuff >= dbuff_end) {
22114                 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22115                 entry->h.driver_flags = (uint8_t)
22116                     (entry->h.driver_flags | SKIPPED_FLAG);
22117                 return (0);
22118         }
22119 
22120         WRT32_IO_REG(ha, io_base_addr, addr);
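        /* each captured entry: 4-byte little endian address followed by reg_size data bytes */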
22121         while (cnt--) {
22122                 *bp++ = LSB(LSW(addr));
22123                 *bp++ = MSB(LSW(addr));
22124                 *bp++ = LSB(MSW(addr));
22125                 *bp++ = MSB(MSW(addr));
22126                 for (i = 0; i < entry->reg_size; i++) {
22127                         *bp++ = RD_REG_BYTE(ha, reg++);
22128                 }
22129                 addr++;
22130         }
22131 
22132         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22133         return (esize);
22134 }
22135 
22136 static void
22137 ql_2700_dt_wiob1(ql_adapter_state_t *ha, ql_dt_wiob1_t *entry,
22138     uint8_t *dbuff, uint8_t *dbuff_end)
22139 {
22140         uint8_t *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22141 
22142         QL_PRINT_7(ha, "started, addr=%xh, data=%xh, pci_offset=%xh\n",
22143             entry->addr, entry->data, entry->pci_offset);
22144 
22145         if (dbuff == NULL) {
22146                 QL_PRINT_7(ha, "null buf done\n");
22147                 return;
22148         }
22149         if (dbuff >= dbuff_end) {
22150                 EL(ha, "skipped, no buffer space, needed=0\n");
22151                 entry->h.driver_flags = (uint8_t)
22152                     (entry->h.driver_flags | SKIPPED_FLAG);
22153                 return;
22154         }
22155 
22156         WRT32_IO_REG(ha, io_base_addr, entry->addr);
22157         WRT_REG_DWORD(ha, reg, entry->data);
22158 
22159         QL_PRINT_7(ha, "done\n");
22160 }
22161 
22162 static int
22163 ql_2700_dt_riob2(ql_adapter_state_t *ha, ql_dt_riob2_t *entry,
22164     uint8_t *dbuff, uint8_t *dbuff_end)
22165 {
22166         int             esize;
22167         uint32_t        i, cnt;
22168         uint8_t         *bp = dbuff;
22169         uint8_t         *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22170         uint32_t        addr = entry->addr;
22171 
22172         QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
22173             "reg_count=%x%02xh, pci_offset=%xh, bank_sel_offset=%xh, "
22174             "reg_bank=%xh\n", (void *)dbuff, entry->addr,
22175             entry->reg_size, entry->reg_count_h, entry->reg_count_l,
22176             entry->pci_offset, entry->bank_sel_offset, entry->reg_bank);
22177 
22178         cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
22179         esize = cnt * 4;                /* addr */
22180         esize += cnt * entry->reg_size;      /* data */
22181 
22182         if (dbuff == NULL) {
22183                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22184                 return (esize);
22185         }
22186         if (esize + dbuff >= dbuff_end) {
22187                 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22188                 entry->h.driver_flags = (uint8_t)
22189                     (entry->h.driver_flags | SKIPPED_FLAG);
22190                 return (0);
22191         }
22192 
22193         WRT32_IO_REG(ha, io_base_addr, addr);
22194         WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
22195         while (cnt--) {
22196                 *bp++ = LSB(LSW(addr));
22197                 *bp++ = MSB(LSW(addr));
22198                 *bp++ = LSB(MSW(addr));
22199                 *bp++ = MSB(MSW(addr));
22200                 for (i = 0; i < entry->reg_size; i++) {
22201                         *bp++ = RD_REG_BYTE(ha, reg++);
22202                 }
22203                 addr++;
22204         }
22205 
22206         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22207         return (esize);
22208 }
22209 
22210 static void
22211 ql_2700_dt_wiob2(ql_adapter_state_t *ha, ql_dt_wiob2_t *entry,
22212     uint8_t *dbuff, uint8_t *dbuff_end)
22213 {
22214         uint16_t        data;
22215         uint8_t         *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22216 
22217         QL_PRINT_7(ha, "started, addr=%xh, data=%x%02xh, pci_offset=%xhh, "
22218             "bank_sel_offset=%xh, reg_bank=%xh\n", entry->addr, entry->data_h,
22219             entry->data_l, entry->pci_offset, entry->bank_sel_offset,
22220             entry->reg_bank);
22221 
22222         if (dbuff == NULL) {
22223                 QL_PRINT_7(ha, "null buf done\n");
22224                 return;
22225         }
22226         if (dbuff >= dbuff_end) {
22227                 EL(ha, "skipped, no buffer space, needed=0\n");
22228                 entry->h.driver_flags = (uint8_t)
22229                     (entry->h.driver_flags | SKIPPED_FLAG);
22230                 return;
22231         }
22232 
22233         data = CHAR_TO_SHORT(entry->data_l, entry->data_h);
22234 
22235         WRT32_IO_REG(ha, io_base_addr, entry->addr);
22236         WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
22237         WRT_REG_WORD(ha, reg, data);
22238 
22239         QL_PRINT_7(ha, "done\n");
22240 }
22241 
22242 static int
22243 ql_2700_dt_rpci(ql_adapter_state_t *ha, ql_dt_rpci_t *entry, uint8_t *dbuff,
22244     uint8_t *dbuff_end)
22245 {
22246         int             esize;
22247         uint32_t        i;
22248         uint8_t         *bp = dbuff;
22249         uint8_t         *reg = (uint8_t *)ha->iobase + entry->addr;
22250 
22251         QL_PRINT_7(ha, "started, addr=%xh, reg=%ph\n", entry->addr,
22252             (void *)reg);
22253 
22254         esize = 4;      /* addr */
22255         esize += 4;     /* data */
22256 
22257         if (dbuff == NULL) {
22258                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22259                 return (esize);
22260         }
22261         if (esize + dbuff >= dbuff_end) {
22262                 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22263                 entry->h.driver_flags = (uint8_t)
22264                     (entry->h.driver_flags | SKIPPED_FLAG);
22265                 return (0);
22266         }
22267 
22268         *bp++ = LSB(LSW(entry->addr));
22269         *bp++ = MSB(LSW(entry->addr));
22270         *bp++ = LSB(MSW(entry->addr));
22271         *bp++ = MSB(MSW(entry->addr));
22272         for (i = 0; i < 4; i++) {
22273                 *bp++ = RD_REG_BYTE(ha, reg++);
22274         }
22275 
22276         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22277         return (esize);
22278 }
22279 
22280 static void
22281 ql_2700_dt_wpci(ql_adapter_state_t *ha, ql_dt_wpci_t *entry,
22282     uint8_t *dbuff, uint8_t *dbuff_end)
22283 {
22284         uint8_t *reg = (uint8_t *)ha->iobase + entry->addr;
22285 
22286         QL_PRINT_7(ha, "started, addr=%xh, data=%xh, reg=%ph\n",
22287             entry->addr, entry->data, (void *)reg);
22288 
22289         if (dbuff == NULL) {
22290                 QL_PRINT_7(ha, "null buf done\n");
22291                 return;
22292         }
22293         if (dbuff >= dbuff_end) {
22294                 EL(ha, "skipped, no buffer space, needed=0\n");
22295                 entry->h.driver_flags = (uint8_t)
22296                     (entry->h.driver_flags | SKIPPED_FLAG);
22297                 return;
22298         }
22299 
22300         WRT_REG_DWORD(ha, reg, entry->data);
22301 
22302         QL_PRINT_7(ha, "done\n");
22303 }
22304 
22305 static int
22306 ql_2700_dt_rram(ql_adapter_state_t *ha, ql_dt_rram_t *entry,
22307     uint8_t *dbuff, uint8_t *dbuff_end)
22308 {
22309         int             esize, rval;
22310         uint32_t        start = entry->start_addr;
22311         uint32_t        end = entry->end_addr;
22312 
22313         QL_PRINT_7(ha, "started, buf=%ph, ram_area=%xh, start_addr=%xh, "
22314             "end_addr=%xh\n", (void *)dbuff, entry->ram_area,
22315             entry->start_addr, entry->end_addr);
22316 
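        /* ram_area 1 uses the template start/end; 2 = extended memory, 3 = shared RAM, 4 = DDR RAM */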
22317         if (entry->ram_area == 2) {
22318                 end = ha->fw_ext_memory_end;
22319         } else if (entry->ram_area == 3) {
22320                 start = ha->fw_shared_ram_start;
22321                 end = ha->fw_shared_ram_end;
22322         } else if (entry->ram_area == 4) {
22323                 start = ha->fw_ddr_ram_start;
22324                 end = ha->fw_ddr_ram_end;
22325         } else if (entry->ram_area != 1) {
22326                 EL(ha, "skipped, unknown RAM_AREA %d\n", entry->ram_area);
22327                 start = 0;
22328                 end = 0;
22329         }
22330         esize = end > start ? end - start : 0;
22331         if (esize) {
22332                 esize = (esize + 1) * 4;
22333         }
22334 
22335         if (dbuff == NULL) {
22336                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22337                 return (esize);
22338         }
22339         if (esize == 0 || esize + dbuff >= dbuff_end) {
22340                 if (esize != 0) {
22341                         EL(ha, "skipped, no buffer space, needed=%xh\n",
22342                             esize);
22343                 } else {
22344                         /*EMPTY*/
22345                         QL_PRINT_7(ha, "skipped, no ram_area=%xh, start=%xh, "
22346                             "end=%xh\n", entry->ram_area, start, end);
22347                 }
22348                 entry->h.driver_flags = (uint8_t)
22349                     (entry->h.driver_flags | SKIPPED_FLAG);
22350                 return (0);
22351         }
22352         entry->end_addr = end;
22353         entry->start_addr = start;
22354 
22355         if ((rval = ql_2700_dump_ram(ha, MBC_DUMP_RAM_EXTENDED,
22356             start, esize / 4, dbuff)) != QL_SUCCESS) {
22357                 EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
22358                     "esize=0\n", rval, start, esize / 4);
22359                 return (0);
22360         }
22361 
22362         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22363         return (esize);
22364 }
22365 
22366 static int
22367 ql_2700_dt_gque(ql_adapter_state_t *ha, ql_dt_gque_t *entry,
22368     uint8_t *dbuff, uint8_t *dbuff_end)
22369 {
22370         int             esize;
22371         uint32_t        cnt, q_cnt, e_cnt, i;
22372         uint8_t         *bp = dbuff, *dp;
22373 
22374         QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
22375             (void *)dbuff, entry->num_queues, entry->queue_type);
22376 
22377         if (entry->queue_type == 1) {
22378                 ql_request_q_t  *req_q;
22379 
22380                 e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
22381                 esize = e_cnt * 2;      /* queue number */
22382                 esize += e_cnt * 2;     /* queue entries */
22383 
22384                 /* queue size */
22385                 esize += ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
22386                 if (e_cnt > 1) {
22387                         esize += ha->req_q[1]->req_entry_cnt *
22388                             REQUEST_ENTRY_SIZE;
22389                 }
22390 
22391                 if (dbuff == NULL) {
22392                         QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22393                         return (esize);
22394                 }
22395                 if (esize + dbuff >= dbuff_end) {
22396                         EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22397                         entry->h.driver_flags = (uint8_t)
22398                             (entry->h.driver_flags | SKIPPED_FLAG);
22399                         return (0);
22400                 }
22401                 entry->num_queues = e_cnt;
22402 
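                /* per-queue record: 16-bit queue number, 16-bit entry count, then the raw entries */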
22403                 for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
22404                         req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
22405                         e_cnt = req_q->req_entry_cnt;
22406                         dp = req_q->req_ring.bp;
22407                         *bp++ = LSB(q_cnt);
22408                         *bp++ = MSB(q_cnt);
22409                         *bp++ = LSB(e_cnt);
22410                         *bp++ = MSB(e_cnt);
22411                         for (cnt = 0; cnt < e_cnt; cnt++) {
22412                                 for (i = 0; i < REQUEST_ENTRY_SIZE; i++) {
22413                                         *bp++ = *dp++;
22414                                 }
22415                         }
22416                 }
22417         } else if (entry->queue_type == 2) {
22418                 ql_response_q_t *rsp_q;
22419 
22420                 e_cnt = ha->rsp_queues_cnt;
22421                 esize = e_cnt * 2;      /* queue number */
22422                 esize += e_cnt * 2;     /* queue entries */
22423 
22424                 /* queue size */
22425                 for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
22426                         rsp_q = ha->rsp_queues[q_cnt];
22427                         esize += rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
22428                 }
22429 
22430                 if (dbuff == NULL) {
22431                         QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22432                         return (esize);
22433                 }
22434                 if (esize + dbuff >= dbuff_end) {
22435                         EL(ha, "skipped2, no buffer space, needed=%xh\n",
22436                             esize);
22437                         entry->h.driver_flags = (uint8_t)
22438                             (entry->h.driver_flags | SKIPPED_FLAG);
22439                         return (0);
22440                 }
22441                 entry->num_queues = e_cnt;
22442 
22443                 for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
22444                         rsp_q = ha->rsp_queues[q_cnt];
22445                         e_cnt = rsp_q->rsp_entry_cnt;
22446                         dp = rsp_q->rsp_ring.bp;
22447                         *bp++ = LSB(q_cnt);
22448                         *bp++ = MSB(q_cnt);
22449                         *bp++ = LSB(e_cnt);
22450                         *bp++ = MSB(e_cnt);
22451                         for (cnt = 0; cnt < e_cnt; cnt++) {
22452                                 for (i = 0; i < RESPONSE_ENTRY_SIZE; i++) {
22453                                         *bp++ = *dp++;
22454                                 }
22455                         }
22456                 }
22457         } else if (entry->queue_type == 3) {
22458                 QL_PRINT_7(ha, "skipped, no ATIO queue, esize=0\n");
22459                 if (dbuff != NULL) {
22460                         entry->num_queues = 0;
22461                         entry->h.driver_flags = (uint8_t)
22462                             (entry->h.driver_flags | SKIPPED_FLAG);
22463                 }
22464                 return (0);
22465         } else {
22466                 EL(ha, "skipped, unknown queue_type %d, esize=0\n",
22467                     entry->queue_type);
22468                 if (dbuff != NULL) {
22469                         entry->h.driver_flags = (uint8_t)
22470                             (entry->h.driver_flags | SKIPPED_FLAG);
22471                 }
22472                 return (0);
22473         }
22474 
22475         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22476         return (esize);
22477 }
22478 
22479 /*ARGSUSED*/
22480 static int
22481 ql_2700_dt_gfce(ql_adapter_state_t *ha, ql_dt_gfce_t *entry,
22482     uint8_t *dbuff, uint8_t *dbuff_end)
22483 {
22484         QL_PRINT_7(ha, "started\n");
22485 
22486         QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22487         if (dbuff != NULL) {
22488                 entry->h.driver_flags = (uint8_t)
22489                     (entry->h.driver_flags | SKIPPED_FLAG);
22490         }
22491 
22492         return (0);
22493 }
22494 
22495 static void
22496 ql_2700_dt_prisc(ql_adapter_state_t *ha, ql_dt_prisc_t *entry,
22497     uint8_t *dbuff, uint8_t *dbuff_end)
22498 {
22499         clock_t timer;
22500 
22501         QL_PRINT_7(ha, "started\n");
22502 
22503         if (dbuff == NULL) {
22504                 QL_PRINT_7(ha, "null buf done\n");
22505                 return;
22506         }
22507         if (dbuff >= dbuff_end) {
22508                 EL(ha, "skipped, no buffer space, needed=0\n");
22509                 entry->h.driver_flags = (uint8_t)
22510                     (entry->h.driver_flags | SKIPPED_FLAG);
22511                 return;
22512         }
22513 
22514         /* Pause RISC. */
22515         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
22516                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
22517                 for (timer = 30000;
22518                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0;
22519                     timer--) {
22520                         if (timer) {
22521                                 drv_usecwait(100);
22522                                 if (timer % 10000 == 0) {
22523                                         EL(ha, "risc pause %d\n", timer);
22524                                 }
22525                         } else {
22526                                 EL(ha, "risc pause timeout\n");
22527                                 break;
22528                         }
22529                 }
22530         }
22531 
22532         QL_PRINT_7(ha, "done\n");
22533 }
22534 
22535 static void
22536 ql_2700_dt_rrisc(ql_adapter_state_t *ha, ql_dt_rrisc_t *entry,
22537     uint8_t *dbuff, uint8_t *dbuff_end)
22538 {
22539         clock_t timer;
22540 
22541         QL_PRINT_7(ha, "started\n");
22542 
22543         if (dbuff == NULL) {
22544                 QL_PRINT_7(ha, "null buf done\n");
22545                 return;
22546         }
22547         if (dbuff >= dbuff_end) {
22548                 EL(ha, "skipped, no buffer space, needed=0\n");
22549                 entry->h.driver_flags = (uint8_t)
22550                     (entry->h.driver_flags | SKIPPED_FLAG);
22551                 return;
22552         }
22553 
22554         /* Shutdown DMA. */
22555         WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN);
22556 
22557         /* Wait for DMA to stop. */
22558         for (timer = 0; timer < 30000; timer++) {
22559                 if (!(RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE)) {
22560                         break;
22561                 }
22562                 drv_usecwait(100);
22563         }
22564 
22565         /* Reset the chip. */
22566         WRT32_IO_REG(ha, ctrl_status, ISP_RESET);
22567         drv_usecwait(200);
22568 
22569         /* Wait for RISC to recover from reset. */
22570         for (timer = 30000; timer; timer--) {
22571                 ha->rom_status = RD16_IO_REG(ha, mailbox_out[0]);
22572                 if ((ha->rom_status & MBS_ROM_STATUS_MASK) != MBS_ROM_BUSY) {
22573                         break;
22574                 }
22575                 drv_usecwait(100);
22576         }
22577 
22578         /* Wait for reset to finish. */
22579         for (timer = 30000; timer; timer--) {
22580                 if (!(RD32_IO_REG(ha, ctrl_status) & ISP_RESET)) {
22581                         break;
22582                 }
22583                 drv_usecwait(100);
22584         }
22585 
22586         ADAPTER_STATE_LOCK(ha);
22587         ha->flags &= ~FIRMWARE_UP;
22588         ADAPTER_STATE_UNLOCK(ha);
22589 
22590         QL_PRINT_7(ha, "done\n");
22591 }
22592 
22593 static void
22594 ql_2700_dt_dint(ql_adapter_state_t *ha, ql_dt_dint_t *entry,
22595     uint8_t *dbuff, uint8_t *dbuff_end)
22596 {
22597         QL_PRINT_7(ha, "started, pci_offset=%xh, data=%xh\n",
22598             entry->pci_offset, entry->data);
22599 
22600         if (dbuff == NULL) {
22601                 QL_PRINT_7(ha, "null buf done\n");
22602                 return;
22603         }
22604         if (dbuff >= dbuff_end) {
22605                 EL(ha, "skipped, no buffer space, needed=0\n");
22606                 entry->h.driver_flags = (uint8_t)
22607                     (entry->h.driver_flags | SKIPPED_FLAG);
22608                 return;
22609         }
22610 
22611         ql_pci_config_put32(ha, entry->pci_offset, entry->data);
22612 
22613         QL_PRINT_7(ha, "done\n");
22614 }
22615 
22616 /*ARGSUSED*/
22617 static int
22618 ql_2700_dt_ghbd(ql_adapter_state_t *ha, ql_dt_ghbd_t *entry,
22619     uint8_t *dbuff, uint8_t *dbuff_end)
22620 {
22621         QL_PRINT_7(ha, "started\n");
22622 
22623         QL_PRINT_7(ha, "skipped, not supported\n");
22624         if (dbuff != NULL) {
22625                 entry->h.driver_flags = (uint8_t)
22626                     (entry->h.driver_flags | SKIPPED_FLAG);
22627         }
22628 
22629         return (0);
22630 }
22631 
22632 /*ARGSUSED*/
22633 static int
22634 ql_2700_dt_scra(ql_adapter_state_t *ha, ql_dt_scra_t *entry,
22635     uint8_t *dbuff, uint8_t *dbuff_end)
22636 {
22637         QL_PRINT_7(ha, "started\n");
22638 
22639         QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22640         if (dbuff != NULL) {
22641                 entry->h.driver_flags = (uint8_t)
22642                     (entry->h.driver_flags | SKIPPED_FLAG);
22643         }
22644 
22645         return (0);
22646 }
22647 
22648 static int
22649 ql_2700_dt_rrreg(ql_adapter_state_t *ha, ql_dt_rrreg_t *entry,
22650     uint8_t *dbuff, uint8_t *dbuff_end)
22651 {
22652         int             esize;
22653         uint32_t        i;
22654         uint8_t         *bp = dbuff;
22655         uint8_t         *reg = (uint8_t *)ha->iobase + 0xc4;
22656         uint32_t        addr = entry->addr;
22657         uint32_t        cnt = entry->count;
22658 
22659         QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
22660             (void *)dbuff, entry->addr, entry->count);
22661 
22662         esize = cnt * 4;        /* addr */
22663         esize += cnt * 4;       /* data */
22664 
22665         if (dbuff == NULL) {
22666                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22667                 return (esize);
22668         }
22669         if (esize + dbuff >= dbuff_end) {
22670                 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22671                 entry->h.driver_flags = (uint8_t)
22672                     (entry->h.driver_flags | SKIPPED_FLAG);
22673                 return (0);
22674         }
22675 
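        /* select window 0x40; write the address (bit 31 set) to 0xc0, then read the data at 0xc4 */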
22676         WRT32_IO_REG(ha, io_base_addr, 0x40);
22677         while (cnt--) {
22678                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, addr | 0x80000000);
22679                 *bp++ = LSB(LSW(addr));
22680                 *bp++ = MSB(LSW(addr));
22681                 *bp++ = LSB(MSW(addr));
22682                 *bp++ = MSB(MSW(addr));
22683                 for (i = 0; i < 4; i++) {
22684                         *bp++ = RD_REG_BYTE(ha, reg + i);
22685                 }
22686                 addr += 4;
22687         }
22688 
22689         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22690         return (esize);
22691 }
22692 
22693 static void
22694 ql_2700_dt_wrreg(ql_adapter_state_t *ha, ql_dt_wrreg_t *entry,
22695     uint8_t *dbuff, uint8_t *dbuff_end)
22696 {
22697         QL_PRINT_7(ha, "started, addr=%xh, data=%xh\n", entry->addr,
22698             entry->data);
22699 
22700         if (dbuff == NULL) {
22701                 QL_PRINT_7(ha, "null buf done\n");
22702                 return;
22703         }
22704         if (dbuff >= dbuff_end) {
22705                 EL(ha, "skipped, no buffer space, needed=0\n");
22706                 entry->h.driver_flags = (uint8_t)
22707                     (entry->h.driver_flags | SKIPPED_FLAG);
22708                 return;
22709         }
22710 
22711         WRT32_IO_REG(ha, io_base_addr, 0x40);
22712         WRT_REG_DWORD(ha, ha->iobase + 0xc4, entry->data);
22713         WRT_REG_DWORD(ha, ha->iobase + 0xc0, entry->addr);
22714 
22715         QL_PRINT_7(ha, "done\n");
22716 }
22717 
22718 static int
22719 ql_2700_dt_rrram(ql_adapter_state_t *ha, ql_dt_rrram_t *entry,
22720     uint8_t *dbuff, uint8_t *dbuff_end)
22721 {
22722         int     rval, esize;
22723 
22724         QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
22725             (void *)dbuff, entry->addr, entry->count);
22726 
22727         esize = entry->count * 4;    /* data */
22728 
22729         if (dbuff == NULL) {
22730                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22731                 return (esize);
22732         }
22733         if (esize + dbuff >= dbuff_end) {
22734                 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22735                 entry->h.driver_flags = (uint8_t)
22736                     (entry->h.driver_flags | SKIPPED_FLAG);
22737                 return (0);
22738         }
22739 
22740         if ((rval = ql_2700_dump_ram(ha, MBC_MPI_RAM, entry->addr,
22741             entry->count, dbuff)) != QL_SUCCESS) {
22742                 EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
22743                     "esize=0\n", rval, entry->addr, entry->count);
22744                 return (0);
22745         }
22746 
22747         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22748         return (esize);
22749 }
22750 
22751 static int
22752 ql_2700_dt_rpcic(ql_adapter_state_t *ha, ql_dt_rpcic_t *entry,
22753     uint8_t *dbuff, uint8_t *dbuff_end)
22754 {
22755         int             esize;
22756         uint32_t        i;
22757         uint8_t         *bp = dbuff;
22758         uint32_t        addr = entry->addr;
22759         uint32_t        cnt = entry->count;
22760 
22761         QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
22762             (void *)dbuff, entry->addr, entry->count);
22763 
22764         esize = cnt * 4;        /* addr */
22765         esize += cnt * 4;       /* data */
22766 
22767         if (dbuff == NULL) {
22768                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22769                 return (esize);
22770         }
22771         if (esize + dbuff >= dbuff_end) {
22772                 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22773                 entry->h.driver_flags = (uint8_t)
22774                     (entry->h.driver_flags | SKIPPED_FLAG);
22775                 return (0);
22776         }
22777 
22778         while (cnt--) {
22779                 *bp++ = LSB(LSW(addr));
22780                 *bp++ = MSB(LSW(addr));
22781                 *bp++ = LSB(MSW(addr));
22782                 *bp++ = MSB(MSW(addr));
22783                 for (i = 0; i < 4; i++) {
22784                         *bp++ = ql_pci_config_get8(ha, addr++);
22785                 }
22786         }
22787 
22788         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22789         return (esize);
22790 }
22791 
22792 static int
22793 ql_2700_dt_gques(ql_adapter_state_t *ha, ql_dt_gques_t *entry,
22794     uint8_t *dbuff, uint8_t *dbuff_end)
22795 {
22796         int             esize;
22797         uint32_t        q_cnt, e_cnt, data;
22798         uint8_t         *bp = dbuff;
22799 
22800         QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
22801             (void *)dbuff, entry->num_queues, entry->queue_type);
22802 
22803         if (entry->queue_type == 1) {
22804                 ql_request_q_t  *req_q;
22805 
22806                 e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
22807                 esize = e_cnt * 2;      /* queue number */
22808                 esize += e_cnt * 2;     /* shadow entries */
22809 
22810                 /* shadow size */
22811                 esize += SHADOW_ENTRY_SIZE;
22812                 if (e_cnt > 1) {
22813                         esize += SHADOW_ENTRY_SIZE;
22814                 }
22815                 if (dbuff == NULL) {
22816                         QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22817                         return (esize);
22818                 }
22819                 if (esize + dbuff >= dbuff_end) {
22820                         EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22821                         entry->h.driver_flags = (uint8_t)
22822                             (entry->h.driver_flags | SKIPPED_FLAG);
22823                         return (0);
22824                 }
22825                 entry->num_queues = e_cnt;
22826 
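                /* per-queue record: 16-bit queue number, 16-bit count (1), 32-bit shadow pointer value */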
22827                 for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
22828                         req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
22829                         e_cnt = 1;
22830                         data = ddi_get32(req_q->req_ring.acc_handle,
22831                             req_q->req_out_shadow_ptr);
22832                         *bp++ = LSB(q_cnt);
22833                         *bp++ = MSB(q_cnt);
22834                         *bp++ = LSB(e_cnt);
22835                         *bp++ = MSB(e_cnt);
22836                         *bp++ = LSB(LSW(data));
22837                         *bp++ = MSB(LSW(data));
22838                         *bp++ = LSB(MSW(data));
22839                         *bp++ = MSB(MSW(data));
22840                 }
22841         } else if (entry->queue_type == 2) {
22842                 ql_response_q_t *rsp_q;
22843 
22844                 e_cnt = ha->rsp_queues_cnt;
22845                 esize = e_cnt * 2;      /* queue number */
22846                 esize += e_cnt * 2;     /* shadow entries */
22847 
22848                 /* shadow size */
22849                 for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
22850                         esize += SHADOW_ENTRY_SIZE;
22851                 }
22852 
22853                 if (dbuff == NULL) {
22854                         QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22855                         return (esize);
22856                 }
22857                 if (esize + dbuff >= dbuff_end) {
22858                         EL(ha, "skipped2, no buffer space, needed=%xh\n",
22859                             esize);
22860                         entry->h.driver_flags = (uint8_t)
22861                             (entry->h.driver_flags | SKIPPED_FLAG);
22862                         return (0);
22863                 }
22864                 entry->num_queues = e_cnt;
22865 
22866                 for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
22867                         rsp_q = ha->rsp_queues[q_cnt];
22868                         e_cnt = 1;
22869                         data = ddi_get32(rsp_q->rsp_ring.acc_handle,
22870                             rsp_q->rsp_in_shadow_ptr);
22871                         *bp++ = LSB(q_cnt);
22872                         *bp++ = MSB(q_cnt);
22873                         *bp++ = LSB(e_cnt);
22874                         *bp++ = MSB(e_cnt);
22875                         *bp++ = LSB(LSW(data));
22876                         *bp++ = MSB(LSW(data));
22877                         *bp++ = LSB(MSW(data));
22878                         *bp++ = MSB(MSW(data));
22879                 }
22880         } else if (entry->queue_type == 3) {
22881                 EL(ha, "skipped, no ATIO queue, esize=0\n");
22882                 if (dbuff != NULL) {
22883                         entry->num_queues = 0;
22884                         entry->h.driver_flags = (uint8_t)
22885                             (entry->h.driver_flags | SKIPPED_FLAG);
22886                 }
22887                 return (0);
22888         } else {
22889                 EL(ha, "skipped, unknown queue_type %d, esize=0\n",
22890                     entry->queue_type);
22891                 if (dbuff != NULL) {
22892                         entry->h.driver_flags = (uint8_t)
22893                             (entry->h.driver_flags | SKIPPED_FLAG);
22894                 }
22895                 return (0);
22896         }
22897 
22898         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22899         return (esize);
22900 }
22901 
22902 static int
22903 ql_2700_dt_wdmp(ql_adapter_state_t *ha, ql_dt_wdmp_t *entry,
22904     uint8_t *dbuff, uint8_t *dbuff_end)
22905 {
22906         int             esize;
22907         uint8_t         *bp = dbuff;
22908         uint32_t        data, cnt = entry->length, *dp = entry->data;
22909 
22910         QL_PRINT_7(ha, "started, buf=%ph, length=%xh\n",
22911             (void *)dbuff, entry->length);
22912 
22913         esize = cnt;
22914         if (dbuff == NULL) {
22915                 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22916                 return (esize);
22917         }
22918         if (esize + dbuff >= dbuff_end) {
22919                 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22920                 entry->h.driver_flags = (uint8_t)
22921                     (entry->h.driver_flags | SKIPPED_FLAG);
22922                 return (0);
22923         }
22924 
22925         while (cnt--) {
22926                 data = *dp++;
22927                 *bp++ = LSB(LSW(data));
22928                 *bp++ = MSB(LSW(data));
22929                 *bp++ = LSB(MSW(data));
22930                 *bp++ = MSB(MSW(data));
22931         }
22932         QL_PRINT_7(ha, "%s\n", dbuff);
22933 
22934         QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22935         return (esize);
22936 }
22937 
22938 /*
22939  * ql_2700_dump_ram
22940  *      Dumps RAM.
22941  *      RISC interrupts must be disabled when this routine is called.
22942  *
22943  * Input:
22944  *      ha:             adapter state pointer.
22945  *      cmd:            MBC_DUMP_RAM_EXTENDED/MBC_MPI_RAM.
22946  *      risc_address:   RISC code start address.
22947  *      len:            Number of 32-bit words to dump.
22948  *      bp:             buffer pointer.
22949  *
22950  * Returns:
22951  *      ql local function return status code.
22952  *
22953  * Context:
22954  *      Interrupt or Kernel context, no mailbox commands allowed.
22955  */
22956 static int
22957 ql_2700_dump_ram(ql_adapter_state_t *ha, uint16_t cmd, uint32_t risc_address,
22958     uint32_t len, uint8_t *bp)
22959 {
22960         dma_mem_t       mem;
22961         uint32_t        i, stat, timer;
22962         uint8_t         *dp;
22963         int             rval = QL_SUCCESS;
22964 
22965         QL_PRINT_7(ha, "started, cmd=%xh, risc_address=%xh, len=%xh, "
22966             "bp=%ph\n", cmd, risc_address, len, (void *)bp);
22967 
22968         mem.size = len * 4;
22969         mem.type = LITTLE_ENDIAN_DMA;
22970         mem.max_cookie_count = 1;
22971         mem.alignment = 8;
22972         if ((rval = ql_alloc_phys(ha, &mem, KM_SLEEP)) != QL_SUCCESS) {
22973                 EL(ha, "alloc status=%xh\n", rval);
22974                 return (rval);
22975         }
22976 
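	/*
	 * Load the mailbox registers with the dump command, the RISC
	 * source address, the 64-bit DMA buffer address, and the length
	 * in words; MBC_MPI_RAM additionally sets bit 0 of mailbox 9.
	 */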
22977         WRT16_IO_REG(ha, mailbox_in[0], cmd);
22978         WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
22979         WRT16_IO_REG(ha, mailbox_in[2], MSW(LSD(mem.cookie.dmac_laddress)));
22980         WRT16_IO_REG(ha, mailbox_in[3], LSW(LSD(mem.cookie.dmac_laddress)));
22981         WRT16_IO_REG(ha, mailbox_in[4], MSW(len));
22982         WRT16_IO_REG(ha, mailbox_in[5], LSW(len));
22983         WRT16_IO_REG(ha, mailbox_in[6], MSW(MSD(mem.cookie.dmac_laddress)));
22984         WRT16_IO_REG(ha, mailbox_in[7], LSW(MSD(mem.cookie.dmac_laddress)));
22985         WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
22986         if (cmd == MBC_MPI_RAM) {
22987                 WRT16_IO_REG(ha, mailbox_in[9], BIT_0);
22988         }
22989 
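	/*
	 * Set the host interrupt to issue the mailbox command, then poll
	 * the risc2host status for a mailbox completion (1 or 0x10) or a
	 * mailbox error (2 or 0x11), waiting up to about 30 seconds
	 * (6,000,000 x 5 usec). On error, the mailbox 0 status is returned.
	 */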
22990         WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
22991         for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
22992                 stat = RD32_IO_REG(ha, risc2host);
22993                 if (stat & RH_RISC_INT) {
22994                         stat &= 0xff;
22995                         if ((stat == 1) || (stat == 0x10)) {
22996                                 break;
22997                         } else if ((stat == 2) || (stat == 0x11)) {
22998                                 rval = RD16_IO_REG(ha, mailbox_out[0]);
22999                                 break;
23000                         }
23001                         WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
23002                 }
23003                 drv_usecwait(5);
23004         }
23005         WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
23006 
23007         if (timer == 0) {
23008                 QL_PRINT_7(ha, "timeout addr=%xh\n", risc_address);
23009                 rval = QL_FUNCTION_TIMEOUT;
23010         } else {
23011                 (void) ddi_dma_sync(mem.dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
23012                 dp = mem.bp;
23013                 for (i = 0; i < mem.size; i++) {
23014                         *bp++ = *dp++;
23015                 }
23016         }
23017 
23018         ql_free_phys(ha, &mem);
23019 
23020         QL_PRINT_7(ha, "done\n");
23021         return (rval);
23022 }
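/*
 * Illustrative caller sketch only (not part of the driver): the word count
 * and RISC address below are hypothetical values, not taken from the dump
 * templates above.
 *
 *	uint32_t	words = 0x400;
 *	uint8_t		*buf;
 *
 *	buf = kmem_zalloc(words * 4, KM_SLEEP);
 *	if (ql_2700_dump_ram(ha, MBC_DUMP_RAM_EXTENDED, 0x100000, words,
 *	    buf) != QL_SUCCESS) {
 *		EL(ha, "RISC RAM dump failed\n");
 *	}
 *	kmem_free(buf, words * 4);
 */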