/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 */

/*
 * This file contains the startup code to initialize the HBA for use
 * with the PQI interface.
 */
#include <smartpqi.h>

#define PQI_DEVICE_SIGNATURE                    "PQI DREG"
#define PQI_STATUS_IDLE                         0x0
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY    0x2

typedef struct _func_list_ {
        char            *func_name;
        boolean_t       (*func)(pqi_state_t);
} func_list_t;

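/*
 * The startup sequence is kept in one place as an X-macro style list:
 * item() is defined just before each use to emit either a forward
 * prototype (below) or a table entry (in startup_funcs), then #undef'd.
 * For example, during the forward declarations item(pqi_check_alloc)
 * expands to "static boolean_t pqi_check_alloc(pqi_state_t);".
 */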
/* BEGIN CSTYLED */
#define FORWARD_DECLS() \
        item(pqi_calculate_io_resources) \
        item(pqi_check_alloc) \
        item(pqi_wait_for_mode_ready) \
        item(save_ctrl_mode_pqi) \
        item(pqi_process_config_table) \
        item(pqi_alloc_admin_queue) \
        item(pqi_create_admin_queues) \
        item(pqi_report_device_capability) \
        item(pqi_valid_device_capability) \
        item(pqi_calculate_queue_resources) \
        item(pqi_alloc_io_resource) \
        item(pqi_alloc_operation_queues) \
        item(pqi_init_operational_queues) \
        item(pqi_create_queues) \
        item(pqi_change_irq_mode) \
        item(pqi_start_heartbeat_timer) \
        item(pqi_enable_events) \
        item(pqi_get_hba_version) \
        item(pqi_version_to_hba) \
        item(pqi_schedule_update_time_worker) \
        item(pqi_scan_scsi_devices)

#define item(a) static boolean_t a(pqi_state_t);
FORWARD_DECLS()
#undef item
/* END CSTYLED */

#define STARTUP_FUNCS \
    item(sis_wait_for_ctrl_ready) \
    item(sis_get_ctrl_props) \
    item(sis_get_pqi_capabilities) \
    item(pqi_calculate_io_resources) \
    item(pqi_check_alloc) \
    item(sis_init_base_struct_addr) \
    item(pqi_wait_for_mode_ready) \
    item(save_ctrl_mode_pqi) \
    item(pqi_process_config_table) \
    item(pqi_alloc_admin_queue) \
    item(pqi_create_admin_queues) \
    item(pqi_report_device_capability) \
    item(pqi_valid_device_capability) \
    item(pqi_calculate_queue_resources) \
    item(pqi_alloc_io_resource) \
    item(pqi_alloc_operation_queues) \
    item(pqi_init_operational_queues) \
    item(pqi_create_queues) \
    item(pqi_change_irq_mode) \
    item(pqi_start_heartbeat_timer) \
    item(pqi_enable_events) \
    item(pqi_get_hba_version) \
    item(pqi_version_to_hba) \
    item(pqi_schedule_update_time_worker) \
    item(pqi_scan_scsi_devices)

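/*
 * Each item() below expands to one table entry, e.g.
 * { "sis_wait_for_ctrl_ready", sis_wait_for_ctrl_ready }, pairing each
 * startup step with a printable name for diagnostics.
 */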
func_list_t startup_funcs[] =
{
#define item(a) { #a, a },
        STARTUP_FUNCS
#undef item
        { NULL, NULL }
};

/* ---- Forward declarations for utility functions ---- */
static void bcopy_fromregs(pqi_state_t s, uint8_t *iomem, uint8_t *dst,
    uint32_t len);
static boolean_t submit_admin_rqst_sync(pqi_state_t s,
    pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp);
static boolean_t create_event_queue(pqi_state_t s);
static boolean_t create_queue_group(pqi_state_t s, int idx);
static boolean_t submit_raid_rqst_sync(pqi_state_t s, pqi_iu_header_t *rqst,
    pqi_raid_error_info_t e_info);
static boolean_t identify_controller(pqi_state_t s,
    bmic_identify_controller_t *ident);
static boolean_t write_host_wellness(pqi_state_t s, void *buf, size_t len);
static boolean_t get_device_list(pqi_state_t s, report_phys_lun_extended_t **pl,
    report_log_lun_extended_t **ll);
static boolean_t build_raid_path_request(pqi_raid_path_request_t *rqst, int cmd,
    caddr_t lun, uint32_t len, int vpd_page);
static boolean_t identify_physical_device(pqi_state_t s, pqi_device_t devp,
    bmic_identify_physical_device_t *buf);
static pqi_device_t create_phys_dev(pqi_state_t s,
    report_phys_lun_extended_entry_t *e);
static pqi_device_t create_logical_dev(pqi_state_t s,
    report_log_lun_extended_entry_t *e);
static boolean_t is_new_dev(pqi_state_t s, pqi_device_t new_dev);
static boolean_t revert_to_sis(pqi_state_t s);
static void save_ctrl_mode(pqi_state_t s, int mode);
static boolean_t scsi_common(pqi_state_t s, pqi_raid_path_request_t *rqst,
    caddr_t buf, int len);
static void update_time(void *v);

static int reset_devices = 1;

boolean_t
pqi_check_firmware(pqi_state_t s)
{
        uint32_t        status;

        status = G32(s, sis_firmware_status);
        if (status & SIS_CTRL_KERNEL_PANIC)
                return (B_FALSE);

        if (sis_read_scratch(s) == SIS_MODE)
                return (B_TRUE);

        if (status & SIS_CTRL_KERNEL_UP) {
                sis_write_scratch(s, SIS_MODE);
                return (B_TRUE);
        } else {
                return (revert_to_sis(s));
        }
}

boolean_t
pqi_prep_full(pqi_state_t s)
{
        func_list_t     *f;

        for (f = startup_funcs; f->func_name != NULL; f++)
                if (f->func(s) == B_FALSE)
                        return (B_FALSE);

        return (B_TRUE);
}

/*
 * []----------------------------------------------------------[]
 * | Startup functions called in sequence to initialize HBA.    |
 * []----------------------------------------------------------[]
 */

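/*
 * Worked example for the sizing math below (hypothetical numbers):
 * with a 4 KiB PAGESIZE and a 1 MiB transfer cap, max_sg_entries
 * starts at 1 MiB / 4 KiB + 1 = 257; if the controller allows that
 * many, max_xfer_size rounds back down to (257 - 1) * 4 KiB = 1 MiB
 * and s_max_sectors becomes 2048.
 */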
static boolean_t
pqi_calculate_io_resources(pqi_state_t s)
{
        uint32_t        max_xfer_size;
        uint32_t        max_sg_entries;

        s->s_max_io_slots = min(PQI_MAX_OUTSTANDING_REQUESTS,
            s->s_max_outstanding_requests);

        max_xfer_size = min(s->s_max_xfer_size, PQI_MAX_TRANSFER_SIZE);

        /* ---- add 1 when buf is not page aligned ---- */
        max_sg_entries = max_xfer_size / PAGESIZE + 1;
        max_sg_entries = min(max_sg_entries, s->s_max_sg_entries);
        max_xfer_size = (max_sg_entries - 1) * PAGESIZE;

        s->s_sg_chain_buf_length = (max_sg_entries * sizeof (pqi_sg_entry_t)) +
            PQI_EXTRA_SGL_MEMORY;

        s->s_max_sectors = max_xfer_size / 512;

        return (B_TRUE);
}

static boolean_t
pqi_check_alloc(pqi_state_t s)
{
        if (s->s_max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
                s->s_max_outstanding_requests = PQI_MAX_OUTSTANDING_REQUESTS;

        s->s_error_dma = pqi_alloc_single(s, (s->s_max_outstanding_requests *
            PQI_ERROR_BUFFER_ELEMENT_LENGTH) + SIS_BASE_STRUCT_ALIGNMENT);
        if (s->s_error_dma == NULL)
                return (B_FALSE);

        return (B_TRUE);
}

#define MILLISECOND     1000
#define MS_TO_SEC       1000
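
/*
 * MILLISECOND is expressed in microseconds (the unit drv_usecwait()
 * takes) and MS_TO_SEC is the number of 1 ms polls that make up one
 * second, so each loop below waits up to roughly one second per stage
 * of the PQI readiness handshake: signature, then function and status
 * code, then device status.
 */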

static boolean_t
pqi_wait_for_mode_ready(pqi_state_t s)
{
        uint64_t        signature;
        int32_t         count = MS_TO_SEC;

        for (;;) {
                signature = G64(s, pqi_registers.signature);
                if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
                    sizeof (signature)) == 0)
                        break;
                if (count-- == 0)
                        return (B_FALSE);
                drv_usecwait(MILLISECOND);
        }

        count = MS_TO_SEC;
        for (;;) {
                if (G64(s, pqi_registers.function_and_status_code) ==
                    PQI_STATUS_IDLE)
                        break;
                if (count-- == 0)
                        return (B_FALSE);
                drv_usecwait(MILLISECOND);
        }

        count = MS_TO_SEC;
        for (;;) {
                if (G32(s, pqi_registers.device_status) ==
                    PQI_DEVICE_STATE_ALL_REGISTERS_READY)
                        break;
                if (count-- == 0)
                        return (B_FALSE);
                drv_usecwait(MILLISECOND);
        }

        return (B_TRUE);
}

static boolean_t
save_ctrl_mode_pqi(pqi_state_t s)
{
        save_ctrl_mode(s, PQI_MODE);
        return (B_TRUE);
}

static boolean_t
pqi_process_config_table(pqi_state_t s)
{
        pqi_config_table_t                      *c_table;
        pqi_config_table_section_header_t       *section;
        uint32_t                                section_offset;

        c_table = PQI_ZALLOC(s->s_config_table_len, KM_SLEEP);
        bcopy_fromregs(s, (uint8_t *)s->s_reg + s->s_config_table_offset,
            (uint8_t *)c_table, s->s_config_table_len);

        section_offset = c_table->first_section_offset;
        while (section_offset) {
                section = (pqi_config_table_section_header_t *)
                    ((caddr_t)c_table + section_offset);
                switch (section->section_id) {
                case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
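                        /*
                         * The counter address is computed from s_reg,
                         * so it points at the live controller register
                         * space, not at the local c_table copy that is
                         * freed below.
                         */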
                        /* LINTED E_BAD_PTR_CAST_ALIGN */
                        s->s_heartbeat_counter = (uint32_t *)
                            ((caddr_t)s->s_reg +
                            s->s_config_table_offset + section_offset +
                            offsetof(struct pqi_config_table_heartbeat,
                            heartbeat_counter));
                        break;
                }
                section_offset = section->next_section_offset;
        }
        PQI_FREE(c_table, s->s_config_table_len);
        return (B_TRUE);
}

static boolean_t
pqi_alloc_admin_queue(pqi_state_t s)
{
        pqi_admin_queues_t              *aq;
        pqi_admin_queues_aligned_t      *aq_aligned;
        int                             len;

        len = sizeof (*aq_aligned) + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
        if ((s->s_adminq_dma = pqi_alloc_single(s, len)) == NULL)
                return (B_FALSE);
        (void) memset(s->s_adminq_dma->alloc_memory, 0,
            s->s_adminq_dma->len_to_alloc);
        (void) ddi_dma_sync(s->s_adminq_dma->handle, 0,
            s->s_adminq_dma->len_to_alloc, DDI_DMA_SYNC_FORDEV);

        aq = &s->s_admin_queues;
        aq_aligned = PQIALIGN_TYPED(s->s_adminq_dma->alloc_memory,
            PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, pqi_admin_queues_aligned_t *);
        aq->iq_element_array = (caddr_t)&aq_aligned->iq_element_array;
        aq->oq_element_array = (caddr_t)&aq_aligned->oq_element_array;
        aq->iq_ci = &aq_aligned->iq_ci;
        aq->oq_pi = &aq_aligned->oq_pi;

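        /*
         * Convert each kernel virtual address into a bus address by
         * adding its offset within the allocation to the DMA base
         * address of the allocation.
         */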
        aq->iq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
            ((uintptr_t)aq->iq_element_array -
            (uintptr_t)s->s_adminq_dma->alloc_memory);
        aq->oq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
            ((uintptr_t)aq->oq_element_array -
            (uintptr_t)s->s_adminq_dma->alloc_memory);

        aq->iq_ci_bus_addr = s->s_adminq_dma->dma_addr +
            ((uintptr_t)aq->iq_ci - (uintptr_t)s->s_adminq_dma->alloc_memory);
        aq->oq_pi_bus_addr = s->s_adminq_dma->dma_addr +
            ((uintptr_t)aq->oq_pi - (uintptr_t)s->s_adminq_dma->alloc_memory);
        return (B_TRUE);
}

static boolean_t
pqi_create_admin_queues(pqi_state_t s)
{
        pqi_admin_queues_t *aq = &s->s_admin_queues;
        int                     val;
        int                     status;
        int                     countdown = 1000;

        S64(s, pqi_registers.admin_iq_element_array_addr,
            aq->iq_element_array_bus_addr);
        S64(s, pqi_registers.admin_oq_element_array_addr,
            aq->oq_element_array_bus_addr);
        S64(s, pqi_registers.admin_iq_ci_addr,
            aq->iq_ci_bus_addr);
        S64(s, pqi_registers.admin_oq_pi_addr,
            aq->oq_pi_bus_addr);

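        /*
         * admin_queue_params packs three fields: the IQ element count
         * in bits 0-7, the OQ element count in bits 8-15, and the
         * interrupt message number in bits 16-23.
         */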
        val = PQI_ADMIN_IQ_NUM_ELEMENTS | PQI_ADMIN_OQ_NUM_ELEMENTS << 8 |
            aq->int_msg_num << 16;
        S32(s, pqi_registers.admin_queue_params, val);
        S64(s, pqi_registers.function_and_status_code,
            PQI_CREATE_ADMIN_QUEUE_PAIR);

        while (countdown-- > 0) {
                status = G64(s, pqi_registers.function_and_status_code);
                if (status == PQI_STATUS_IDLE)
                        break;
                drv_usecwait(1000);     /* ---- Wait 1ms ---- */
        }
        if (status != PQI_STATUS_IDLE)
                return (B_FALSE);

        /*
         * The offset registers are not initialized to the correct
         * offsets until *after* the create admin queue pair command
         * completes successfully.
         */
        aq->iq_pi = (void *)(intptr_t)((intptr_t)s->s_reg +
            PQI_DEVICE_REGISTERS_OFFSET +
            G64(s, pqi_registers.admin_iq_pi_offset));
        ASSERT((G64(s, pqi_registers.admin_iq_pi_offset) +
            PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);

        aq->oq_ci = (void *)(intptr_t)((intptr_t)s->s_reg +
            PQI_DEVICE_REGISTERS_OFFSET +
            G64(s, pqi_registers.admin_oq_ci_offset));
        ASSERT((G64(s, pqi_registers.admin_oq_ci_offset) +
            PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);

        return (B_TRUE);
}

static boolean_t
pqi_report_device_capability(pqi_state_t s)
{
        pqi_general_admin_request_t     rqst;
        pqi_general_admin_response_t    rsp;
        pqi_device_capability_t         *cap;
        pqi_iu_layer_descriptor_t       *iu_layer;
        pqi_dma_overhead_t              *dma;
        boolean_t                       rval;
        pqi_sg_entry_t                  *sg;

        (void) memset(&rqst, 0, sizeof (rqst));

        rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
        rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
        rqst.function_code =
            PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
        rqst.data.report_device_capability.buffer_length =
            sizeof (*cap);

        if ((dma = pqi_alloc_single(s, sizeof (*cap))) == NULL)
                return (B_FALSE);

        sg = &rqst.data.report_device_capability.sg_descriptor;
        sg->sg_addr = dma->dma_addr;
        sg->sg_len = dma->len_to_alloc;
        sg->sg_flags = CISS_SG_LAST;

        rval = submit_admin_rqst_sync(s, &rqst, &rsp);
        (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
        cap = (pqi_device_capability_t *)dma->alloc_memory;

        s->s_max_inbound_queues = cap->max_inbound_queues;
        s->s_max_elements_per_iq = cap->max_elements_per_iq;
        s->s_max_iq_element_length = cap->max_iq_element_length * 16;
        s->s_max_outbound_queues = cap->max_outbound_queues;
        s->s_max_elements_per_oq = cap->max_elements_per_oq;
        s->s_max_oq_element_length = cap->max_oq_element_length * 16;

        iu_layer = &cap->iu_layer_descriptors[PQI_PROTOCOL_SOP];
        s->s_max_inbound_iu_length_per_firmware =
            iu_layer->max_inbound_iu_length;
        s->s_inbound_spanning_supported = iu_layer->inbound_spanning_supported;
        s->s_outbound_spanning_supported =
            iu_layer->outbound_spanning_supported;

        pqi_free_single(s, dma);
        return (rval);
}

static boolean_t
pqi_valid_device_capability(pqi_state_t s)
{
        if (s->s_max_iq_element_length < PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
                return (B_FALSE);
        if (s->s_max_oq_element_length < PQI_OPERATIONAL_OQ_ELEMENT_LENGTH)
                return (B_FALSE);
        if (s->s_max_inbound_iu_length_per_firmware <
            PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
                return (B_FALSE);
        /* ---- Controller doesn't support spanning but we need it ---- */
        if (!s->s_inbound_spanning_supported)
                return (B_FALSE);
        /* ---- Controller wants outbound spanning, the driver doesn't ---- */
        if (s->s_outbound_spanning_supported)
                return (B_FALSE);

        return (B_TRUE);
}

static boolean_t
pqi_calculate_queue_resources(pqi_state_t s)
{
        int     max_queue_groups;
        int     num_queue_groups;
        int     num_elements_per_iq;
        int     num_elements_per_oq;

        if (reset_devices) {
                num_queue_groups = 1;
        } else {
                max_queue_groups = min(s->s_max_inbound_queues / 2,
                    s->s_max_outbound_queues - 1);
                max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

                num_queue_groups = min(ncpus, s->s_intr_cnt);
                num_queue_groups = min(num_queue_groups, max_queue_groups);
        }
        s->s_num_queue_groups = num_queue_groups;

        s->s_max_inbound_iu_length =
            (s->s_max_inbound_iu_length_per_firmware /
            PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
            PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

        num_elements_per_iq = s->s_max_inbound_iu_length /
            PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
        /* ---- add one because one element in each queue is unusable ---- */
        num_elements_per_iq++;

        num_elements_per_iq = min(num_elements_per_iq,
            s->s_max_elements_per_iq);

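        /*
         * Each queue group pairs two inbound queues (RAID and AIO
         * path) with a single outbound queue, so the OQ is sized to
         * absorb responses from both IQs, plus the one unusable
         * element.
         */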
        num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
        num_elements_per_oq = min(num_elements_per_oq,
            s->s_max_elements_per_oq);

        s->s_num_elements_per_iq = num_elements_per_iq;
        s->s_num_elements_per_oq = num_elements_per_oq;

        s->s_max_sg_per_iu = ((s->s_max_inbound_iu_length -
            PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
            sizeof (struct pqi_sg_entry)) +
            PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
        return (B_TRUE);
}

static boolean_t
pqi_alloc_io_resource(pqi_state_t s)
{
        pqi_io_request_t        *io;
        size_t                  sg_chain_len;
        int                     i;

        s->s_io_rqst_pool = PQI_ZALLOC(s->s_max_io_slots * sizeof (*io),
            KM_SLEEP);

        sg_chain_len = s->s_sg_chain_buf_length;
        io = s->s_io_rqst_pool;
        for (i = 0; i < s->s_max_io_slots; i++) {
                io->io_iu = PQI_ZALLOC(s->s_max_inbound_iu_length, KM_SLEEP);

                /*
                 * TODO: Don't allocate dma space here. Move this to
                 * init_pkt when it's clear the data being transferred
                 * will not fit in the four SG slots provided by each
                 * command.
                 */
                io->io_sg_chain_dma = pqi_alloc_single(s, sg_chain_len);
                if (io->io_sg_chain_dma == NULL)
                        goto error_out;

                list_link_init(&io->io_list_node);
                io->io_index = (uint16_t)i;
                io->io_softc = s;
                io++;
        }

        return (B_TRUE);

error_out:
        /* ---- Walk the pool from the start to free partial allocs ---- */
        io = s->s_io_rqst_pool;
        for (i = 0; i < s->s_max_io_slots; i++, io++) {
                if (io->io_iu != NULL) {
                        PQI_FREE(io->io_iu, s->s_max_inbound_iu_length);
                        io->io_iu = NULL;
                }
                if (io->io_sg_chain_dma != NULL) {
                        pqi_free_single(s, io->io_sg_chain_dma);
                        io->io_sg_chain_dma = NULL;
                }
        }
        PQI_FREE(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
        s->s_io_rqst_pool = NULL;

        return (B_FALSE);
}

static boolean_t
pqi_alloc_operation_queues(pqi_state_t s)
{
        uint32_t        niq = s->s_num_queue_groups * 2;
        uint32_t        noq = s->s_num_queue_groups;
        uint32_t        queue_idx = (s->s_num_queue_groups * 3) + 1;
        uint32_t        i;
        size_t          array_len_iq;
        size_t          array_len_oq;
        size_t          alloc_len;
        caddr_t         aligned_pointer = NULL;
        pqi_queue_group_t       *qg;

        array_len_iq = PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
            s->s_num_elements_per_iq;
        array_len_oq = PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
            s->s_num_elements_per_oq;

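        /*
         * First pass: advance a NULL-based pointer through the whole
         * layout (element arrays, event queue, index words) to size
         * the single DMA allocation; the second pass below repeats
         * the walk against the real allocation to assign addresses.
         */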
        for (i = 0; i < niq; i++) {
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
                aligned_pointer += array_len_iq;
        }

        for (i = 0; i < noq; i++) {
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
                aligned_pointer += array_len_oq;
        }

        aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
            PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
        aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
            PQI_EVENT_OQ_ELEMENT_LENGTH;

        for (i = 0; i < queue_idx; i++) {
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
                aligned_pointer += sizeof (pqi_index_t);
        }

        alloc_len = (size_t)aligned_pointer +
            PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT + PQI_EXTRA_SGL_MEMORY;
        if ((s->s_queue_dma = pqi_alloc_single(s, alloc_len)) == NULL)
                return (B_FALSE);

        aligned_pointer = PQIALIGN_TYPED(s->s_queue_dma->alloc_memory,
            PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
        for (i = 0; i < s->s_num_queue_groups; i++) {
                qg = &s->s_queue_groups[i];

                qg->iq_element_array[RAID_PATH] = aligned_pointer;
                qg->iq_element_array_bus_addr[RAID_PATH] =
                    s->s_queue_dma->dma_addr +
                    ((uintptr_t)aligned_pointer -
                    (uintptr_t)s->s_queue_dma->alloc_memory);

                aligned_pointer += array_len_iq;
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);

                qg->iq_element_array[AIO_PATH] = aligned_pointer;
                qg->iq_element_array_bus_addr[AIO_PATH] =
                    s->s_queue_dma->dma_addr +
                    ((uintptr_t)aligned_pointer -
                    (uintptr_t)s->s_queue_dma->alloc_memory);

                aligned_pointer += array_len_iq;
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
        }
        for (i = 0; i < s->s_num_queue_groups; i++) {
                qg = &s->s_queue_groups[i];

                qg->oq_element_array = aligned_pointer;
                qg->oq_element_array_bus_addr =
                    s->s_queue_dma->dma_addr +
                    ((uintptr_t)aligned_pointer -
                    (uintptr_t)s->s_queue_dma->alloc_memory);

                aligned_pointer += array_len_oq;
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
        }

        s->s_event_queue.oq_element_array = aligned_pointer;
        s->s_event_queue.oq_element_array_bus_addr =
            s->s_queue_dma->dma_addr +
            ((uintptr_t)aligned_pointer -
            (uintptr_t)s->s_queue_dma->alloc_memory);
        aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
            PQI_EVENT_OQ_ELEMENT_LENGTH;

        aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
            PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);

        for (i = 0; i < s->s_num_queue_groups; i++) {
                qg = &s->s_queue_groups[i];

                /* LINTED E_BAD_PTR_CAST_ALIGN */
                qg->iq_ci[RAID_PATH] = (pqi_index_t *)aligned_pointer;
                qg->iq_ci_bus_addr[RAID_PATH] =
                    s->s_queue_dma->dma_addr +
                    ((uintptr_t)aligned_pointer -
                    (uintptr_t)s->s_queue_dma->alloc_memory);

                aligned_pointer += sizeof (pqi_index_t);
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);

                /* LINTED E_BAD_PTR_CAST_ALIGN */
                qg->iq_ci[AIO_PATH] = (pqi_index_t *)aligned_pointer;
                qg->iq_ci_bus_addr[AIO_PATH] =
                    s->s_queue_dma->dma_addr +
                    ((uintptr_t)aligned_pointer -
                    (uintptr_t)s->s_queue_dma->alloc_memory);

                aligned_pointer += sizeof (pqi_index_t);
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);

                /* LINTED E_BAD_PTR_CAST_ALIGN */
                qg->oq_pi = (pqi_index_t *)aligned_pointer;
                qg->oq_pi_bus_addr =
                    s->s_queue_dma->dma_addr +
                    ((uintptr_t)aligned_pointer -
                    (uintptr_t)s->s_queue_dma->alloc_memory);

                aligned_pointer += sizeof (pqi_index_t);
                aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
                    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
        }

        /* LINTED E_BAD_PTR_CAST_ALIGN */
        s->s_event_queue.oq_pi = (pqi_index_t *)aligned_pointer;
        s->s_event_queue.oq_pi_bus_addr =
            s->s_queue_dma->dma_addr +
            ((uintptr_t)aligned_pointer -
            (uintptr_t)s->s_queue_dma->alloc_memory);
        ASSERT((uintptr_t)aligned_pointer -
            (uintptr_t)s->s_queue_dma->alloc_memory +
            sizeof (pqi_index_t) <= s->s_queue_dma->len_to_alloc);

        return (B_TRUE);
}

static boolean_t
pqi_init_operational_queues(pqi_state_t s)
{
        int             i;
        uint16_t        iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
        uint16_t        oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;

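        /*
         * Queue ids are handed out sequentially starting at
         * PQI_MIN_OPERATIONAL_QUEUE_ID: the event queue claims the
         * first outbound id, then each group gets two inbound ids
         * (RAID and AIO) and one outbound id.
         */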
        for (i = 0; i < s->s_num_queue_groups; i++) {
                s->s_queue_groups[i].qg_softc = s;
        }
        s->s_event_queue.oq_id = oq_id++;
        for (i = 0; i < s->s_num_queue_groups; i++) {
                s->s_queue_groups[i].iq_id[RAID_PATH] = iq_id++;
                s->s_queue_groups[i].iq_id[AIO_PATH] = iq_id++;
                s->s_queue_groups[i].oq_id = oq_id++;
                s->s_queue_groups[i].qg_active = B_TRUE;
        }
        s->s_event_queue.int_msg_num = 0;
        for (i = 0; i < s->s_num_queue_groups; i++)
                s->s_queue_groups[i].int_msg_num = (uint16_t)i;

        for (i = 0; i < s->s_num_queue_groups; i++) {
                mutex_init(&s->s_queue_groups[i].submit_lock[0], NULL,
                    MUTEX_DRIVER, NULL);
                mutex_init(&s->s_queue_groups[i].submit_lock[1], NULL,
                    MUTEX_DRIVER, NULL);
                list_create(&s->s_queue_groups[i].request_list[RAID_PATH],
                    sizeof (pqi_io_request_t),
                    offsetof(struct pqi_io_request, io_list_node));
                list_create(&s->s_queue_groups[i].request_list[AIO_PATH],
                    sizeof (pqi_io_request_t),
                    offsetof(struct pqi_io_request, io_list_node));
        }
        return (B_TRUE);
}

static boolean_t
pqi_create_queues(pqi_state_t s)
{
        int     i;

        if (create_event_queue(s) == B_FALSE)
                return (B_FALSE);

        for (i = 0; i < s->s_num_queue_groups; i++) {
                if (create_queue_group(s, i) == B_FALSE) {
                        return (B_FALSE);
                }
        }

        return (B_TRUE);
}

static boolean_t
pqi_change_irq_mode(pqi_state_t s)
{
        /* ---- Device is already in MSI-X mode ---- */
        s->s_intr_ready = 1;
        return (B_TRUE);
}

static boolean_t
pqi_start_heartbeat_timer(pqi_state_t s)
{
        s->s_last_heartbeat_count = 0;
        s->s_last_intr_count = 0;

        s->s_watchdog = timeout(pqi_watchdog, s, drv_usectohz(WATCHDOG));
        return (B_TRUE);
}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
        (offsetof(struct pqi_event_config, descriptors) + \
        (PQI_MAX_EVENT_DESCRIPTORS * sizeof (pqi_event_descriptor_t)))

static boolean_t
pqi_enable_events(pqi_state_t s)
{
        int                     i;
        pqi_event_config_t      *ec;
        pqi_event_descriptor_t  *desc;
        pqi_general_mgmt_rqst_t rqst;
        pqi_dma_overhead_t      *dma;
        pqi_sg_entry_t          *sg;
        boolean_t               rval = B_FALSE;

        (void) memset(&rqst, 0, sizeof (rqst));
        dma = pqi_alloc_single(s, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH);
        if (dma == NULL)
                return (B_FALSE);

        rqst.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
        rqst.header.iu_length = offsetof(struct pqi_general_management_request,
            data.report_event_configuration.sg_descriptors[1]) -
            PQI_REQUEST_HEADER_LENGTH;
        rqst.data.report_event_configuration.buffer_length =
            PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
        sg = &rqst.data.report_event_configuration.sg_descriptors[0];
        sg->sg_addr = dma->dma_addr;
        sg->sg_len = dma->len_to_alloc;
        sg->sg_flags = CISS_SG_LAST;

        if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
                goto error_out;

        (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
        ec = (pqi_event_config_t *)dma->alloc_memory;
        for (i = 0; i < ec->num_event_descriptors; i++) {
                desc = &ec->descriptors[i];
                if (pqi_supported_event(desc->event_type) == B_TRUE)
                        desc->oq_id = s->s_event_queue.oq_id;
                else
                        desc->oq_id = 0;
        }

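        /*
         * Send the same buffer back, now with oq_id routing filled in
         * for each supported event, as a SET VENDOR EVENT CONFIG
         * request.
         */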
        rqst.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
        rqst.header.iu_length = offsetof(struct pqi_general_management_request,
            data.report_event_configuration.sg_descriptors[1]) -
            PQI_REQUEST_HEADER_LENGTH;
        rqst.data.report_event_configuration.buffer_length =
            PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
        (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORDEV);

        rval = submit_raid_rqst_sync(s, &rqst.header, NULL);

error_out:
        pqi_free_single(s, dma);
        return (rval);
}

/*
 * pqi_get_hba_version -- find HBA's version number
 */
static boolean_t
pqi_get_hba_version(pqi_state_t s)
{
        bmic_identify_controller_t      *ident;
        boolean_t                       rval = B_FALSE;

        ident = PQI_ZALLOC(sizeof (*ident), KM_SLEEP);
        if (identify_controller(s, ident) == B_FALSE)
                goto out;
        (void) memcpy(s->s_firmware_version, ident->firmware_version,
            sizeof (ident->firmware_version));
        s->s_firmware_version[sizeof (ident->firmware_version)] = '\0';
        (void) snprintf(s->s_firmware_version + strlen(s->s_firmware_version),
            sizeof (s->s_firmware_version) - strlen(s->s_firmware_version),
            "-%u", ident->firmware_build_number);
        rval = B_TRUE;
out:
        PQI_FREE(ident, sizeof (*ident));
        return (rval);
}

/*
 * pqi_version_to_hba -- send driver version to HBA
 */
static boolean_t
pqi_version_to_hba(pqi_state_t s)
{
        bmic_host_wellness_driver_version_t     *b;
        boolean_t                               rval = B_FALSE;

        b = PQI_ZALLOC(sizeof (*b), KM_SLEEP);
        b->start_tag[0] = '<';
        b->start_tag[1] = 'H';
        b->start_tag[2] = 'W';
        b->start_tag[3] = '>';
        b->drv_tag[0] = 'D';
        b->drv_tag[1] = 'V';
        b->driver_version_length = sizeof (b->driver_version);
        (void) snprintf(b->driver_version, sizeof (b->driver_version),
            "Illumos 1.0");
        b->end_tag[0] = 'Z';
        b->end_tag[1] = 'Z';

        rval = write_host_wellness(s, b, sizeof (*b));
        PQI_FREE(b, sizeof (*b));

        return (rval);
}

static boolean_t
pqi_schedule_update_time_worker(pqi_state_t s)
{
        update_time(s);
        return (B_TRUE);
}

static uint32_t pqi_next_lun;

static boolean_t
pqi_scan_scsi_devices(pqi_state_t s)
{
        report_phys_lun_extended_t      *phys_list      = NULL;
        report_log_lun_extended_t       *logical_list   = NULL;
        boolean_t                       rval            = B_FALSE;
        int                             num_phys        = 0;
        int                             num_logical     = 0;
        int                             i;
        pqi_device_t                    dev;

        if (get_device_list(s, &phys_list, &logical_list) == B_FALSE)
                goto error_out;

        if (phys_list) {
                num_phys = ntohl(phys_list->header.list_length) /
                    sizeof (phys_list->lun_entries[0]);
        }

        if (logical_list) {
                num_logical = ntohl(logical_list->header.list_length) /
                    sizeof (logical_list->lun_entries[0]);
        }

        /*
         * Need to look for devices that are no longer available. Clear
         * the scanned flag on every known device here; the call to
         * is_new_dev() below marks as scanned either the newly created
         * device or the matching device already in the list.
         */
        mutex_enter(&s->s_mutex);
        for (dev = list_head(&s->s_devnodes); dev != NULL;
            dev = list_next(&s->s_devnodes, dev)) {
                dev->pd_scanned = 0;
        }
        mutex_exit(&s->s_mutex);

        for (i = 0; i < (num_phys + num_logical); i++) {
                if (i < num_phys) {
                        dev = create_phys_dev(s, &phys_list->lun_entries[i]);
                } else {
                        dev = create_logical_dev(s,
                            &logical_list->lun_entries[i - num_phys]);
                }
                if (dev != NULL) {
                        if (is_new_dev(s, dev) == B_TRUE) {
                                list_create(&dev->pd_cmd_list,
                                    sizeof (struct pqi_cmd),
                                    offsetof(struct pqi_cmd, pc_list));
                                mutex_init(&dev->pd_mutex, NULL, MUTEX_DRIVER,
                                    NULL);

                                mutex_enter(&s->s_mutex);
                                /*
                                 * Start at index 0. The first call to
                                 * atomic_inc_32_nv will return 1 so subtract
                                 * 1 from the return value.
                                 */
                                dev->pd_target =
                                    atomic_inc_32_nv(&pqi_next_lun) - 1;
                                list_insert_tail(&s->s_devnodes, dev);
                                mutex_exit(&s->s_mutex);
                        } else {
                                ddi_devid_free_guid(dev->pd_guid);
                                PQI_FREE(dev, sizeof (*dev));
                        }
                }
        }

        /*
         * Now look through the list for devices which have disappeared
         * and mark them offline. During the call to config_one(), which
         * comes next during a hotplug event, those devices will be
         * offlined to the SCSI subsystem.
         */
        mutex_enter(&s->s_mutex);
        for (dev = list_head(&s->s_devnodes); dev != NULL;
            dev = list_next(&s->s_devnodes, dev)) {
                if (dev->pd_scanned)
                        dev->pd_online = 1;
                else
                        dev->pd_online = 0;

                /* ---- Software version of disk pull for debug ---- */
                if (pqi_do_offline && dev->pd_target == pqi_offline_target) {
                        cmn_err(CE_WARN, "%s: offlining %d\n", __func__,
                            pqi_offline_target);
                        dev->pd_online = 0;
                }
        }
        mutex_exit(&s->s_mutex);

        rval = B_TRUE;

error_out:
        if (phys_list != NULL)
                PQI_FREE(phys_list, ntohl(phys_list->header.list_length) +
                    sizeof (report_lun_header_t));
        if (logical_list != NULL)
                PQI_FREE(logical_list,
                    ntohl(logical_list->header.list_length) +
                    sizeof (report_lun_header_t));
        return (rval);
}

/*
 * []----------------------------------------------------------[]
 * | Entry points used by other functions found in other files  |
 * []----------------------------------------------------------[]
 */
void
pqi_rescan_devices(pqi_state_t s)
{
        (void) pqi_scan_scsi_devices(s);
}

boolean_t
pqi_scsi_inquiry(pqi_state_t s, pqi_device_t dev, int vpd,
    struct scsi_inquiry *inq, int len)
{
        pqi_raid_path_request_t rqst;

        if (build_raid_path_request(&rqst, SCMD_INQUIRY,
            dev->pd_scsi3addr, len, vpd) == B_FALSE)
                return (B_FALSE);

        return (scsi_common(s, &rqst, (caddr_t)inq, len));
}

void
pqi_free_io_resource(pqi_state_t s)
{
        pqi_io_request_t        *io = s->s_io_rqst_pool;
        int                     i;

        if (io == NULL)
                return;

        for (i = 0; i < s->s_max_io_slots; i++, io++) {
                if (io->io_iu == NULL)
                        break;
                PQI_FREE(io->io_iu, s->s_max_inbound_iu_length);
                io->io_iu = NULL;
                pqi_free_single(s, io->io_sg_chain_dma);
                io->io_sg_chain_dma = NULL;
        }

        PQI_FREE(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
        s->s_io_rqst_pool = NULL;
}

/*
 * []----------------------------------------------------------[]
 * | Utility functions for startup code.                        |
 * []----------------------------------------------------------[]
 */

static boolean_t
scsi_common(pqi_state_t s, pqi_raid_path_request_t *rqst, caddr_t buf, int len)
{
        pqi_dma_overhead_t      *dma;
        pqi_sg_entry_t          *sg;
        boolean_t               rval = B_FALSE;

        if ((dma = pqi_alloc_single(s, len)) == NULL)
                return (B_FALSE);

        sg = &rqst->rp_sglist[0];
        sg->sg_addr = dma->dma_addr;
        sg->sg_len = dma->len_to_alloc;
        sg->sg_flags = CISS_SG_LAST;

        if (submit_raid_rqst_sync(s, &rqst->header, NULL) == B_FALSE)
                goto out;

        (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
        (void) memcpy(buf, dma->alloc_memory, len);
        rval = B_TRUE;
out:
        pqi_free_single(s, dma);
        return (rval);
}

static void
bcopy_fromregs(pqi_state_t s, uint8_t *iomem, uint8_t *dst, uint32_t len)
{
        uint32_t        i;

        for (i = 0; i < len; i++) {
                *dst++ = ddi_get8(s->s_datap, iomem + i);
        }
}

static void
submit_admin_request(pqi_state_t s, pqi_general_admin_request_t *r)
{
        pqi_admin_queues_t      *aq;
        pqi_index_t             iq_pi;
        caddr_t                 next_element;

        aq = &s->s_admin_queues;
        iq_pi = aq->iq_pi_copy;
        next_element = aq->iq_element_array + (iq_pi *
            PQI_ADMIN_IQ_ELEMENT_LENGTH);
        (void) memcpy(next_element, r, sizeof (*r));
        (void) ddi_dma_sync(s->s_adminq_dma->handle,
            iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH, sizeof (*r),
            DDI_DMA_SYNC_FORDEV);
        iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
        aq->iq_pi_copy = iq_pi;

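        /* ---- Ring the doorbell: publish the new producer index ---- */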
        ddi_put32(s->s_datap, aq->iq_pi, iq_pi);
}

static boolean_t
poll_for_admin_response(pqi_state_t s, pqi_general_admin_response_t *r)
{
        pqi_admin_queues_t      *aq;
        pqi_index_t             oq_pi;
        pqi_index_t             oq_ci;
        int                     countdown = 10 * MICROSEC;      /* 10 seconds */
        int                     pause_time = 10 * MILLISEC;     /* 10ms */

        countdown /= pause_time;
        aq = &s->s_admin_queues;
        oq_ci = aq->oq_ci_copy;

        while (--countdown) {
                oq_pi = ddi_get32(s->s_adminq_dma->acc, aq->oq_pi);
                if (oq_pi != oq_ci)
                        break;
                drv_usecwait(pause_time);
        }
        if (countdown == 0)
                return (B_FALSE);

        (void) ddi_dma_sync(s->s_adminq_dma->handle,
            oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH, sizeof (*r),
            DDI_DMA_SYNC_FORCPU);
        (void) memcpy(r, aq->oq_element_array +
            (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof (*r));

        aq->oq_ci_copy = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
        ddi_put32(s->s_datap, aq->oq_ci, aq->oq_ci_copy);

        return (B_TRUE);
}

static boolean_t
validate_admin_response(pqi_general_admin_response_t *r, uint8_t code)
{
        if (r->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
                return (B_FALSE);

        if (r->header.iu_length != PQI_GENERAL_ADMIN_IU_LENGTH)
                return (B_FALSE);

        if (r->function_code != code)
                return (B_FALSE);

        if (r->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
                return (B_FALSE);

        return (B_TRUE);
}

static boolean_t
submit_admin_rqst_sync(pqi_state_t s,
    pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp)
{
        boolean_t       rval;

        submit_admin_request(s, rqst);
        rval = poll_for_admin_response(s, rsp);
        if (rval == B_TRUE) {
                rval = validate_admin_response(rsp, rqst->function_code);
                if (rval == B_FALSE) {
                        pqi_show_dev_state(s);
                }
        }
        return (rval);
}

static boolean_t
create_event_queue(pqi_state_t s)
{
        pqi_event_queue_t               *eq;
        pqi_general_admin_request_t     request;
        pqi_general_admin_response_t    response;

        eq = &s->s_event_queue;

        /*
         * Create OQ (Outbound Queue - device to host queue) to dedicate
         * to events.
         */
        (void) memset(&request, 0, sizeof (request));
        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
        request.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
        request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
        request.data.create_operational_oq.queue_id = eq->oq_id;
        request.data.create_operational_oq.element_array_addr =
            eq->oq_element_array_bus_addr;
        request.data.create_operational_oq.pi_addr = eq->oq_pi_bus_addr;
        request.data.create_operational_oq.num_elements =
            PQI_NUM_EVENT_QUEUE_ELEMENTS;
        request.data.create_operational_oq.element_length =
            PQI_EVENT_OQ_ELEMENT_LENGTH / 16;
        request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
        request.data.create_operational_oq.int_msg_num = eq->int_msg_num;

        if (submit_admin_rqst_sync(s, &request, &response) == B_FALSE)
                return (B_FALSE);

        eq->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
            PQI_DEVICE_REGISTERS_OFFSET +
            response.data.create_operational_oq.oq_ci_offset);

        return (B_TRUE);
}

static boolean_t
create_queue_group(pqi_state_t s, int idx)
{
        pqi_queue_group_t               *qg;
        pqi_general_admin_request_t     rqst;
        pqi_general_admin_response_t    rsp;

        qg = &s->s_queue_groups[idx];

        /* ---- Create inbound queue for RAID path (host to device) ---- */
        (void) memset(&rqst, 0, sizeof (rqst));
        rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
        rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
        rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
        rqst.data.create_operational_iq.queue_id = qg->iq_id[RAID_PATH];
        rqst.data.create_operational_iq.element_array_addr =
            qg->iq_element_array_bus_addr[RAID_PATH];
        rqst.data.create_operational_iq.ci_addr =
            qg->iq_ci_bus_addr[RAID_PATH];
        rqst.data.create_operational_iq.num_elements =
            s->s_num_elements_per_iq;
        rqst.data.create_operational_iq.element_length =
            PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
        rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

        if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
                return (B_FALSE);
        qg->iq_pi[RAID_PATH] =
            (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
            PQI_DEVICE_REGISTERS_OFFSET +
            rsp.data.create_operational_iq.iq_pi_offset);

        /* ---- Create inbound queue for Advanced I/O path. ---- */
        (void) memset(&rqst, 0, sizeof (rqst));
        rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
        rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
        rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
        rqst.data.create_operational_iq.queue_id =
            qg->iq_id[AIO_PATH];
        rqst.data.create_operational_iq.element_array_addr =
            qg->iq_element_array_bus_addr[AIO_PATH];
        rqst.data.create_operational_iq.ci_addr =
            qg->iq_ci_bus_addr[AIO_PATH];
        rqst.data.create_operational_iq.num_elements =
            s->s_num_elements_per_iq;
        rqst.data.create_operational_iq.element_length =
            PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
        rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

        if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
                return (B_FALSE);

        qg->iq_pi[AIO_PATH] =
            (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
            PQI_DEVICE_REGISTERS_OFFSET +
            rsp.data.create_operational_iq.iq_pi_offset);

        /* ---- Change second queue to be AIO ---- */
        (void) memset(&rqst, 0, sizeof (rqst));
        rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
        rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
        rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
        rqst.data.change_operational_iq_properties.queue_id =
            qg->iq_id[AIO_PATH];
        rqst.data.change_operational_iq_properties.vendor_specific =
            PQI_IQ_PROPERTY_IS_AIO_QUEUE;

        if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
                return (B_FALSE);

        /* ---- Create outbound queue (device to host) ---- */
        (void) memset(&rqst, 0, sizeof (rqst));
        rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
        rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
        rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
        rqst.data.create_operational_oq.queue_id = qg->oq_id;
        rqst.data.create_operational_oq.element_array_addr =
            qg->oq_element_array_bus_addr;
        rqst.data.create_operational_oq.pi_addr = qg->oq_pi_bus_addr;
        rqst.data.create_operational_oq.num_elements =
            s->s_num_elements_per_oq;
        rqst.data.create_operational_oq.element_length =
            PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16;
        rqst.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
        rqst.data.create_operational_oq.int_msg_num = qg->int_msg_num;

        if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
                return (B_FALSE);
        qg->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
            PQI_DEVICE_REGISTERS_OFFSET +
            rsp.data.create_operational_oq.oq_ci_offset);

        return (B_TRUE);
}

/* ARGSUSED */
static void
raid_sync_complete(pqi_io_request_t *io, void *ctx)
{
        ksema_t *s = (ksema_t *)ctx;

        sema_v(s);
}

static boolean_t
submit_raid_sync_with_io(pqi_state_t s, pqi_io_request_t *io)
{
        ksema_t sema;

        sema_init(&sema, 0, NULL, SEMA_DRIVER, NULL);

        io->io_cb = raid_sync_complete;
        io->io_context = &sema;

        if (pqi_is_offline(s))
                return (B_FALSE);

        /*
         * If the controller hangs, this reference to the io structure
         * is used to cancel the command. The status will be set to
         * EIO instead of PQI_DATA_IN_OUT_GOOD.
         */
        s->s_sync_io = io;
        s->s_sync_expire = gethrtime() + (SYNC_CMDS_TIMEOUT_SECS * NANOSEC);

        pqi_start_io(s, &s->s_queue_groups[PQI_DEFAULT_QUEUE_GROUP],
            RAID_PATH, io);
        sema_p(&sema);
        s->s_sync_io = NULL;
        s->s_sync_expire = 0;
        switch (io->io_status) {
        case PQI_DATA_IN_OUT_GOOD:
        case PQI_DATA_IN_OUT_UNDERFLOW:
                return (B_TRUE);
        default:
                return (B_FALSE);
        }
}

/*ARGSUSED*/
static boolean_t
submit_raid_rqst_sync(pqi_state_t s, pqi_iu_header_t *rqst,
    pqi_raid_error_info_t e_info)
{
        pqi_io_request_t        *io;
        size_t                  len;
        boolean_t               rval = B_FALSE; /* default to error case */

        sema_p(&s->s_sync_rqst);

        io = pqi_alloc_io(s);
        ((pqi_raid_path_request_t *)rqst)->rp_id = io->io_index;
        if (rqst->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
                ((pqi_raid_path_request_t *)rqst)->rp_error_index =
                    io->io_index;
        len = rqst->iu_length + PQI_REQUEST_HEADER_LENGTH;
        (void) memcpy(io->io_iu, rqst, len);

        if (submit_raid_sync_with_io(s, io) == B_TRUE)
                rval = B_TRUE;

        pqi_free_io(io);
        sema_v(&s->s_sync_rqst);
        return (rval);
}

static boolean_t
build_raid_path_request(pqi_raid_path_request_t *rqst,
    int cmd, caddr_t lun, uint32_t len, int vpd_page)
{
        uint8_t         *cdb;

        (void) memset(rqst, 0, sizeof (*rqst));
        rqst->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
        rqst->header.iu_length = offsetof(struct pqi_raid_path_request,
            rp_sglist[1]) - PQI_REQUEST_HEADER_LENGTH;
        rqst->rp_data_len = len;
        (void) memcpy(rqst->rp_lun, lun, sizeof (rqst->rp_lun));
        rqst->rp_task_attr = SOP_TASK_ATTRIBUTE_SIMPLE;
        rqst->rp_additional_cdb = SOP_ADDITIONAL_CDB_BYTES_0;

        cdb = rqst->rp_cdb;
        switch (cmd) {
        case SCMD_READ_CAPACITY:
                rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
                cdb[0] = (uint8_t)cmd;
                break;

        case SCMD_READ:
                rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
                cdb[0] = (uint8_t)cmd;
                cdb[2] = (uint8_t)(vpd_page >> 8);
                cdb[3] = (uint8_t)vpd_page;
                cdb[4] = (uint8_t)(len >> 9);
                break;
        case SCMD_MODE_SENSE:
                rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
                cdb[0] = (uint8_t)cmd;
                cdb[1] = 0;
                cdb[2] = (uint8_t)vpd_page;
                cdb[4] = (uint8_t)len;
                break;

        case SCMD_INQUIRY:
                rqst->rp_data_dir = SOP_READ_FLAG;
                cdb[0] = (uint8_t)cmd;
                if (vpd_page & VPD_PAGE) {
                        cdb[1] = 0x1;
                        cdb[2] = (uint8_t)vpd_page;
                }
                cdb[4] = (uint8_t)len;
                break;

        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
        case BMIC_IDENTIFY_CONTROLLER:
                rqst->rp_data_dir = SOP_READ_FLAG;
                cdb[0] = BMIC_READ;
                cdb[6] = (uint8_t)cmd;
                cdb[7] = (uint8_t)(len >> 8);
                cdb[8] = (uint8_t)len;
                break;

        case BMIC_WRITE_HOST_WELLNESS:
                rqst->rp_data_dir = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = (uint8_t)cmd;
                cdb[7] = (uint8_t)(len >> 8);
                cdb[8] = (uint8_t)len;
                break;

        case CISS_REPORT_LOG:
        case CISS_REPORT_PHYS:
                rqst->rp_data_dir = SOP_READ_FLAG;
                cdb[0] = (uint8_t)cmd;
                if (cmd == CISS_REPORT_PHYS)
                        cdb[1] = CISS_REPORT_PHYS_EXTENDED;
                else
                        cdb[1] = CISS_REPORT_LOG_EXTENDED;
                cdb[6] = (uint8_t)(len >> 24);
                cdb[7] = (uint8_t)(len >> 16);
                cdb[8] = (uint8_t)(len >> 8);
                cdb[9] = (uint8_t)len;
                break;

        default:
                ASSERT(0);
                break;
        }

        return (B_TRUE);
}

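/*
 * identify_physical_device -- issue BMIC_IDENTIFY_PHYSICAL_DEVICE for the
 * drive addressed by devp and copy the result into buf. The BMIC drive
 * number is stored split across CDB bytes 2 (low byte) and 9 (high byte).
 */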
static boolean_t
identify_physical_device(pqi_state_t s, pqi_device_t devp,
    bmic_identify_physical_device_t *buf)
{
        pqi_dma_overhead_t      *dma;
        pqi_raid_path_request_t rqst;
        boolean_t               rval = B_FALSE;
        uint16_t                idx;

        if ((dma = pqi_alloc_single(s, sizeof (*buf))) == NULL)
                return (B_FALSE);

        if (build_raid_path_request(&rqst, BMIC_IDENTIFY_PHYSICAL_DEVICE,
            RAID_CTLR_LUNID, sizeof (*buf), 0) == B_FALSE)
                goto out;

        idx = CISS_GET_DRIVE_NUMBER(devp->pd_scsi3addr);
        rqst.rp_cdb[2] = (uint8_t)idx;
        rqst.rp_cdb[9] = (uint8_t)(idx >> 8);

        rqst.rp_sglist[0].sg_addr = dma->dma_addr;
        rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
        rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;

        if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
                goto out;

        (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
        (void) memcpy(buf, dma->alloc_memory, sizeof (*buf));
        rval = B_TRUE;
out:
        pqi_free_single(s, dma);
        return (rval);
}

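/*
 * identify_controller -- fetch the controller's BMIC identify data
 * (firmware version, etc.) into ident.
 */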
static boolean_t
identify_controller(pqi_state_t s, bmic_identify_controller_t *ident)
{
        pqi_raid_path_request_t rqst;
        pqi_dma_overhead_t      *dma;
        boolean_t               rval = B_FALSE;

        if ((dma = pqi_alloc_single(s, sizeof (*ident))) == NULL)
                return (B_FALSE);

        if (build_raid_path_request(&rqst, BMIC_IDENTIFY_CONTROLLER,
            RAID_CTLR_LUNID, sizeof (*ident), 0) == B_FALSE)
                goto out;

        rqst.rp_sglist[0].sg_addr = dma->dma_addr;
        rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
        rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;

        if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
                goto out;

        (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
        (void) memcpy(ident, dma->alloc_memory, sizeof (*ident));
        rval = B_TRUE;
out:
        pqi_free_single(s, dma);
        return (rval);
}

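/*
 * write_host_wellness -- send a host wellness buffer (e.g. the current
 * time of day) to the controller via BMIC_WRITE_HOST_WELLNESS.
 */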
static boolean_t
write_host_wellness(pqi_state_t s, void *buf, size_t len)
{
        pqi_dma_overhead_t      *dma;
        boolean_t               rval = B_FALSE;
        pqi_raid_path_request_t rqst;

        if ((dma = pqi_alloc_single(s, len)) == NULL)
                return (B_FALSE);
        if (build_raid_path_request(&rqst, BMIC_WRITE_HOST_WELLNESS,
            RAID_CTLR_LUNID, len, 0) == B_FALSE)
                goto out;

        (void) memcpy(dma->alloc_memory, buf, dma->len_to_alloc);
        rqst.rp_sglist[0].sg_addr = dma->dma_addr;
        rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
        rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;

        rval = submit_raid_rqst_sync(s, &rqst.header, NULL);
out:
        pqi_free_single(s, dma);
        return (rval);
}

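/*
 * report_luns -- issue a CISS report LUNs command (logical or physical)
 * and copy up to len bytes of the response into data.
 */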
static boolean_t
report_luns(pqi_state_t s, int cmd, void *data, size_t len)
{
        pqi_dma_overhead_t      *dma;
        boolean_t               rval = B_FALSE;
        pqi_raid_path_request_t rqst;

        if ((dma = pqi_alloc_single(s, len)) == NULL)
                return (B_FALSE);
        if (build_raid_path_request(&rqst, cmd, RAID_CTLR_LUNID,
            len, 0) == B_FALSE)
                goto error_out;

        rqst.rp_sglist[0].sg_addr = dma->dma_addr;
        rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
        rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;

        if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
                goto error_out;

        (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
        (void) memcpy(data, dma->alloc_memory, len);
        rval = B_TRUE;

error_out:
        pqi_free_single(s, dma);
        return (rval);
}

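/*
 * report_luns_by_cmd -- return a freshly allocated buffer holding the
 * complete LUN list for cmd.
 *
 * Since the size of the list isn't known up front, start with just the
 * header, read the required length from the response, and retry with a
 * larger buffer until the whole list fits. For example, a first pass
 * that reports a 400-byte list triggers a second pass with an allocation
 * of sizeof (report_lun_header_t) + 400 bytes.
 */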
static boolean_t
report_luns_by_cmd(pqi_state_t s, int cmd, void **buf)
{
        void            *data           = NULL;
        size_t          data_len        = 0;
        size_t          new_data_len;
        uint32_t        new_list_len    = 0;
        uint32_t        list_len        = 0;
        boolean_t       rval            = B_FALSE;

        new_data_len = sizeof (report_lun_header_t);
        do {
                if (data != NULL) {
                        PQI_FREE(data, data_len);
                }
                data_len = new_data_len;
                data = PQI_ZALLOC(data_len, KM_SLEEP);
                list_len = new_list_len;
                if (report_luns(s, cmd, data, data_len) == B_FALSE)
                        goto error_out;
                new_list_len =
                    ntohl(((report_lun_header_t *)data)->list_length);
                new_data_len = sizeof (report_lun_header_t) +
                    new_list_len;
        } while (new_list_len > list_len);
        rval = B_TRUE;

error_out:
        if (rval == B_FALSE) {
                PQI_FREE(data, data_len);
                data = NULL;
        }
        *buf = data;
        return (rval);
}

static inline boolean_t
report_phys_luns(pqi_state_t s, void **v)
{
        return (report_luns_by_cmd(s, CISS_REPORT_PHYS, v));
}

static inline boolean_t
report_logical_luns(pqi_state_t s, void **v)
{
        return (report_luns_by_cmd(s, CISS_REPORT_LOG, v));
}

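/*
 * get_device_list -- fetch both the physical and logical LUN lists. The
 * logical list is reallocated with room for one extra entry so that the
 * controller itself can be appended as a logical device.
 */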
static boolean_t
get_device_list(pqi_state_t s, report_phys_lun_extended_t **pl,
    report_log_lun_extended_t **ll)
{
        report_log_lun_extended_t       *log_data;
        report_log_lun_extended_t       *internal_log;
        size_t                          list_len;
        size_t                          data_len;
        report_lun_header_t             header;

        if (report_phys_luns(s, (void **)pl) == B_FALSE)
                return (B_FALSE);

        if (report_logical_luns(s, (void **)ll) == B_FALSE)
                return (B_FALSE);

        log_data = *ll;
        if (log_data != NULL) {
                list_len = ntohl(log_data->header.list_length);
        } else {
                (void) memset(&header, 0, sizeof (header));
                log_data = (report_log_lun_extended_t *)&header;
                list_len = 0;
        }

        data_len = sizeof (header) + list_len;
        /*
         * Add the controller itself to the logical LUNs as an empty
         * entry.
         */
        internal_log = PQI_ZALLOC(data_len +
            sizeof (report_log_lun_extended_entry_t), KM_SLEEP);
        (void) memcpy(internal_log, log_data, data_len);
        internal_log->header.list_length = htonl(list_len +
            sizeof (report_log_lun_extended_entry_t));

        if (*ll != NULL)
                PQI_FREE(*ll, sizeof (report_lun_header_t) +
                    ntohl((*ll)->header.list_length));
        *ll = internal_log;
        return (B_TRUE);
}

/* ---- Only skip physical devices ---- */
static boolean_t
skip_device(char *addr)
{
        return (MASKED_DEVICE(addr) ? B_TRUE : B_FALSE);
}

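/*
 * get_device_info -- issue a standard INQUIRY to the device and record
 * its type, vendor, and model.
 */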
static boolean_t
get_device_info(pqi_state_t s, pqi_device_t dev)
{
        boolean_t               rval = B_FALSE;
        struct scsi_inquiry     *inq;

        inq = PQI_ZALLOC(sizeof (*inq), KM_SLEEP);
        if (pqi_scsi_inquiry(s, dev, 0, inq, sizeof (*inq)) == B_FALSE)
                goto out;

        dev->pd_devtype = inq->inq_dtype & 0x1f;
        (void) memcpy(dev->pd_vendor, inq->inq_vid, sizeof (dev->pd_vendor));
        (void) memcpy(dev->pd_model, inq->inq_pid, sizeof (dev->pd_model));

        /* TODO Handle logical devices */
        rval = B_TRUE;
out:
        PQI_FREE(inq, sizeof (*inq));
        return (rval);
}

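/*
 * is_supported_dev -- filter out device types the driver doesn't handle.
 * Array controllers are only accepted when they are the RAID controller
 * LUN itself.
 */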
static boolean_t
is_supported_dev(pqi_state_t s, pqi_device_t dev)
{
        boolean_t       rval = B_FALSE;

        switch (dev->pd_devtype) {
        case DTYPE_DIRECT:
        case TYPE_ZBC:
        case DTYPE_SEQUENTIAL:
        case DTYPE_ESI:
                rval = B_TRUE;
                break;
        case DTYPE_ARRAY_CTRL:
                if (strncmp(dev->pd_scsi3addr, RAID_CTLR_LUNID,
                    sizeof (dev->pd_scsi3addr)) == 0)
                        rval = B_TRUE;
                break;
        default:
                dev_err(s->s_dip, CE_WARN, "unsupported device type: 0x%x",
                    dev->pd_devtype);
                break;
        }
        return (rval);
}

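/*
 * get_phys_disk_info -- placeholder; the BMIC identify data for a
 * physical disk is not consumed yet.
 */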
/* ARGSUSED */
static void
get_phys_disk_info(pqi_state_t s, pqi_device_t dev,
    bmic_identify_physical_device_t *id)
{
}

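/*
 * is_external_raid_addr -- placeholder; external RAID addresses are not
 * recognized yet, so every address is treated as internal.
 */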
/*ARGSUSED*/
static int
is_external_raid_addr(char *addr)
{
        return (0);
}

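/*
 * build_guid -- create a devid-based GUID for the device from its
 * standard INQUIRY data and its page 0x83 (device identification) VPD
 * data, releasing any GUID the device already had.
 */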
static void
build_guid(pqi_state_t s, pqi_device_t d)
{
        int                     len     = 0xff;
        struct scsi_inquiry     *inq    = NULL;
        uchar_t                 *inq83  = NULL;
        ddi_devid_t             devid;

        ddi_devid_free_guid(d->pd_guid);
        d->pd_guid = NULL;

        inq = kmem_alloc(sizeof (struct scsi_inquiry), KM_SLEEP);
        if (pqi_scsi_inquiry(s, d, 0, inq, sizeof (struct scsi_inquiry)) ==
            B_FALSE) {
                goto out;
        }

        inq83 = kmem_zalloc(len, KM_SLEEP);
        if (pqi_scsi_inquiry(s, d, VPD_PAGE | 0x83,
            (struct scsi_inquiry *)inq83, len) == B_FALSE) {
                goto out;
        }

        if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, NULL,
            (uchar_t *)inq, sizeof (struct scsi_inquiry), NULL, 0, inq83,
            (size_t)len, &devid) == DDI_SUCCESS) {
                d->pd_guid = ddi_devid_to_guid(devid);
                ddi_devid_free(devid);
        }
out:
        if (inq != NULL)
                kmem_free(inq, sizeof (struct scsi_inquiry));
        if (inq83 != NULL)
                kmem_free(inq83, len);
}

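/*
 * create_phys_dev -- allocate and initialize a pqi_device_t for one entry
 * of the physical LUN list, or return NULL if the device is masked,
 * unreachable, or of an unsupported type.
 */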
static pqi_device_t
create_phys_dev(pqi_state_t s, report_phys_lun_extended_entry_t *e)
{
        pqi_device_t                    dev;
        bmic_identify_physical_device_t *id_phys        = NULL;

        dev = PQI_ZALLOC(sizeof (*dev), KM_SLEEP);
        dev->pd_phys_dev = 1;
        dev->pd_wwid = e->wwid;
        (void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
        dev->pd_target = -1;

        if (skip_device(dev->pd_scsi3addr) == B_TRUE)
                goto out;

        if (get_device_info(s, dev) == B_FALSE)
                goto out;

        if (!is_supported_dev(s, dev))
                goto out;

        switch (dev->pd_devtype) {
        case DTYPE_ESI:
                build_guid(s, dev);
                dev->pd_sas_address = ntohll(dev->pd_wwid);
                break;

        case DTYPE_DIRECT:
        case TYPE_ZBC:
                build_guid(s, dev);
                id_phys = PQI_ZALLOC(sizeof (*id_phys), KM_SLEEP);
                if ((e->device_flags &
                    REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
                    e->aio_handle) {

                        /*
                         * XXX Until I figure out what's wrong with
                         * using AIO I'll disable this for now.
                         */
                        dev->pd_aio_enabled = 0;
                        dev->pd_aio_handle = e->aio_handle;
                        if (identify_physical_device(s, dev,
                            id_phys) == B_FALSE)
                                goto out;
                }
                dev->pd_sas_address = ntohll(dev->pd_wwid);
                get_phys_disk_info(s, dev, id_phys);
                PQI_FREE(id_phys, sizeof (*id_phys));
                break;
        }

        return (dev);
out:
        if (id_phys != NULL)
                PQI_FREE(id_phys, sizeof (*id_phys));
        PQI_FREE(dev, sizeof (*dev));
        return (NULL);
}

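/*
 * create_logical_dev -- allocate and initialize a pqi_device_t for one
 * entry of the logical LUN list, or return NULL if the device can't be
 * queried or is of an unsupported type.
 */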
static pqi_device_t
create_logical_dev(pqi_state_t s, report_log_lun_extended_entry_t *e)
{
        pqi_device_t    dev;

        dev = PQI_ZALLOC(sizeof (*dev), KM_SLEEP);
        dev->pd_phys_dev = 0;
        dev->pd_target = -1;
        (void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
        dev->pd_external_raid = is_external_raid_addr(dev->pd_scsi3addr);

        if (get_device_info(s, dev) == B_FALSE)
                goto out;

        if (!is_supported_dev(s, dev))
                goto out;

        (void) memcpy(dev->pd_volume_id, e->volume_id,
            sizeof (dev->pd_volume_id));
        return (dev);

out:
        PQI_FREE(dev, sizeof (*dev));
        return (NULL);
}

/*
 * is_new_dev -- look to see if new_dev is indeed new.
 *
 * NOTE: This function has two outcomes. One is to determine if new_dev
 * is truly new. The other, a side effect, is to mark either the matching
 * existing device or new_dev itself as having been scanned.
 */
static boolean_t
is_new_dev(pqi_state_t s, pqi_device_t new_dev)
{
        pqi_device_t    dev;

        for (dev = list_head(&s->s_devnodes); dev != NULL;
            dev = list_next(&s->s_devnodes, dev)) {
                if (dev->pd_phys_dev != new_dev->pd_phys_dev)
                        continue;
                /* Logical volumes have no WWID; match on volume id. */
                if (dev->pd_phys_dev) {
                        if (dev->pd_wwid == new_dev->pd_wwid) {
                                dev->pd_scanned = 1;
                                return (B_FALSE);
                        }
                } else if (memcmp(dev->pd_volume_id, new_dev->pd_volume_id,
                    sizeof (dev->pd_volume_id)) == 0) {
                        dev->pd_scanned = 1;
                        return (B_FALSE);
                }
        }

        new_dev->pd_scanned = 1;
        return (B_TRUE);
}

#define PQI_RESET_ACTION_RESET          0x1

#define PQI_RESET_TYPE_NO_RESET         0x0
#define PQI_RESET_TYPE_SOFT_RESET       0x1
#define PQI_RESET_TYPE_FIRM_RESET       0x2
#define PQI_RESET_TYPE_HARD_RESET       0x3

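/*
 * hba_reset -- request a hard reset of the controller. Per the PQI spec
 * the reset action occupies bits 7:5 of the device_reset register and the
 * reset type bits 2:0, so this writes (0x1 << 5) | 0x3 = 0x23.
 */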
static boolean_t
hba_reset(pqi_state_t s)
{
        uint32_t        val;

        val = (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET;
        S32(s, pqi_registers.device_reset, val);

        return (pqi_wait_for_mode_ready(s));
}

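/*
 * save_ctrl_mode -- record the current operating mode (SIS or PQI) in the
 * controller's scratch register.
 */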
static void
save_ctrl_mode(pqi_state_t s, int mode)
{
        sis_write_scratch(s, mode);
}

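/*
 * revert_to_sis -- reset the controller out of PQI mode, re-enable legacy
 * SIS mode, and record SIS_MODE in the scratch register.
 */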
static boolean_t
revert_to_sis(pqi_state_t s)
{
        if (!hba_reset(s))
                return (B_FALSE);
        if (sis_reenable_mode(s) == B_FALSE)
                return (B_FALSE);
        sis_write_scratch(s, SIS_MODE);
        return (B_TRUE);
}

#define BIN2BCD(x)      ((((x) / 10) << 4) + (x) % 10)

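/*
 * update_time -- push the current time of day to the controller as a host
 * wellness packet, then reschedule itself to run again a day later.
 *
 * The controller expects each time field in packed BCD, which is what
 * BIN2BCD produces: BIN2BCD(59) is ((59 / 10) << 4) + 59 % 10 = 0x59.
 * The century field is hardwired to 20.
 */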
static void
update_time(void *v)
{
        pqi_state_t                     s = v;
        bmic_host_wellness_time_t       *ht;
        struct timeval                  curtime;
        todinfo_t                       tod;

        ht = PQI_ZALLOC(sizeof (*ht), KM_SLEEP);
        ht->start_tag[0] = '<';
        ht->start_tag[1] = 'H';
        ht->start_tag[2] = 'W';
        ht->start_tag[3] = '>';
        ht->time_tag[0] = 'T';
        ht->time_tag[1] = 'D';
        ht->time_length = sizeof (ht->time);

        uniqtime(&curtime);
        mutex_enter(&tod_lock);
        tod = utc_to_tod(curtime.tv_sec);
        mutex_exit(&tod_lock);

        ht->time[0] = BIN2BCD(tod.tod_hour);            /* Hour */
        ht->time[1] = BIN2BCD(tod.tod_min);             /* Minute */
        ht->time[2] = BIN2BCD(tod.tod_sec);             /* Second */
        ht->time[3] = 0;
        ht->time[4] = BIN2BCD(tod.tod_month);           /* Month */
        ht->time[5] = BIN2BCD(tod.tod_day);             /* Day */
        ht->time[6] = BIN2BCD(20);                      /* Century */
        ht->time[7] = BIN2BCD(tod.tod_year - 70);       /* Year w/in century */

        ht->dont_write_tag[0] = 'D';
        ht->dont_write_tag[1] = 'W';
        ht->end_tag[0] = 'Z';
        ht->end_tag[1] = 'Z';

        (void) write_host_wellness(s, ht, sizeof (*ht));
        PQI_FREE(ht, sizeof (*ht));
        s->s_time_of_day = timeout(update_time, s,
            DAY * drv_usectohz(MICROSEC));
}