3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 QLogic Corporation. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
29 */
30
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/stat.h>
34 #include <sys/pci.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/file.h>
38 #include <sys/cred.h>
39 #include <sys/byteorder.h>
40 #include <sys/atomic.h>
41 #include <sys/scsi/scsi.h>
42
43 #include <sys/stmf_defines.h>
44 #include <sys/fct_defines.h>
45 #include <sys/stmf.h>
46 #include <sys/stmf_ioctl.h>
47 #include <sys/portif.h>
48 #include <sys/fct.h>
49
50 #include "qlt.h"
51 #include "qlt_dma.h"
52 #include "qlt_ioctl.h"
53 #include "qlt_open.h"
54
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static void qlt_enable_intr(qlt_state_t *);
58 static void qlt_disable_intr(qlt_state_t *);
59 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
60 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
61 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
62 uint32_t word_count, uint32_t risc_addr);
63 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
64 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
65 uint32_t dma_size);
66 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
67 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
68 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
69 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
70 stmf_state_change_info_t *ssci);
71 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
72 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
73 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
74 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
77 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
78 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
79 uint8_t *rsp);
80 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
81 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
82 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
83 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
84 static void qlt_verify_fw(qlt_state_t *qlt);
85 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
86 fct_status_t qlt_port_start(caddr_t arg);
87 fct_status_t qlt_port_stop(caddr_t arg);
88 fct_status_t qlt_port_online(qlt_state_t *qlt);
89 fct_status_t qlt_port_offline(qlt_state_t *qlt);
90 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
91 fct_link_info_t *li);
92 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
93 static fct_status_t qlt_force_lip(qlt_state_t *);
94 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
95 fct_flogi_xchg_t *fx);
96 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
97 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
98 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
99 fct_remote_port_t *rp, fct_cmd_t *login);
100 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
101 fct_remote_port_t *rp);
102 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
103 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
104 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
105 fct_cmd_t *cmd, int terminate);
106 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
107 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
108 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
109 fct_cmd_t *cmd, uint32_t flags);
110 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
111 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
113 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
114 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
115 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
116 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
117 stmf_data_buf_t *dbuf, uint32_t ioflags);
118 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
119 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
120 static void qlt_release_intr(qlt_state_t *qlt);
121 static int qlt_setup_interrupts(qlt_state_t *qlt);
122 static void qlt_destroy_mutex(qlt_state_t *qlt);
123
124 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
125 uint32_t words);
126 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
127 caddr_t buf, uint_t size_left);
128 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
129 caddr_t buf, uint_t size_left);
130 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
131 int count, uint_t size_left);
132 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
133 cred_t *credp, int *rval);
134 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
135 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
136
137 static int qlt_setup_msi(qlt_state_t *qlt);
138 static int qlt_setup_msix(qlt_state_t *qlt);
139
140 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
141 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
142 static int qlt_validate_trace_desc(qlt_state_t *qlt);
143 static char *qlt_find_trace_start(qlt_state_t *qlt);
144
145 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
146 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
147 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
148 char **prop_val);
149 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
150 static int qlt_convert_string_to_ull(char *prop, int radix,
151 u_longlong_t *result);
152 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
153 static int qlt_quiesce(dev_info_t *dip);
154 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
155 uint32_t);
156 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
157 uint32_t *);
158 static void qlt_mps_reset(qlt_state_t *qlt);
159 static void qlt_properties(qlt_state_t *qlt);
160
161
/*
 * SETELSBIT: set the bit corresponding to ELS opcode 'els' in the ELS
 * bitmap 'bmp' (byte index (els >> 3) & 0x1F, bit index els & 7).
 */
#define SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))

/* Interrupt-type tunables: prefer MSI by default, MSI-X off by default. */
int qlt_enable_msix = 0;
int qlt_enable_msi = 1;


/*
 * Table of DDI property status codes; presumably used to render property
 * lookup failures as strings for logging — verify against qlt_ioctl.h.
 */
string_table_t prop_status_tbl[] = DDI_PROP_STATUS();

/* Array to quickly calculate next free buf index to use */
#if 0
static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
#endif
175
176 static struct cb_ops qlt_cb_ops = {
177 qlt_open,
178 qlt_close,
179 nodev,
180 nodev,
181 nodev,
182 nodev,
183 nodev,
184 qlt_ioctl,
185 nodev,
189 ddi_prop_op,
190 0,
191 D_MP | D_NEW
192 };
193
/* Device operations vector (autoconfiguration entry points). */
static struct dev_ops qlt_ops = {
	DEVO_REV,	/* devo_rev */
	0,		/* devo_refcnt */
	nodev,		/* devo_getinfo */
	nulldev,	/* devo_identify */
	nulldev,	/* devo_probe */
	qlt_attach,	/* devo_attach */
	qlt_detach,	/* devo_detach */
	nodev,		/* devo_reset */
	&qlt_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops (not a nexus driver) */
	ddi_power,	/* devo_power */
	qlt_quiesce	/* devo_quiesce (fast-reboot support) */
};
208
/*
 * Fallback for FCT headers that predate the 10G speed bit.
 * NOTE(review): confirm the value matches the framework definition.
 */
#ifndef PORT_SPEED_10G
#define PORT_SPEED_10G 16
#endif

/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,
	QLT_NAME" "QLT_VERSION,	/* name string shown by modinfo(1M) */
	&qlt_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
222
/* Soft-state anchor for all qlt instances (indexed by instance number). */
void *qlt_state = NULL;
/* Driver-global lock; initialized in _init(), destroyed in _fini(). */
kmutex_t qlt_global_lock;
/* Non-zero blocks module unload: _fini() returns EBUSY while set. */
static uint32_t qlt_loaded_counter = 0;

/*
 * PCI/PCI-X bus speed strings, indexed by the mode bits extracted from
 * REG_CTRL_STATUS in qlt_attach() (2422 chips only).
 */
static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
			"-X Mode 1 133", "--Invalid--",
			"-X Mode 2 66", "-X Mode 2 100",
			"-X Mode 2 133", " 66" };

/* Always use 64 bit DMA. */
static ddi_dma_attr_t qlt_queue_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length (single cookie required) */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};

/* qlogic logging */
int enable_extended_logging = 0;

static char qlt_provider_name[] = "qlt";
/* STMF port-provider handle; allocated and registered in _init(). */
static struct stmf_port_provider *qlt_pp;
253
254 int
255 _init(void)
256 {
257 int ret;
258
259 ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
260 if (ret == 0) {
261 mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
262 qlt_pp = (stmf_port_provider_t *)stmf_alloc(
263 STMF_STRUCT_PORT_PROVIDER, 0, 0);
264 qlt_pp->pp_portif_rev = PORTIF_REV_1;
265 qlt_pp->pp_name = qlt_provider_name;
266 if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
267 stmf_free(qlt_pp);
268 mutex_destroy(&qlt_global_lock);
269 ddi_soft_state_fini(&qlt_state);
270 return (EIO);
286 int ret;
287
288 if (qlt_loaded_counter)
289 return (EBUSY);
290 ret = mod_remove(&modlinkage);
291 if (ret == 0) {
292 (void) stmf_deregister_port_provider(qlt_pp);
293 stmf_free(qlt_pp);
294 mutex_destroy(&qlt_global_lock);
295 ddi_soft_state_fini(&qlt_state);
296 }
297 return (ret);
298 }
299
300 int
301 _info(struct modinfo *modinfop)
302 {
303 return (mod_info(&modlinkage, modinfop));
304 }
305
306
/*
 * qlt_attach
 *	DDI attach entry point. Brings one HBA instance up in this order:
 *	soft state -> nvram buffer -> trace buffer -> PCI config access ->
 *	device-id check -> register mapping -> NVRAM read -> queue DMA
 *	memory -> interrupts (and their mutexes) -> minor node -> CVs and
 *	ioctl lock -> optional PCI max-read/max-payload tuning -> enable
 *	interrupts -> start the FCT port.
 *
 *	The attach_fail_* labels unwind these steps in exact reverse order;
 *	do not reorder the setup or the cleanup chain independently.
 *
 *	NOTE(review): failures that jump to attach_fail_1/attach_fail_2
 *	after the kmem_zalloc of qlt->nvram do not free it — looks like a
 *	leak on those early error paths; confirm.
 *
 *	Returns DDI_SUCCESS or DDI_FAILURE. Suspend/resume is not
 *	supported (only DDI_ATTACH is accepted).
 */
static int
qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	qlt_state_t *qlt;
	ddi_device_acc_attr_t dev_acc_attr;
	uint16_t did;
	uint16_t val;
	uint16_t mr;
	size_t discard;
	uint_t ncookies;
	int max_read_size;
	int max_payload_size;
	fct_status_t ret;

	/* No support for suspend resume yet */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
	    NULL) {
		goto attach_fail_1;
	}

	qlt->instance = instance;

	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
	qlt->dip = dip;

	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
		goto attach_fail_1;
	}

	EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);

	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
		goto attach_fail_2;
	}
	/* Only the known 24xx/25xx/81xx device ids are supported. */
	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
	if ((did != 0x2422) && (did != 0x2432) &&
	    (did != 0x8432) && (did != 0x2532) &&
	    (did != 0x8001)) {
		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
		    instance, did);
		goto attach_fail_4;
	}

	/* Classify the chip family from the high byte of the device id. */
	if ((did & 0xFF00) == 0x8000)
		qlt->qlt_81xx_chip = 1;
	else if ((did & 0xFF00) == 0x2500)
		qlt->qlt_25xx_chip = 1;

	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	/* Map 0x100 bytes of register space from BAR index 2. */
	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
		goto attach_fail_4;
	}
	if (did == 0x2422) {
		/* Report the PCI/PCI-X bus mode the 2422 negotiated. */
		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
		pci_bits >>= 8;
		pci_bits &= 0xf;
		if ((pci_bits == 3) || (pci_bits == 7)) {
			cmn_err(CE_NOTE,
			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
			    instance, pci_speeds[pci_bits], pci_bits);
		} else {
			cmn_err(CE_WARN,
			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
			    "(Invalid)", ((pci_bits == 0) ||
			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
			    "32 bit slot ") : "", pci_bits);
		}
	}
	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
		    (unsigned long long)ret);
		goto attach_fail_5;
	}

	qlt_properties(qlt);

	/* Allocate the shared request/response/priority/ATIO queue memory. */
	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
		goto attach_fail_5;
	}
	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
	    DDI_SUCCESS) {
		goto attach_fail_6;
	}
	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
		goto attach_fail_7;
	}
	/* The dma attr demands a single cookie; anything else is fatal. */
	if (ncookies != 1)
		goto attach_fail_8;
	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;

	/* mutex are inited in this function */
	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
		goto attach_fail_8;

	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
	    "qlt%d", instance);
	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
	    "%s,0", qlt->qlt_minor_name);

	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
		goto attach_fail_9;
	}

	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);

	/* Setup PCI cfg space registers */
	/*
	 * Apply the optional pci-max-read-request tuning from qlt.conf;
	 * the sentinel default 11 means "property not set, leave HW as-is".
	 */
	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
	if (max_read_size == 11)
		goto over_max_read_xfer_setting;
	if (did == 0x2422) {
		/* PCI-X part: 2-bit field at config offset 0x4E. */
		if (max_read_size == 512)
			val = 0;
		else if (max_read_size == 1024)
			val = 1;
		else if (max_read_size == 2048)
			val = 2;
		else if (max_read_size == 4096)
			val = 3;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pci-max-read-request in qlt.conf. Valid values "
			    "for this HBA are 512/1024/2048/4096", instance);
			goto over_max_read_xfer_setting;
		}
		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
		mr = (uint16_t)(mr & 0xfff3);
		mr = (uint16_t)(mr | (val << 2));
		PCICFG_WR16(qlt, 0x4E, mr);
	} else if ((did == 0x2432) || (did == 0x8432) ||
	    (did == 0x2532) || (did == 0x8001)) {
		/* PCIe parts: 3-bit field at config offset 0x54. */
		if (max_read_size == 128)
			val = 0;
		else if (max_read_size == 256)
			val = 1;
		else if (max_read_size == 512)
			val = 2;
		else if (max_read_size == 1024)
			val = 3;
		else if (max_read_size == 2048)
			val = 4;
		else if (max_read_size == 4096)
			val = 5;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pci-max-read-request in qlt.conf. Valid values "
			    "for this HBA are 128/256/512/1024/2048/4096",
			    instance);
			goto over_max_read_xfer_setting;
		}
		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
		mr = (uint16_t)(mr & 0x8fff);
		mr = (uint16_t)(mr | (val << 12));
		PCICFG_WR16(qlt, 0x54, mr);
	} else {
		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
		    "pci-max-read-request for this device (%x)",
		    instance, did);
	}
over_max_read_xfer_setting:;

	/*
	 * Same pattern for the optional pcie-max-payload-size tuning
	 * (PCIe parts only); 11 again means "not configured".
	 */
	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
	if (max_payload_size == 11)
		goto over_max_payload_setting;
	if ((did == 0x2432) || (did == 0x8432) ||
	    (did == 0x2532) || (did == 0x8001)) {
		if (max_payload_size == 128)
			val = 0;
		else if (max_payload_size == 256)
			val = 1;
		else if (max_payload_size == 512)
			val = 2;
		else if (max_payload_size == 1024)
			val = 3;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pcie-max-payload-size in qlt.conf. Valid values "
			    "for this HBA are 128/256/512/1024",
			    instance);
			goto over_max_payload_setting;
		}
		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
		mr = (uint16_t)(mr & 0xff1f);
		mr = (uint16_t)(mr | (val << 5));
		PCICFG_WR16(qlt, 0x54, mr);
	} else {
		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
		    "pcie-max-payload-size for this device (%x)",
		    instance, did);
	}

over_max_payload_setting:;

	qlt_enable_intr(qlt);

	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
		goto attach_fail_10;

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

/* Unwind in exact reverse order of the setup sequence above. */
attach_fail_10:;
	mutex_destroy(&qlt->qlt_ioctl_lock);
	cv_destroy(&qlt->mbox_cv);
	cv_destroy(&qlt->rp_dereg_cv);
	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
attach_fail_9:;
	qlt_destroy_mutex(qlt);
	qlt_release_intr(qlt);
attach_fail_8:;
	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
attach_fail_7:;
	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
attach_fail_6:;
	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
attach_fail_5:;
	ddi_regs_map_free(&qlt->regs_acc_handle);
attach_fail_4:;
	pci_config_teardown(&qlt->pcicfg_acc_handle);
	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
	(void) qlt_el_trace_desc_dtor(qlt);
attach_fail_2:;
attach_fail_1:;
	ddi_soft_state_free(qlt_state, instance);
	return (DDI_FAILURE);
}
559
/*
 * NOTE(review): local copy of an FCT-internal event code; confirm it
 * stays in sync with the framework's definition.
 */
#define FCT_I_EVENT_BRING_PORT_OFFLINE 0x83
561
/* ARGSUSED */
/*
 * qlt_detach
 *	DDI detach entry point. Refuses to detach while a firmware image
 *	is present (fw_code01 — presumably user-loaded via ioctl; confirm),
 *	while the port is not offline with the offline state acknowledged,
 *	or if the FCT port cannot be stopped. Otherwise releases all
 *	resources in reverse order of qlt_attach().
 */
static int
qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	qlt_state_t *qlt;

	int instance;

	instance = ddi_get_instance(dip);
	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
	    NULL) {
		return (DDI_FAILURE);
	}

	if (qlt->fw_code01) {
		return (DDI_FAILURE);
	}

	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
	    qlt->qlt_state_not_acked) {
		return (DDI_FAILURE);
	}
	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
		return (DDI_FAILURE);
	}

	qlt_disable_intr(qlt);

	/* Teardown mirrors qlt_attach()'s setup order, reversed. */
	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
	qlt_destroy_mutex(qlt);
	qlt_release_intr(qlt);
	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
	ddi_regs_map_free(&qlt->regs_acc_handle);
	pci_config_teardown(&qlt->pcicfg_acc_handle);
	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
	cv_destroy(&qlt->mbox_cv);
	cv_destroy(&qlt->rp_dereg_cv);
	(void) qlt_el_trace_desc_dtor(qlt);
	ddi_soft_state_free(qlt_state, instance);

	return (DDI_SUCCESS);
}
606
/*
 * qlt_quiesce	quiesce a device attached to the system.
 *
 *	Fast-reboot entry point: stop the RISC firmware via a raw mailbox
 *	handshake, soft-reset the chip and disable interrupts. Always
 *	returns DDI_SUCCESS (nothing to do if the instance is gone).
 */
static int
qlt_quiesce(dev_info_t *dip)
{
	qlt_state_t *qlt;
	uint32_t timer;
	uint32_t stat;

	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
	if (qlt == NULL) {
		/* Oh well.... */
		return (DDI_SUCCESS);
	}

	/* Issue MBC_STOP_FIRMWARE via the host-to-RISC interrupt. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
	/* Poll up to 30000 * 100us (~3s) for the firmware to respond. */
	for (timer = 0; timer < 30000; timer++) {
		stat = REG_RD32(qlt, REG_RISC_STATUS);
		if (stat & RISC_HOST_INTR_REQUEST) {
			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
				REG_WR32(qlt, REG_HCCR,
				    HCCR_CMD(CLEAR_RISC_PAUSE));
				break;
			}
			REG_WR32(qlt, REG_HCCR,
			    HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
		}
		drv_usecwait(100);
	}
	/* Reset the chip. */
	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
	    PCI_X_XFER_CTRL);
	drv_usecwait(100);

	qlt_disable_intr(qlt);

	return (DDI_SUCCESS);
}
648
649 static void
650 qlt_enable_intr(qlt_state_t *qlt)
651 {
652 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
653 (void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
654 } else {
655 int i;
656 for (i = 0; i < qlt->intr_cnt; i++)
657 (void) ddi_intr_enable(qlt->htable[i]);
658 }
659 qlt->qlt_intr_enabled = 1;
660 }
661
662 static void
663 qlt_disable_intr(qlt_state_t *qlt)
664 {
665 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
666 (void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
667 } else {
668 int i;
669 for (i = 0; i < qlt->intr_cnt; i++)
670 (void) ddi_intr_disable(qlt->htable[i]);
671 }
672 qlt->qlt_intr_enabled = 0;
673 }
674
675 static void
676 qlt_release_intr(qlt_state_t *qlt)
677 {
678 if (qlt->htable) {
679 int i;
680 for (i = 0; i < qlt->intr_cnt; i++) {
681 (void) ddi_intr_remove_handler(qlt->htable[i]);
682 (void) ddi_intr_free(qlt->htable[i]);
683 }
684 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
685 }
686 qlt->htable = NULL;
687 qlt->intr_pri = 0;
688 qlt->intr_cnt = 0;
689 qlt->intr_size = 0;
690 qlt->intr_cap = 0;
691 }
692
693
694 static void
695 qlt_init_mutex(qlt_state_t *qlt)
696 {
697 mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
698 INT2PTR(qlt->intr_pri, void *));
699 mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
700 INT2PTR(qlt->intr_pri, void *));
701 mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
702 INT2PTR(qlt->intr_pri, void *));
703 mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
704 INT2PTR(qlt->intr_pri, void *));
705 }
706
707 static void
708 qlt_destroy_mutex(qlt_state_t *qlt)
709 {
710 mutex_destroy(&qlt->req_lock);
711 mutex_destroy(&qlt->preq_lock);
712 mutex_destroy(&qlt->mbox_lock);
713 mutex_destroy(&qlt->intr_lock);
714 }
715
716
/*
 * qlt_setup_msix
 *	Try to configure the instance for MSI-X: query supported and
 *	available vector counts, allocate them (at least 2 required),
 *	initialize the interrupt-priority mutexes, and attach qlt_isr
 *	to every vector (vector index passed as arg2).
 *
 *	Returns DDI_SUCCESS, or DDI_FAILURE/the failing DDI status after
 *	unwinding via the labels at the bottom.
 */
static int
qlt_setup_msix(qlt_state_t *qlt)
{
	int count, avail, actual;
	int ret;
	int itype = DDI_INTR_TYPE_MSIX;
	int i;

	/* How many MSI-X vectors does the device support... */
	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	if (ret != DDI_SUCCESS || count == 0) {
		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
		    count);
		return (DDI_FAILURE);
	}
	/* ... and how many are currently available to this driver? */
	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
		    avail);
		return (DDI_FAILURE);
	}
	if (avail < count) {
		stmf_trace(qlt->qlt_port_alias,
		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
	}

	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
	/* we need at least 2 interrupt vectors */
	if (ret != DDI_SUCCESS || actual < 2) {
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	if (actual < count) {
		EL(qlt, "requested: %d, received: %d\n", count, actual);
	}

	qlt->intr_cnt = actual;
	/* Mutexes must be created at the vectors' interrupt priority. */
	ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);
	for (i = 0; i < actual; i++) {
		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
		    qlt, INT2PTR((uint_t)i, void *));
		if (ret != DDI_SUCCESS) {
			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
			/*
			 * NOTE(review): handlers already attached to earlier
			 * vectors are not removed before the vectors are
			 * freed below — confirm that is intentional.
			 */
			goto release_mutex;
		}
	}

	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
	qlt->intr_flags |= QLT_INTR_MSIX;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(qlt->htable[i]);
#if 0
free_mem:
#endif
	/*
	 * htable is cleared here so the qlt_release_intr() call below only
	 * resets the remaining bookkeeping fields (it skips a NULL htable).
	 */
	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}
791
792
793 static int
794 qlt_setup_msi(qlt_state_t *qlt)
795 {
796 int count, avail, actual;
797 int itype = DDI_INTR_TYPE_MSI;
798 int ret;
799 int i;
800
801 /* get the # of interrupts */
802 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
803 if (ret != DDI_SUCCESS || count == 0) {
804 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
805 count);
806 return (DDI_FAILURE);
807 }
808 ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
809 if (ret != DDI_SUCCESS || avail == 0) {
810 EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
811 avail);
812 return (DDI_FAILURE);
813 }
814 if (avail < count) {
815 EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
816 }
817 /* MSI requires only 1 interrupt. */
818 count = 1;
819
820 /* allocate interrupt */
917 release_mutex:
918 qlt_destroy_mutex(qlt);
919 release_intr:
920 (void) ddi_intr_free(qlt->htable[0]);
921 free_mem:
922 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
923 qlt->htable = NULL;
924 qlt_release_intr(qlt);
925 return (ret);
926 }
927
/*
 * qlt_setup_interrupts
 *	Pick an interrupt delivery mode in preference order
 *	MSI-X (if qlt_enable_msix) -> MSI -> fixed.
 *	On non-SPARC platforms the MSI/MSI-X paths are additionally gated
 *	on the qlt_enable_msi tunable; note the #ifndef __sparc pair below
 *	opens and closes that extra "if" around both attempts.
 */
static int
qlt_setup_interrupts(qlt_state_t *qlt)
{
	int itypes = 0;

/*
 * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
 */
#ifndef __sparc
	if (qlt_enable_msi != 0) {
#endif
	/* Fall back to fixed interrupts if the query itself fails. */
	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
		itypes = DDI_INTR_TYPE_FIXED;
	}

	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}

	if (itypes & DDI_INTR_TYPE_MSI) {
		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
#ifndef __sparc
	}
#endif
	/* Last resort: legacy fixed interrupts. */
	return (qlt_setup_fixed(qlt));
}
957
/*
 * Filling the hba attributes
 *
 * qlt_populate_hba_fru_details
 *	FCT callback that fills 'port_attrs' with manufacturer, driver and
 *	firmware version strings, the option-ROM (FCode) version from the
 *	"version" device property, and NVRAM-derived ids, frame size,
 *	supported speeds and model strings.
 */
void
qlt_populate_hba_fru_details(struct fct_local_port *port,
    struct fct_port_attrs *port_attrs)
{
	caddr_t bufp;
	int len;
	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;

	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
	    "QLogic Corp.");
	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
	    "%s", QLT_NAME);
	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
	    "%s", QLT_VERSION);
	port_attrs->serial_number[0] = '\0';
	port_attrs->hardware_version[0] = '\0';

	(void) snprintf(port_attrs->firmware_version,
	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
	    qlt->fw_minor, qlt->fw_subminor);

	/* Get FCode version */
	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		(void) snprintf(port_attrs->option_rom_version,
		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
		/* ddi_getlongprop allocated bufp; release it. */
		kmem_free(bufp, (uint_t)len);
		bufp = NULL;
	} else {
#ifdef __sparc
		(void) snprintf(port_attrs->option_rom_version,
		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
#else
		(void) snprintf(port_attrs->option_rom_version,
		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
#endif
	}
	/* NVRAM multi-byte fields are stored little-endian byte arrays. */
	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
	    qlt->nvram->subsystem_vendor_id[1] << 8;

	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
	    qlt->nvram->max_frame_length[0];

	/* NOTE(review): 0x10000000 presumably encodes class 3 — confirm. */
	port_attrs->supported_cos = 0x10000000;
	/* Speed capability depends on the chip family detected at attach. */
	port_attrs->supported_speed = PORT_SPEED_1G |
	    PORT_SPEED_2G | PORT_SPEED_4G;
	if (qlt->qlt_25xx_chip)
		port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
		    PORT_SPEED_8G;
	if (qlt->qlt_81xx_chip)
		port_attrs->supported_speed = PORT_SPEED_10G;

	/* limit string length to nvr model_name length */
	len = (qlt->qlt_81xx_chip) ? 16 : 8;
	(void) snprintf(port_attrs->model,
	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
	    "%s", qlt->nvram->model_name);

	(void) snprintf(port_attrs->model_description,
	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
	    FCHBA_MODEL_DESCRIPTION_LEN),
	    "%s", qlt->nvram->model_name);
}
1025
/* ARGSUSED */
/*
 * qlt_info
 *	FCT information callback. Currently only implements
 *	FC_TGT_PORT_RLS: issues MBC_GET_STATUS_COUNTS (156 bytes of link
 *	error counters via a DMA buffer) and unpacks the little-endian
 *	counters into the caller's fct_port_link_status_t at 'buf'.
 *	'*bufsizep' must be at least sizeof (fct_port_link_status_t).
 */
fct_status_t
qlt_info(uint32_t cmd, fct_local_port_t *port,
    void *arg, uint8_t *buf, uint32_t *bufsizep)
{
	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
	mbox_cmd_t	*mcp;
	fct_status_t	ret = FCT_SUCCESS;
	uint8_t		*p;
	fct_port_link_status_t	*link_status;

	switch (cmd) {
	case FC_TGT_PORT_RLS:
		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
			    "fct_port_link_status_t=%xh\n", *bufsizep,
			    sizeof (fct_port_link_status_t));
			ret = FCT_FAILURE;
			break;
		}
		/* send mailbox command to get link status */
		mcp = qlt_alloc_mailbox_command(qlt, 156);
		if (mcp == NULL) {
			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
			ret = FCT_ALLOC_FAILURE;
			break;
		}

		/* GET LINK STATUS count */
		mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
		mcp->to_fw[8] = 156/4;	/* buffer size in 32-bit words */
		mcp->to_fw_mask |= BIT_1 | BIT_8;
		mcp->from_fw_mask |= BIT_1 | BIT_2;

		ret = qlt_mailbox_command(qlt, mcp);
		if (ret != QLT_SUCCESS) {
			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
			qlt_free_mailbox_command(qlt, mcp);
			break;
		}
		/* Make the firmware-written DMA buffer visible to the CPU. */
		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);

		/* Counters are consecutive little-endian 32-bit values. */
		p = mcp->dbuf->db_sglist[0].seg_addr;
		link_status = (fct_port_link_status_t *)buf;
		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
		link_status->PrimitiveSeqProtocolErrorCount =
		    LE_32(*((uint32_t *)(p + 12)));
		link_status->InvalidTransmissionWordCount =
		    LE_32(*((uint32_t *)(p + 16)));
		link_status->InvalidCRCCount =
		    LE_32(*((uint32_t *)(p + 20)));

		qlt_free_mailbox_command(qlt, mcp);
		break;
	default:
		EL(qlt, "Unknown cmd=%xh\n", cmd);
		ret = FCT_FAILURE;
		break;
	}
	return (ret);
}
1089
/*
 * qlt_port_start
 *	Create and register this instance's FCT local port: initialize the
 *	dbuf DMA memory pools and handle pool, allocate the fct_local_port
 *	and its dbuf store, wire up all the qlt_* port entry points, copy
 *	the node/port WWNs from NVRAM, and register with the FCT framework.
 *	'arg' is really the qlt_state_t * (passed as caddr_t to match the
 *	FCT callback signature).
 *
 *	Returns QLT_SUCCESS, or QLT_FAILURE after unwinding via the
 *	qlt_pstart_fail_* labels.
 */
fct_status_t
qlt_port_start(caddr_t arg)
{
	qlt_state_t *qlt = (qlt_state_t *)arg;
	fct_local_port_t *port;
	fct_dbuf_store_t *fds;
	fct_status_t ret;

	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
		return (FCT_FAILURE);
	}
	/* Initialize the ddi_dma_handle free pool */
	qlt_dma_handle_pool_init(qlt);

	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
	if (port == NULL) {
		goto qlt_pstart_fail_1;
	}
	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
	if (fds == NULL) {
		goto qlt_pstart_fail_2;
	}
	qlt->qlt_port = port;
	/* Data-buffer store callbacks used by the framework for I/O. */
	fds->fds_alloc_data_buf = qlt_dmem_alloc;
	fds->fds_free_data_buf = qlt_dmem_free;
	fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
	fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
	fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
	fds->fds_copy_threshold = MMU_PAGESIZE;
	fds->fds_fca_private = (void *)qlt;
	/*
	 * Since we keep everything in the state struct and dont allocate any
	 * port private area, just use that pointer to point to the
	 * state struct.
	 */
	port->port_fca_private = qlt;
	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
	/* WWNs come straight from the NVRAM read at attach time. */
	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
	port->port_default_alias = qlt->qlt_port_alias;
	port->port_pp = qlt_pp;
	port->port_fds = fds;
	port->port_max_logins = QLT_MAX_LOGINS;
	port->port_max_xchges = QLT_MAX_XCHGES;
	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
	/* Port entry points implemented by this driver. */
	port->port_get_link_info = qlt_get_link_info;
	port->port_register_remote_port = qlt_register_remote_port;
	port->port_deregister_remote_port = qlt_deregister_remote_port;
	port->port_send_cmd = qlt_send_cmd;
	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
	port->port_send_cmd_response = qlt_send_cmd_response;
	port->port_abort_cmd = qlt_abort_cmd;
	port->port_ctl = qlt_ctl;
	port->port_flogi_xchg = qlt_do_flogi;
	port->port_populate_hba_details = qlt_populate_hba_fru_details;
	port->port_info = qlt_info;
	port->port_fca_version = FCT_FCA_MODREV_1;

	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
		goto qlt_pstart_fail_2_5;
	}

	return (QLT_SUCCESS);
#if 0
qlt_pstart_fail_3:
	(void) fct_deregister_local_port(port);
#endif
qlt_pstart_fail_2_5:
	fct_free(fds);
qlt_pstart_fail_2:
	fct_free(port);
	qlt->qlt_port = NULL;
qlt_pstart_fail_1:
	qlt_dma_handle_pool_fini(qlt);
	qlt_dmem_fini(qlt);
	return (QLT_FAILURE);
}
1173
1174 fct_status_t
1175 qlt_port_stop(caddr_t arg)
1176 {
1177 qlt_state_t *qlt = (qlt_state_t *)arg;
1178 fct_status_t ret;
1179
1180 if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1181 EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1182 return (QLT_FAILURE);
1183 }
1184 fct_free(qlt->qlt_port->port_fds);
1185 fct_free(qlt->qlt_port);
1186 qlt->qlt_port = NULL;
1187 qlt_dma_handle_pool_fini(qlt);
1188 qlt_dmem_fini(qlt);
1189 return (QLT_SUCCESS);
1190 }
1191
1192 /*
1193 * Called by framework to init the HBA.
1194 * Can be called in the middle of I/O. (Why ??)
1195 * Should make sure sane state both before and after the initialization
1196 */
1197 fct_status_t
1198 qlt_port_online(qlt_state_t *qlt)
1199 {
1200 uint64_t da;
1201 int instance, i;
1202 fct_status_t ret;
1203 uint16_t rcount;
1204 caddr_t icb;
1205 mbox_cmd_t *mcp;
1206 uint8_t *elsbmp;
1207
1208 instance = ddi_get_instance(qlt->dip);
1209
1210 /* XXX Make sure a sane state */
1211
1212 if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
1213 cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1214 return (ret);
1215 }
1216
1217 bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1218
1219 /* Get resource count */
1220 REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
1221 ret = qlt_raw_mailbox_command(qlt);
1222 rcount = REG_RD16(qlt, REG_MBOX(3));
1223 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1224 if (ret != QLT_SUCCESS) {
1225 EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1226 return (ret);
1227 }
1228
1229 /* Enable PUREX */
1230 REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
1231 REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
1232 REG_WR16(qlt, REG_MBOX(2), 0x0);
1233 REG_WR16(qlt, REG_MBOX(3), 0x0);
1236 if (ret != QLT_SUCCESS) {
1237 EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1238 cmn_err(CE_NOTE, "Enable PUREX failed");
1239 return (ret);
1240 }
1241
1242 /* Pass ELS bitmap to fw */
1243 REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
1244 REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
1245 elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1246 bzero(elsbmp, 32);
1247 da = qlt->queue_mem_cookie.dmac_laddress;
1248 da += MBOX_DMA_MEM_OFFSET;
1249 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
1250 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
1251 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
1252 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
1253 SETELSBIT(elsbmp, ELS_OP_PLOGI);
1254 SETELSBIT(elsbmp, ELS_OP_LOGO);
1255 SETELSBIT(elsbmp, ELS_OP_ABTX);
1256 SETELSBIT(elsbmp, ELS_OP_ECHO);
1257 SETELSBIT(elsbmp, ELS_OP_PRLI);
1258 SETELSBIT(elsbmp, ELS_OP_PRLO);
1259 SETELSBIT(elsbmp, ELS_OP_SCN);
1260 SETELSBIT(elsbmp, ELS_OP_TPRLO);
1261 SETELSBIT(elsbmp, ELS_OP_PDISC);
1262 SETELSBIT(elsbmp, ELS_OP_ADISC);
1263 SETELSBIT(elsbmp, ELS_OP_RSCN);
1264 SETELSBIT(elsbmp, ELS_OP_RNID);
1265 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1266 DDI_DMA_SYNC_FORDEV);
1267 ret = qlt_raw_mailbox_command(qlt);
1268 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1269 if (ret != QLT_SUCCESS) {
1270 EL(qlt, "qlt_raw_mailbox_command=59h status=llxh\n", ret);
1271 cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1272 "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
1273 elsbmp[1]);
1274 return (ret);
1275 }
1276
1277 /* Init queue pointers */
1278 REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1279 REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1280 REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1281 REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1282 REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1283 REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1284 REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1285 REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1286 qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1287 qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1288 qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1289 qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1290 qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1291
1292 /*
1293 * XXX support for tunables. Also should we cache icb ?
1294 */
1295 if (qlt->qlt_81xx_chip) {
1296 /* allocate extra 64 bytes for Extended init control block */
1297 mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1298 } else {
1299 mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1300 }
1301 if (mcp == NULL) {
1302 EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1303 return (STMF_ALLOC_FAILURE);
1304 }
1305 icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1306 if (qlt->qlt_81xx_chip) {
1307 bzero(icb, 0xC0);
1308 } else {
1309 bzero(icb, 0x80);
1310 }
1311 da = qlt->queue_mem_cookie.dmac_laddress;
1312 DMEM_WR16(qlt, icb, 1); /* Version */
1313 DMEM_WR16(qlt, icb+4, 2112); /* Max frame length */
1314 DMEM_WR16(qlt, icb+6, 16); /* Execution throttle */
1315 DMEM_WR16(qlt, icb+8, rcount); /* Xchg count */
1316 DMEM_WR16(qlt, icb+0x0a, 0x00); /* Hard address (not used) */
1317 bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1318 bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1319 DMEM_WR16(qlt, icb+0x20, 3); /* Login retry count */
1320 DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1321 DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1322 if (!qlt->qlt_81xx_chip) {
1323 DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1324 }
1325 DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1326 DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1327 DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1328 DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1329 DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1330 DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1331 DMEM_WR16(qlt, icb+0x58, 2); /* Interrupt delay Timer */
1332 DMEM_WR16(qlt, icb+0x5a, 4); /* Login timeout (secs) */
1333 if (qlt->qlt_81xx_chip) {
1334 qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1335
1336 DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1337 DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1338 DMEM_WR32(qlt, icb+0x70,
1339 qlt81nvr->enode_mac[0] |
1340 (qlt81nvr->enode_mac[1] << 8) |
1341 (qlt81nvr->enode_mac[2] << 16) |
1342 (qlt81nvr->enode_mac[3] << 24));
1343 DMEM_WR16(qlt, icb+0x74,
1344 qlt81nvr->enode_mac[4] |
1345 (qlt81nvr->enode_mac[5] << 8));
1346 } else {
1347 DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1348 BIT_2 | BIT_1 | BIT_0);
1349 DMEM_WR32(qlt, icb+0x60, BIT_5);
1350 DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1351 BIT_4);
1352 }
1353
1354 if (qlt->qlt_81xx_chip) {
1355 qlt_dmem_bctl_t *bctl;
1356 uint32_t index;
1357 caddr_t src;
1358 caddr_t dst;
1359 qlt_nvram_81xx_t *qlt81nvr;
1360
1361 dst = icb+0x80;
1362 qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1363 src = (caddr_t)&qlt81nvr->ext_blk;
1364 index = sizeof (qlt_ext_icb_81xx_t);
1365
1366 /* Use defaults for cases where we find nothing in NVR */
1367 if (*src == 0) {
1368 EL(qlt, "nvram eicb=null\n");
1369 cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1370 instance);
1371 qlt81nvr->ext_blk.version[0] = 1;
1372 /*
1373 * not yet, for !FIP firmware at least
1374 *
1375 * qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1376 */
1377 #ifdef _LITTLE_ENDIAN
1378 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1379 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1380 #else
1381 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1382 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1383 #endif
1384 }
1385
1386 while (index--) {
1387 *dst++ = *src++;
1388 }
1389
1390 bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1391 da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1392
1393 mcp->to_fw[11] = LSW(LSD(da));
1394 mcp->to_fw[10] = MSW(LSD(da));
1395 mcp->to_fw[13] = LSW(MSD(da));
1396 mcp->to_fw[12] = MSW(MSD(da));
1397 mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1398 0xffff);
1399
1400 /* eicb enable */
1401 mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1402 mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1403 BIT_1;
1404 }
1405
1406 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1407 mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
1408
1409 /*
1410 * This is the 1st command after adapter initialize which will
1411 * use interrupts and regular mailbox interface.
1412 */
1413 qlt->mbox_io_state = MBOX_STATE_READY;
1414 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1415 /* Issue mailbox to firmware */
1416 ret = qlt_mailbox_command(qlt, mcp);
1417 if (ret != QLT_SUCCESS) {
1418 EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1419 cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1420 instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1421 }
1422
1423 mcp->to_fw_mask = BIT_0;
1424 mcp->from_fw_mask = BIT_0 | BIT_1;
1425 mcp->to_fw[0] = 0x28;
1426 ret = qlt_mailbox_command(qlt, mcp);
1427 if (ret != QLT_SUCCESS) {
1428 EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1429 cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1430 (long long)ret);
1431 }
1432
1433 /*
1434 * Report FW versions for 81xx - MPI rev is useful
1435 */
1436 if (qlt->qlt_81xx_chip) {
1437 mcp->to_fw_mask = BIT_0;
1438 mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1439 BIT_0;
1440 mcp->to_fw[0] = 0x8;
1441 ret = qlt_mailbox_command(qlt, mcp);
1442 if (ret != QLT_SUCCESS) {
1443 EL(qlt, "about fw failed: %llx\n", (long long)ret);
1444 } else {
1445 EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1446 mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1447 mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1448 mcp->from_fw[11] & 0xff);
1449 }
1450 }
1451
1452 qlt_free_mailbox_command(qlt, mcp);
1453
1454 for (i = 0; i < 5; i++) {
1455 qlt->qlt_bufref[i] = 0;
1456 }
1457 qlt->qlt_bumpbucket = 0;
1458 qlt->qlt_pmintry = 0;
1459 qlt->qlt_pmin_ok = 0;
1460
1461 if (ret != QLT_SUCCESS)
1462 return (ret);
1463 return (FCT_SUCCESS);
1464 }
1465
/*
 * Take the port offline: quiesce the mailbox interface, then reset the
 * chip under the interrupt lock.  Always returns FCT_SUCCESS.
 */
fct_status_t
qlt_port_offline(qlt_state_t *qlt)
{
	int retries;

	mutex_enter(&qlt->mbox_lock);

	/* Mailbox interface never came up (or already torn down) */
	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
		mutex_exit(&qlt->mbox_lock);
		goto poff_mbox_done;
	}

	/* Wait to grab the mailboxes */
	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
	    retries++) {
		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
		/* Give up after several wakeups, or if state collapsed */
		if ((retries > 5) ||
		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
			mutex_exit(&qlt->mbox_lock);
			goto poff_mbox_done;
		}
	}
	/* Mark mailboxes unusable so no new commands are issued */
	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
	mutex_exit(&qlt->mbox_lock);
poff_mbox_done:;
	/*
	 * Let the ISR tolerate a few stray interrupts while the chip is
	 * being reset (counter is consumed by the interrupt handler).
	 */
	qlt->intr_sneak_counter = 10;
	mutex_enter(&qlt->intr_lock);
	(void) qlt_reset_chip(qlt);
	drv_usecwait(20);
	qlt->intr_sneak_counter = 0;
	mutex_exit(&qlt->intr_lock);

	return (FCT_SUCCESS);
}
1501
1502 static fct_status_t
1503 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1504 {
1505 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1506 mbox_cmd_t *mcp;
1507 fct_status_t fc_ret;
1508 fct_status_t ret;
1509 clock_t et;
1510
1511 et = ddi_get_lbolt() + drv_usectohz(5000000);
1512 mcp = qlt_alloc_mailbox_command(qlt, 0);
1513 link_info_retry:
1514 mcp->to_fw[0] = MBC_GET_ID;
1515 mcp->to_fw[9] = 0;
1516 mcp->to_fw_mask |= BIT_0 | BIT_9;
1517 mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1518 /* Issue mailbox to firmware */
1519 ret = qlt_mailbox_command(qlt, mcp);
1520 if (ret != QLT_SUCCESS) {
1521 EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1522 if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1523 /* Firmware is not ready */
1524 if (ddi_get_lbolt() < et) {
1525 delay(drv_usectohz(50000));
1526 goto link_info_retry;
1527 }
1528 }
1529 stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1530 "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1531 fc_ret = FCT_FAILURE;
1532 } else {
1533 li->portid = ((uint32_t)(mcp->from_fw[2])) |
1534 (((uint32_t)(mcp->from_fw[3])) << 16);
1535
1536 li->port_speed = qlt->link_speed;
1537 switch (mcp->from_fw[6]) {
1538 case 1:
1539 li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1540 li->port_fca_flogi_done = 1;
1541 break;
1542 case 0:
1543 li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1544 li->port_no_fct_flogi = 1;
1545 break;
1546 case 3:
1547 li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1548 li->port_fca_flogi_done = 1;
1549 break;
1550 case 2: /*FALLTHROUGH*/
1551 case 4:
1552 li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1553 li->port_fca_flogi_done = 1;
1554 break;
1555 default:
1556 li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1557 EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1558 }
1559 qlt->cur_topology = li->port_topology;
1560 fc_ret = FCT_SUCCESS;
1561 }
1562 qlt_free_mailbox_command(qlt, mcp);
1563
1564 if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1565 mcp = qlt_alloc_mailbox_command(qlt, 64);
1566 mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
1567 mcp->to_fw[1] = 0x7FE;
1568 mcp->to_fw[9] = 0;
1569 mcp->to_fw[10] = 0;
1570 mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1571 fc_ret = qlt_mailbox_command(qlt, mcp);
1572 if (fc_ret != QLT_SUCCESS) {
1573 EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1574 fc_ret);
1575 stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1576 "database for F_port failed, ret = %llx", fc_ret);
1577 } else {
1578 uint8_t *p;
1579
1580 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1581 p = mcp->dbuf->db_sglist[0].seg_addr;
1582 bcopy(p + 0x18, li->port_rpwwn, 8);
1583 bcopy(p + 0x20, li->port_rnwwn, 8);
1584 }
1585 qlt_free_mailbox_command(qlt, mcp);
1586 }
1587 return (fc_ret);
1588 }
1589
1590 static int
1591 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1592 {
1593 int instance;
1594 qlt_state_t *qlt;
1595
1596 if (otype != OTYP_CHR) {
1597 return (EINVAL);
1598 }
1599
1600 /*
1601 * Since this is for debugging only, only allow root to issue ioctl now
1602 */
1603 if (drv_priv(credp)) {
1678 * uploaded firmware is not supported and is provided here for test
1679 * purposes only.
1680 */
1681 /* ARGSUSED */
/*
 * Character-device ioctl entry point (debug/service interface).
 * Handles firmware-dump fetch/trigger, firmware upload, firmware info
 * query, stay-offline control, direct mailbox access and event-log dump.
 *
 * NOTE(review): several spans of this function appear to be missing from
 * the text under review (the extraction is discontinuous); inline NOTE
 * markers flag each spot -- confirm against the complete file before
 * relying on the control flow shown here.
 */
static int
qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	qlt_state_t *qlt;
	int ret = 0;
#ifdef _LITTLE_ENDIAN
	int i;
#endif
	stmf_iocdata_t *iocd;
	void *ibuf = NULL;
	void *obuf = NULL;
	uint32_t *intp;
	qlt_fw_info_t *fwi;
	mbox_cmd_t *mcp;
	fct_status_t st;
	char info[QLT_INFO_LEN];
	fct_status_t ret2;

	/* Debug-only interface: privileged credentials required */
	if (drv_priv(credp) != 0)
		return (EPERM);

	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
	if (ret)
		return (ret);
	iocd->stmf_error = 0;

	switch (cmd) {
	case QLT_IOCTL_FETCH_FWDUMP:
		/* Output buffer must hold a complete firmware dump */
		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
			ret = EINVAL;
			break;
		}
		mutex_enter(&qlt->qlt_ioctl_lock);
		/* NOTE(review): source text appears truncated here */
		}
		/* Each dump may be fetched by the user only once */
		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
			mutex_exit(&qlt->qlt_ioctl_lock);
			ret = EEXIST;
			EL(qlt, "fwdump already fetched\n");
			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
			break;
		}
		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
		mutex_exit(&qlt->qlt_ioctl_lock);

		break;

	case QLT_IOCTL_TRIGGER_FWDUMP:
		if (qlt->qlt_state != FCT_STATE_ONLINE) {
			ret = EACCES;
			iocd->stmf_error = QLTIO_NOT_ONLINE;
			break;
		}
		(void) snprintf(info, sizeof (info), "qlt_ioctl: qlt-%p, "
		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
			    "%llxh\n", ret2);
			ret = EIO;
		}
		break;
	case QLT_IOCTL_UPLOAD_FW:
		/* Image must be at least 1KB and a multiple of 4 bytes */
		if ((iocd->stmf_ibuf_size < 1024) ||
		    (iocd->stmf_ibuf_size & 3)) {
			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
			    iocd->stmf_ibuf_size);
			ret = EINVAL;
			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
			break;
		}
		intp = (uint32_t *)ibuf;
#ifdef _LITTLE_ENDIAN
		/* Byte-swap the image words into host order */
		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
			intp[i] = BSWAP_32(intp[i]);
		}
#endif
		/* Segment lengths in the header must match the buffer size */
		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
		    iocd->stmf_ibuf_size)) {
			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
			    iocd->stmf_ibuf_size);
			ret = EINVAL;
			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
			break;
		}
		/* Image type bits must match this adapter family */
		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
		    ((intp[8] & 3) == 0))) {
			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
			ret = EACCES;
			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
			break;
		}

		/* Everything looks ok, lets copy this firmware */
		if (qlt->fw_code01) {
			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
			    qlt->fw_length02) << 2);
			qlt->fw_code01 = NULL;
		} else {
			atomic_inc_32(&qlt_loaded_counter);
		}
		qlt->fw_length01 = intp[3];
		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
		    KM_SLEEP);
		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
		qlt->fw_addr01 = intp[2];
		/* NOTE(review): source text appears truncated here */
			break;
		}
		fwi = (qlt_fw_info_t *)obuf;
		if (qlt->qlt_stay_offline) {
			fwi->fwi_stay_offline = 1;
		}
		if (qlt->qlt_state == FCT_STATE_ONLINE) {
			fwi->fwi_port_active = 1;
		}
		/* Version of the firmware currently running */
		fwi->fwi_active_major = qlt->fw_major;
		fwi->fwi_active_minor = qlt->fw_minor;
		fwi->fwi_active_subminor = qlt->fw_subminor;
		fwi->fwi_active_attr = qlt->fw_attr;
		/* Version of a user-uploaded image, if one is loaded */
		if (qlt->fw_code01) {
			fwi->fwi_fw_uploaded = 1;
			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
		}
		/* Version of the built-in image for this chip family */
		if (qlt->qlt_81xx_chip) {
			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
		} else if (qlt->qlt_25xx_chip) {
			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
		} else {
			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
		}
		break;

	case QLT_IOCTL_STAY_OFFLINE:
		if (!iocd->stmf_ibuf_size) {
			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
		/* NOTE(review): source text appears truncated here */
			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
			ret = EIO;
			/* Map mailbox failure codes to ioctl error codes */
			switch (st) {
			case QLT_MBOX_NOT_INITIALIZED:
				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
				break;
			case QLT_MBOX_BUSY:
				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
				break;
			case QLT_MBOX_TIMEOUT:
				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
				break;
			case QLT_MBOX_ABORTED:
				iocd->stmf_error = QLTIO_MBOX_ABORTED;
				break;
			}
		}
		break;

	case QLT_IOCTL_ELOG:
		qlt_dump_el_trace_buffer(qlt);
		break;

	default:
		EL(qlt, "Unknown ioctl-%xh\n", cmd);
		ret = ENOTTY;
	}

	/* Copy results out; propagate stmf_error detail even on failure */
	if (ret == 0) {
		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
	} else if (iocd->stmf_error) {
		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
	}
	if (obuf) {
		kmem_free(obuf, iocd->stmf_obuf_size);
		obuf = NULL;
	}
	if (ibuf) {
		kmem_free(ibuf, iocd->stmf_ibuf_size);
		ibuf = NULL;
	}
	kmem_free(iocd, sizeof (stmf_iocdata_t));
	return (ret);
}
1947
1948 static fct_status_t
1949 qlt_force_lip(qlt_state_t *qlt)
1950 {
1951 mbox_cmd_t *mcp;
1952 fct_status_t rval;
1953
1954 mcp = qlt_alloc_mailbox_command(qlt, 0);
1955 mcp->to_fw[0] = 0x0072;
1956 mcp->to_fw[1] = BIT_4;
1957 mcp->to_fw[3] = 1;
1958 mcp->to_fw_mask |= BIT_1 | BIT_3;
1959 rval = qlt_mailbox_command(qlt, mcp);
1960 if (rval != FCT_SUCCESS) {
1961 EL(qlt, "qlt force lip MB failed: rval=%x", rval);
1962 } else {
1963 if (mcp->from_fw[0] != 0x4000) {
1964 QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1965 mcp->from_fw[0]);
1966 rval = FCT_FAILURE;
1967 }
1968 }
1969 qlt_free_mailbox_command(qlt, mcp);
1970 return (rval);
1971 }
1972
/*
 * FCT port-control entry point: handles online/offline transitions,
 * state-change acknowledgements and force-LIP requests on behalf of
 * the framework.
 *
 * NOTE(review): the text between the ONLINE_COMPLETE notification and
 * the FCT_ACK_PORT_OFFLINE_COMPLETE case appears truncated in the
 * source under review -- confirm against the complete file.
 */
static void
qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
{
	stmf_change_status_t st;
	stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;
	qlt_state_t *qlt;
	fct_status_t ret;

	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
	    (cmd == FCT_CMD_PORT_OFFLINE) ||
	    (cmd == FCT_CMD_FORCE_LIP) ||
	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));

	qlt = (qlt_state_t *)port->port_fca_private;
	st.st_completion_status = FCT_SUCCESS;
	st.st_additional_info = NULL;

	switch (cmd) {
	case FCT_CMD_PORT_ONLINE:
		/* Only a clean OFFLINE -> ONLINE transition is permitted */
		if (qlt->qlt_state == FCT_STATE_ONLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
			st.st_completion_status = FCT_FAILURE;
		if (st.st_completion_status == FCT_SUCCESS) {
			qlt->qlt_state = FCT_STATE_ONLINING;
			qlt->qlt_state_not_acked = 1;
			st.st_completion_status = qlt_port_online(qlt);
			if (st.st_completion_status != STMF_SUCCESS) {
				EL(qlt, "PORT_ONLINE status=%xh\n",
				    st.st_completion_status);
				qlt->qlt_state = FCT_STATE_OFFLINE;
				qlt->qlt_state_not_acked = 0;
			} else {
				qlt->qlt_state = FCT_STATE_ONLINE;
			}
		}
		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
		/* NOTE(review): source text appears truncated here */

	case FCT_ACK_PORT_OFFLINE_COMPLETE:
		qlt->qlt_state_not_acked = 0;
		/* After a RESET-flagged offline, bring the port back up */
		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
		    (qlt->qlt_stay_offline == 0)) {
			if ((ret = fct_port_initialize(port,
			    qlt->qlt_change_state_flags,
			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
				EL(qlt, "fct_port_initialize status=%llxh\n",
				    ret);
				cmn_err(CE_WARN, "qlt_ctl: "
				    "fct_port_initialize failed, please use "
				    "stmfstate to start the port-%s manualy",
				    qlt->qlt_port_alias);
			}
		}
		break;

	case FCT_CMD_FORCE_LIP:
		if (qlt->qlt_81xx_chip) {
			EL(qlt, "force lip is an unsupported command "
			    "for this adapter type\n");
		} else {
			*((fct_status_t *)arg) = qlt_force_lip(qlt);
			EL(qlt, "forcelip done\n");
		}
		break;

	default:
		EL(qlt, "unsupport cmd - 0x%02X", cmd);
		break;
	}
}
2078
2079 /* ARGSUSED */
2080 static fct_status_t
2081 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2082 {
2083 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
2084
2085 EL(qlt, "FLOGI requested not supported\n");
2086 cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2087 return (FCT_FAILURE);
2088 }
2089
2090 /*
2091 * Return a pointer to n entries in the request queue. Assumes that
2092 * request queue lock is held. Does a very short busy wait if
 * less/zero entries are available. Returns NULL if it still cannot
 * fulfill the request.
2095 * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2096 */
caddr_t
qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
{
	int try = 0;

	while (qlt->req_available < n) {
		uint32_t val1, val2, val3;
		/*
		 * Read the out-pointer register three times and only trust
		 * the value when all three reads agree.
		 */
		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
		if ((val1 != val2) || (val2 != val3))
			continue;

		/* Recompute free space from the firmware's consumer index */
		qlt->req_ndx_from_fw = val1;
		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
		    (REQUEST_QUEUE_ENTRIES - 1));
		if (qlt->req_available < n) {
			/* Still short: brief busy-wait, at most 2 retries */
			if (try < 2) {
				drv_usecwait(100);
				try++;
				continue;
			} else {
				stmf_trace(qlt->qlt_port_alias,
				    "Req Q is full");
				return (NULL);
			}
		}
		break;
	}
	/* We dont change anything until the entries are submitted */
	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
}
2130
2131 /*
2132 * updates the req in ptr to fw. Assumes that req lock is held.
2133 */
2134 void
2135 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2136 {
2137 ASSERT(n >= 1);
2138 qlt->req_ndx_to_fw += n;
2139 qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2140 qlt->req_available -= n;
2141 REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2142 }
2143
2144
2145 /*
2146 * Return a pointer to n entries in the priority request queue. Assumes that
2147 * priority request queue lock is held. Does a very short busy wait if
 * less/zero entries are available. Returns NULL if it still cannot
 * fulfill the request.
2150 * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2151 */
2152 caddr_t
2153 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2154 {
2155 int try = 0;
2156 uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2157 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2158 (PRIORITY_QUEUE_ENTRIES - 1));
2159
2160 while (req_available < n) {
2161 uint32_t val1, val2, val3;
2162 val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2163 val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2164 val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2201 * - A very hardware specific function. Does not touch driver state.
2202 * - Assumes that interrupts are disabled or not there.
2203 * - Expects that the caller makes sure that all activity has stopped
2204 * and its ok now to go ahead and reset the chip. Also the caller
2205 * takes care of post reset damage control.
2206 * - called by initialize adapter() and dump_fw(for reset only).
2207 * - During attach() nothing much is happening and during initialize_adapter()
2208 * the function (caller) does all the housekeeping so that this function
2209 * can execute in peace.
2210 * - Returns 0 on success.
2211 */
2212 static fct_status_t
2213 qlt_reset_chip(qlt_state_t *qlt)
2214 {
2215 int cntr;
2216
2217 EL(qlt, "initiated\n");
2218
2219 /* XXX: Switch off LEDs */
2220
2221 /* Disable Interrupts */
2222 REG_WR32(qlt, REG_INTR_CTRL, 0);
2223 (void) REG_RD32(qlt, REG_INTR_CTRL);
2224 /* Stop DMA */
2225 REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2226
2227 /* Wait for DMA to be stopped */
2228 cntr = 0;
2229 while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2230 delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2231 cntr++;
2232 /* 3 sec should be more than enough */
2233 if (cntr == 300)
2234 return (QLT_DMA_STUCK);
2235 }
2236
2237 /* Reset the Chip */
2238 REG_WR32(qlt, REG_CTRL_STATUS,
2239 DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2240
2241 qlt->qlt_link_up = 0;
2242
2243 drv_usecwait(100);
2244
2245 /* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2246 cntr = 0;
2247 while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2248 delay(drv_usectohz(10000));
2249 cntr++;
2250 /* 3 sec should be more than enough */
2251 if (cntr == 300)
2252 return (QLT_ROM_STUCK);
2253 }
2254 /* Disable Interrupts (Probably not needed) */
2255 REG_WR32(qlt, REG_INTR_CTRL, 0);
2256
2257 return (QLT_SUCCESS);
2258 }
2259 /*
2260 * - Should not be called from Interrupt.
2261 * - A very hardware specific function. Does not touch driver state.
2262 * - Assumes that interrupts are disabled or not there.
2263 * - Expects that the caller makes sure that all activity has stopped
2264 * and its ok now to go ahead and reset the chip. Also the caller
2265 * takes care of post reset damage control.
2266 * - called by initialize adapter() and dump_fw(for reset only).
2267 * - During attach() nothing much is happening and during initialize_adapter()
2268 * the function (caller) does all the housekeeping so that this function
2269 * can execute in peace.
2270 * - Returns 0 on success.
2271 */
/*
 * Reset the chip, load the appropriate firmware image (user-uploaded if
 * present, otherwise the built-in image for the chip family), verify its
 * checksum, start it executing and record the running version numbers.
 *
 * NOTE(review): the tail of the 24xx load path appears truncated in the
 * text under review -- confirm against the complete file.
 */
static fct_status_t
qlt_download_fw(qlt_state_t *qlt)
{
	uint32_t start_addr;
	fct_status_t ret;

	EL(qlt, "initiated\n");

	(void) qlt_reset_chip(qlt);

	if (qlt->qlt_81xx_chip) {
		qlt_mps_reset(qlt);
	}

	/* Load the two segments */
	if (qlt->fw_code01 != NULL) {
		/* A user-uploaded image takes precedence */
		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
		    qlt->fw_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
			    qlt->fw_length02, qlt->fw_addr02);
		}
		start_addr = qlt->fw_addr01;
	} else if (qlt->qlt_81xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
		    fw8100_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw8100_code02,
			    fw8100_length02, fw8100_addr02);
		}
		start_addr = fw8100_addr01;
	} else if (qlt->qlt_25xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
		    fw2500_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2500_code02,
			    fw2500_length02, fw2500_addr02);
		}
		start_addr = fw2500_addr01;
	} else {
		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
		    fw2400_addr01);
		if (ret == QLT_SUCCESS) {
		/* NOTE(review): source text appears truncated here */
		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
		return (ret);
	}

	/* Verify Checksum */
	REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
		return (ret);
	}

	/* Execute firmware */
	REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
	REG_WR16(qlt, REG_MBOX(3), 0);
	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
		return (ret);
	}

	/* Get revisions (About Firmware) */
	REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
	ret = qlt_raw_mailbox_command(qlt);
	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
		return (ret);
	}

	return (QLT_SUCCESS);
}
2366
2367 /*
2368 * Used only from qlt_download_fw().
2369 */
/*
 * Copy a firmware segment into RISC RAM in chunks via the
 * MBC_LOAD_RAM_EXTENDED mailbox command, staging each chunk through the
 * driver's queue DMA memory.  Returns QLT_SUCCESS or the first mailbox
 * failure status.
 */
static fct_status_t
qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
    uint32_t word_count, uint32_t risc_addr)
{
	uint32_t words_sent = 0;	/* 32-bit words transferred so far */
	uint32_t words_being_sent;	/* size of the current chunk */
	uint32_t *cur_host_addr;	/* host source of the current chunk */
	uint32_t cur_risc_addr;		/* RISC destination of the chunk */
	uint64_t da;			/* DMA address of the staging buffer */
	fct_status_t ret;

	while (words_sent < word_count) {
		cur_host_addr = &(host_addr[words_sent]);
		cur_risc_addr = risc_addr + (words_sent << 2);
		/*
		 * NOTE(review): lines are missing from this view here
		 * (chunk sizing and the copy into queue memory plus the
		 * start of the ddi_dma_sync call) -- confirm against the
		 * complete file.
		 */
		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
		da = qlt->queue_mem_cookie.dmac_laddress;
		REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
		/*
		 * NOTE(review): mailbox 1 is loaded with LSW(risc_addr)
		 * while mailbox 8 uses MSW(cur_risc_addr); verify this is
		 * intended for transfers that need more than one chunk.
		 */
		REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
		REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
		/* Mailboxes 2/3 and 6/7: 64-bit DMA address of the chunk. */
		REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
		REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
		REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
		REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
		/* Mailboxes 4/5: word count of this chunk. */
		REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
		REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
		ret = qlt_raw_mailbox_command(qlt);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		if (ret != QLT_SUCCESS) {
			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
			    ret);
			return (ret);
		}
		words_sent += words_being_sent;
	}
	return (QLT_SUCCESS);
}
2412
2413 /*
2414 * Not used during normal operation. Only during driver init.
2415 * Assumes that interrupts are disabled and mailboxes are loaded.
 * Just triggers the mailbox command and waits for the completion.
 * Also expects that there is nothing else going on and we will only
2418 * get back a mailbox completion from firmware.
2419 * ---DOES NOT CLEAR INTERRUPT---
2420 * Used only from the code path originating from
2421 * qlt_reset_chip_and_download_fw()
2422 */
2423 static fct_status_t
2424 qlt_raw_mailbox_command(qlt_state_t *qlt)
2425 {
2426 int cntr = 0;
2427 uint32_t status;
2428
2429 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2430 while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
2431 cntr++;
2432 if (cntr == 100) {
2433 return (QLT_MAILBOX_STUCK);
2434 }
2435 delay(drv_usectohz(10000));
2436 }
2437 status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);
2438
2439 if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
2440 (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
2441 (status == MBX_CMD_SUCCESSFUL) ||
2442 (status == MBX_CMD_NOT_SUCCESSFUL)) {
2443 uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2444 if (mbox0 == QLT_MBX_CMD_SUCCESS) {
2445 return (QLT_SUCCESS);
2446 } else {
2447 return (QLT_MBOX_FAILED | mbox0);
2448 }
2449 }
2450 /* This is unexpected, dump a message */
2451 cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx",
2452 ddi_get_instance(qlt->dip), (unsigned long long)status);
2453 return (QLT_UNEXPECTED_RESPONSE);
2454 }
2455
/*
 * Allocate and zero a mailbox command structure; if dma_size is nonzero,
 * also attach a DMA buffer for commands that transfer data.  Returns NULL
 * when the DMA buffer cannot be allocated.
 */
static mbox_cmd_t *
qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
{
	mbox_cmd_t *mcp;

	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
	if (dma_size) {
		qlt_dmem_bctl_t *bctl;
		uint64_t da;

		/* &dma_size lets the allocator report the actual size
		 * granted -- TODO confirm against qlt_dma.c. */
		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
		if (mcp->dbuf == NULL) {
			kmem_free(mcp, sizeof (*mcp));
			return (NULL);
	/*
	 * NOTE(review): lines are missing from this view here (DMA buffer
	 * setup using bctl/da and mailbox mask initialization) -- confirm
	 * against the complete file.
	 */
	/* Always request mailbox 0 back so the status can be checked. */
	mcp->from_fw_mask |= BIT_0;
	return (mcp);
}
2487
2488 void
2489 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2490 {
2491 if (mcp->dbuf)
2492 qlt_i_dmem_free(qlt, mcp->dbuf);
2493 kmem_free(mcp, sizeof (*mcp));
2494 }
2495
2496 /*
2497 * This can sleep. Should never be called from interrupt context.
2498 */
static fct_status_t
qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
{
	int retries;
	int i;
	char info[QLT_INFO_LEN];

	/* We sleep on a cv below; refuse to run on an interrupt thread. */
	if (curthread->t_flag & T_INTR_THREAD) {
		ASSERT(0);
		return (QLT_MBOX_FAILED);
	}

	mutex_enter(&qlt->mbox_lock);
	/* See if mailboxes are still uninitialized */
	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
		mutex_exit(&qlt->mbox_lock);
		return (QLT_MBOX_NOT_INITIALIZED);
	}

	/* Wait to grab the mailboxes */
	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
	    retries++) {
		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
		/* Give up after several wakeups or if the mailboxes went
		 * to the UNKNOWN (unusable) state while we waited. */
		if ((retries > 5) ||
		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
			mutex_exit(&qlt->mbox_lock);
			return (QLT_MBOX_BUSY);
		}
	}
	/* Make sure we always ask for mailbox 0 */
	mcp->from_fw_mask |= BIT_0;

	/* Load mailboxes, set state and generate RISC interrupt */
	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
	qlt->mcp = mcp;
	/* Only write the mailboxes selected in to_fw_mask. */
	for (i = 0; i < MAX_MBOXES; i++) {
		if (mcp->to_fw_mask & ((uint32_t)1 << i))
			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
	}
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));

qlt_mbox_wait_loop:;
	/* Wait for mailbox command completion */
	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
		/* Timed out: mark the mailboxes unusable and shut the port
		 * down with a debug dump. */
		(void) snprintf(info, sizeof (info),
		    "qlt_mailbox_command: qlt-%p, "
		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
		qlt->mcp = NULL;
		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
		mutex_exit(&qlt->mbox_lock);

		/*
		 * XXX Throw HBA fatal error event
		 */
		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return (QLT_MBOX_TIMEOUT);
	}
	/* Woken but the command is still running: keep waiting. */
	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
		goto qlt_mbox_wait_loop;

	qlt->mcp = NULL;

	/* Make sure its a completion */
	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
		mutex_exit(&qlt->mbox_lock);
		return (QLT_MBOX_ABORTED);
	}

	/* MBox command completed. Clear state, return based on mbox 0 */
	/* Mailboxes are already loaded by interrupt routine */
	qlt->mbox_io_state = MBOX_STATE_READY;
	mutex_exit(&qlt->mbox_lock);
	if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS)
		return (QLT_MBOX_FAILED | mcp->from_fw[0]);

	return (QLT_SUCCESS);
}
2579
2580 /*
2581 * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2582 */
/* ARGSUSED */
static uint_t
qlt_isr(caddr_t arg, caddr_t arg2)
{
	qlt_state_t *qlt = (qlt_state_t *)arg;
	uint32_t risc_status, intr_type;
	int i;
	int intr_loop_count;
	char info[QLT_INFO_LEN];

	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
	if (!mutex_tryenter(&qlt->intr_lock)) {
		/*
		 * Normally we will always get this lock. If tryenter is
		 * failing then it means that driver is trying to do
		 * some cleanup and is masking the intr but some intr
		 * has sneaked in between. See if our device has generated
		 * this intr. If so then wait a bit and return claimed.
		 * If not then return claimed if this is the 1st instance
		 * of a interrupt after driver has grabbed the lock.
		 */
		if (risc_status & BIT_15) {
			drv_usecwait(10);
			return (DDI_INTR_CLAIMED);
		} else if (qlt->intr_sneak_counter) {
			qlt->intr_sneak_counter--;
			return (DDI_INTR_CLAIMED);
		} else {
			return (DDI_INTR_UNCLAIMED);
		}
	}
	if (((risc_status & BIT_15) == 0) ||
	    (qlt->qlt_intr_enabled == 0)) {
		/*
		 * This might be a pure coincidence that we are operating
		 * in a interrupt disabled mode and another device
		 * sharing the interrupt line has generated an interrupt
		 * while an interrupt from our device might be pending. Just
		 * ignore it and let the code handling the interrupt
		 * disabled mode handle it.
		 */
		mutex_exit(&qlt->intr_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * XXX take care for MSI case. disable intrs
	 * Its gonna be complicated because of the max iterations.
	 * as hba will have posted the intr which did not go on PCI
	 * but we did not service it either because of max iterations.
	 * Maybe offload the intr on a different thread.
	 */
	intr_loop_count = 0;

	/* Mask further RISC interrupts while we drain the queues. */
	REG_WR32(qlt, REG_INTR_CTRL, 0);

intr_again:;

	/* check for risc pause */
	if (risc_status & BIT_8) {
		EL(qlt, "Risc Pause status=%xh\n", risc_status);
		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
		    qlt->instance, risc_status);
		(void) snprintf(info, sizeof (info), "Risc Pause %08x",
		    risc_status);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
	}

	/* First check for high performance path */
	intr_type = risc_status & 0xff;
	if (intr_type == 0x1D) {
		/* Combined ATIO + response queue update. */
		qlt->atio_ndx_from_fw = (uint16_t)
		    REG_RD32(qlt, REG_ATIO_IN_PTR);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		qlt->resp_ndx_from_fw = risc_status >> 16;
		qlt_handle_atio_queue_update(qlt);
		qlt_handle_resp_queue_update(qlt);
	} else if (intr_type == 0x1C) {
		/* ATIO queue update; producer index in status high word. */
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
		qlt_handle_atio_queue_update(qlt);
	} else if (intr_type == 0x13) {
		/* Response queue update; producer index in high word. */
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		qlt->resp_ndx_from_fw = risc_status >> 16;
		qlt_handle_resp_queue_update(qlt);
	} else if (intr_type == 0x12) {
		/* Async event: code in high word, details in mailboxes. */
		uint16_t code = (uint16_t)(risc_status >> 16);
		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));

		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
		    mbox5, mbox6);
		EL(qlt, "Async event %x mb1=%x mb2=%x, mb3=%x, mb5=%x, mb6=%x",
		    code, mbox1, mbox2, mbox3, mbox5, mbox6);

		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
			/* Treated as a link reset, but only if the link
			 * was up -- otherwise nothing to reset. */
			if (qlt->qlt_link_up) {
				fct_handle_event(qlt->qlt_port,
				    FCT_EVENT_LINK_RESET, 0, 0);
			}
		} else if (code == 0x8012) {
			/* Link down. */
			qlt->qlt_link_up = 0;
			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
			    0, 0);
		} else if (code == 0x8011) {
			/* Link up; mailbox 1 carries the link speed code. */
			switch (mbox1) {
			case 0: qlt->link_speed = PORT_SPEED_1G;
				break;
			case 1: qlt->link_speed = PORT_SPEED_2G;
				break;
			case 3: qlt->link_speed = PORT_SPEED_4G;
				break;
			case 4: qlt->link_speed = PORT_SPEED_8G;
				break;
			case 0x13: qlt->link_speed = PORT_SPEED_10G;
				break;
			default:
				qlt->link_speed = PORT_SPEED_UNKNOWN;
			}
			qlt->qlt_link_up = 1;
			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
			    0, 0);
		} else if ((code == 0x8002) || (code == 0x8003) ||
		    (code == 0x8004) || (code == 0x8005)) {
			/* Fatal firmware error codes: shut the port down
			 * and collect a debug dump. */
			(void) snprintf(info, sizeof (info),
			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
			    code, mbox1, mbox2, mbox5, mbox6);
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		} else if (code == 0x800F) {
			(void) snprintf(info, sizeof (info),
			    "Got 800F, mb1=%x mb2=%x mb3=%x",
			    mbox1, mbox2, mbox3);

			if (mbox1 != 1) {
				/* issue "verify fw" */
				qlt_verify_fw(qlt);
			}
		} else if (code == 0x8101) {
			(void) snprintf(info, sizeof (info),
			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
			    code, mbox1, mbox2, mbox3);

			/* check if "ACK" is required (timeout != 0) */
			if (mbox1 & 0x0f00) {
				caddr_t req;

				/*
				 * Ack the request (queue work to do it?)
				 * using a mailbox iocb
				 */
				mutex_enter(&qlt->req_lock);
				req = qlt_get_req_entries(qlt, 1);
				if (req) {
					/* IOCB type 0x39 echoes back the
					 * event's mailbox values. */
					bzero(req, IOCB_SIZE);
					req[0] = 0x39; req[1] = 1;
					QMEM_WR16(qlt, req+8, 0x101);
					QMEM_WR16(qlt, req+10, mbox1);
					QMEM_WR16(qlt, req+12, mbox2);
					QMEM_WR16(qlt, req+14, mbox3);
					QMEM_WR16(qlt, req+16, mbox4);
					QMEM_WR16(qlt, req+18, mbox5);
					QMEM_WR16(qlt, req+20, mbox6);
					qlt_submit_req_entries(qlt, 1);
				} else {
					(void) snprintf(info, sizeof (info),
					    "IDC ACK failed");
				}
				mutex_exit(&qlt->req_lock);
			}
		}
	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
		/* Handle mailbox completion */
		mutex_enter(&qlt->mbox_lock);
		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
			    " when driver wasn't waiting for it %d",
			    qlt->instance, qlt->mbox_io_state);
		} else {
			/* Copy back only the mailboxes the caller asked
			 * for in from_fw_mask. */
			for (i = 0; i < MAX_MBOXES; i++) {
				if (qlt->mcp->from_fw_mask &
				    (((uint32_t)1) << i)) {
					qlt->mcp->from_fw[i] =
					    REG_RD16(qlt, REG_MBOX(i));
				}
			}
			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
		}
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		cv_broadcast(&qlt->mbox_cv);
		mutex_exit(&qlt->mbox_lock);
	} else {
		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
		    qlt->instance, intr_type);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	}

	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
	/* More work pending? Loop, bounded so one interrupt cannot
	 * monopolize the CPU. */
	if ((risc_status & BIT_15) &&
	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
		goto intr_again;
	}

	/* Re-enable RISC interrupts before leaving. */
	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);

	mutex_exit(&qlt->intr_lock);
	return (DDI_INTR_CLAIMED);
}
2801
2802 /* **************** NVRAM Functions ********************** */
2803
/*
 * Read one 32-bit word from flash at word address faddr into *bp.
 * Returns QLT_FLASH_TIMEOUT, QLT_FLASH_ACCESS_ERROR or QLT_SUCCESS.
 */
fct_status_t
qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
{
	uint32_t timer;

	/* Clear access error flag */
	REG_WR32(qlt, REG_CTRL_STATUS,
	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);

	/* Write the address with BIT_31 clear to start a read cycle;
	 * the poll below waits for the hardware to set BIT_31. */
	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);

	/* Wait for READ cycle to complete. */
	for (timer = 3000; timer; timer--) {
		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
			break;
	/*
	 * NOTE(review): lines are missing from this view here (the loop
	 * delay/close and the timed-out condition) -- confirm against the
	 * complete file.
	 */
		return (QLT_FLASH_TIMEOUT);
	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
		EL(qlt, "flash access error\n");
		return (QLT_FLASH_ACCESS_ERROR);
	}

	*bp = REG_RD32(qlt, REG_FLASH_DATA);

	return (QLT_SUCCESS);
}
2834
/*
 * Read and validate this port's NVRAM image into qlt->nvram.  Returns
 * QLT_BAD_NVRAM_DATA on signature/version/checksum failure.
 */
fct_status_t
qlt_read_nvram(qlt_state_t *qlt)
{
	uint32_t index, addr, chksum;
	uint32_t val, *ptr;
	fct_status_t ret;
	qlt_nvram_t *nv;
	uint64_t empty_node_name = 0;

	/* Select the NVRAM region for this chip type and PCI function. */
	if (qlt->qlt_81xx_chip) {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
	} else if (qlt->qlt_25xx_chip) {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
	} else {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
	}
	mutex_enter(&qlt_global_lock);

	/* Pause RISC. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
	(void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */

	/* Get NVRAM data and calculate checksum. */
	ptr = (uint32_t *)qlt->nvram;
	chksum = 0;
	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
		ret = qlt_read_flash_word(qlt, addr++, &val);
		if (ret != QLT_SUCCESS) {
	/*
	 * NOTE(review): lines are missing from this view here (error
	 * unwind, checksum accumulation, RISC release and the exit of
	 * qlt_global_lock) -- confirm against the complete file.
	 */
	/* Validate "ISP " signature, nonzero version and zero checksum. */
	nv = qlt->nvram;
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
		    nv->nvram_version[1], nv->nvram_version[0]);
		return (QLT_BAD_NVRAM_DATA);
	}

	/* If node name is zero, hand craft it from port name */
	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
		bcopy(nv->port_name, nv->node_name, 8);
		/* Clear BIT_0 in the node name, set it in the port name. */
		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
	}

	return (QLT_SUCCESS);
}
2901
/*
 * DMA-sync the ATIO ring entries the firmware has produced (between the
 * driver's consumer index atio_ndx_to_fw and the firmware's producer
 * index atio_ndx_from_fw), handling ring wrap with two sync calls.
 */
uint32_t
qlt_sync_atio_queue(qlt_state_t *qlt)
{
	uint32_t total_ent;

	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
		/* New entries are contiguous: a single sync covers them. */
		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
		    DDI_DMA_SYNC_FORCPU);
	} else {
		/* Producer wrapped: sync the tail, then the head. */
		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
		    qlt->atio_ndx_from_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
		    DDI_DMA_SYNC_FORCPU);
	}
	/*
	 * NOTE(review): the return statement and closing brace are missing
	 * from this view -- presumably "return (total_ent);" as in
	 * qlt_sync_resp_queue below; confirm against the complete file.
	 */
2925 void
2926 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2927 {
2928 uint32_t total_ent;
2929
2930 if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2931 return;
2932
2933 total_ent = qlt_sync_atio_queue(qlt);
2934
2935 do {
2936 uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2937 qlt->atio_ndx_to_fw << 6];
2938 uint32_t ent_cnt;
2939
2940 ent_cnt = (uint32_t)(atio[1]);
2941 if (ent_cnt > total_ent) {
2942 break;
2943 }
2944 switch ((uint8_t)(atio[0])) {
2945 case 0x0d: /* INOT */
2946 qlt_handle_inot(qlt, atio);
2947 break;
2948 case 0x06: /* ATIO */
2949 qlt_handle_atio(qlt, atio);
2950 break;
2951 default:
2952 EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2953 cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2954 "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2955 break;
2956 }
2957 qlt->atio_ndx_to_fw = (uint16_t)(
2958 (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2959 total_ent -= ent_cnt;
2960 } while (total_ent > 0);
2961 REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2962 }
2963
2964 uint32_t
2965 qlt_sync_resp_queue(qlt_state_t *qlt)
2966 {
2967 uint32_t total_ent;
2968
2969 if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2970 total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2971 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2972 RESPONSE_QUEUE_OFFSET
2973 + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2974 DDI_DMA_SYNC_FORCPU);
2975 } else {
2976 total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2977 qlt->resp_ndx_from_fw;
2978 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2979 RESPONSE_QUEUE_OFFSET
2980 + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2981 qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2982 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2983 RESPONSE_QUEUE_OFFSET,
2984 qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2985 }
2986 return (total_ent);
2987 }
2988
2989 void
2990 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2991 {
2992 uint32_t total_ent;
2993 uint8_t c;
2994
2995 if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2996 return;
2997
2998 total_ent = qlt_sync_resp_queue(qlt);
2999
3000 do {
3001 caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
3002 uint32_t ent_cnt;
3003
3004 ent_cnt = (uint32_t)(resp[0] == 0x51 ? resp[1] : 1);
3005 if (ent_cnt > total_ent) {
3006 break;
3007 }
3008 switch ((uint8_t)(resp[0])) {
3009 case 0x12: /* CTIO completion */
3010 qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
3011 break;
3012 case 0x0e: /* NACK */
3013 /* Do Nothing */
3014 break;
3015 case 0x1b: /* Verify FW */
3016 qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
3017 break;
3018 case 0x29: /* CT PassThrough */
3019 qlt_handle_ct_completion(qlt, (uint8_t *)resp);
3020 break;
3021 case 0x33: /* Abort IO IOCB completion */
3022 qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
3023 break;
3024 case 0x51: /* PUREX */
3025 qlt_handle_purex(qlt, (uint8_t *)resp);
3026 break;
3027 case 0x52:
3028 qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
3029 break;
3030 case 0x53: /* ELS passthrough */
3031 c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
3032 if (c == 0) {
3033 qlt_handle_sol_els_completion(qlt,
3034 (uint8_t *)resp);
3035 } else if (c == 3) {
3036 qlt_handle_unsol_els_abort_completion(qlt,
3037 (uint8_t *)resp);
3038 } else {
3039 qlt_handle_unsol_els_completion(qlt,
3040 (uint8_t *)resp);
3041 }
3042 break;
3043 case 0x54: /* ABTS received */
3044 qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
3045 break;
3046 case 0x55: /* ABTS completion */
3047 qlt_handle_abts_completion(qlt, (uint8_t *)resp);
3048 break;
3049 default:
3050 EL(qlt, "response entry=%xh\n", resp[0]);
3051 break;
3052 }
3053 qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
3054 (RESPONSE_QUEUE_ENTRIES - 1);
3055 total_ent -= ent_cnt;
3056 } while (total_ent > 0);
3057 REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
3058 }
3059
3060 fct_status_t
3061 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
3062 uint16_t *ret_handle)
3063 {
3064 fct_status_t ret;
3065 mbox_cmd_t *mcp;
3066 uint16_t n;
3067 uint16_t h;
3068 uint32_t ent_id;
3069 uint8_t *p;
3070 int found = 0;
3071
3072 mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
3073 if (mcp == NULL) {
3074 return (STMF_ALLOC_FAILURE);
3075 }
3076 mcp->to_fw[0] = MBC_GET_ID_LIST;
3077 mcp->to_fw[8] = 2048 * 8;
3078 mcp->to_fw[9] = 0;
3079 mcp->to_fw_mask |= BIT_9 | BIT_8;
3080 mcp->from_fw_mask |= BIT_1 | BIT_2;
3081
3082 ret = qlt_mailbox_command(qlt, mcp);
3083 if (ret != QLT_SUCCESS) {
3084 EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
3085 cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
3086 "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
3087 mcp->from_fw[1], mcp->from_fw[2]);
3088 qlt_free_mailbox_command(qlt, mcp);
3089 return (ret);
3090 }
3091 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
3092 p = mcp->dbuf->db_sglist[0].seg_addr;
3093 for (n = 0; n < mcp->from_fw[1]; n++) {
3094 ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
3095 h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
3096 if (ent_id == id) {
3097 found = 1;
3098 *ret_handle = h;
3099 if ((cmd_handle != FCT_HANDLE_NONE) &&
3100 (cmd_handle != h)) {
3101 cmn_err(CE_WARN, "login for portid %x came in "
3102 "with handle %x, while the portid was "
3103 "already using a different handle %x",
3104 id, cmd_handle, h);
3105 qlt_free_mailbox_command(qlt, mcp);
3106 return (QLT_FAILURE);
3107 }
3108 break;
3109 }
3110 if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
3111 cmn_err(CE_WARN, "login for portid %x came in with "
3112 "handle %x, while the handle was already in use "
3113 "for portid %x", id, cmd_handle, ent_id);
3114 qlt_free_mailbox_command(qlt, mcp);
3115 return (QLT_FAILURE);
3116 }
3117 p += 8;
3118 }
3119 if (!found) {
3120 *ret_handle = cmd_handle;
3121 }
3122 qlt_free_mailbox_command(qlt, mcp);
3123 return (FCT_SUCCESS);
3124 }
3125
3126 /* ARGSUSED */
3127 fct_status_t
3128 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3129 fct_cmd_t *login)
3130 {
3131 uint8_t *p;
3132
3133 p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3174 default:
3175 ret = qlt_portid_to_handle(qlt, rp->rp_id,
3176 login->cmd_rp_handle, &h);
3177 if (ret != FCT_SUCCESS) {
3178 EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3179 return (ret);
3180 }
3181 }
3182
3183 if (login->cmd_type == FCT_CMD_SOL_ELS) {
3184 ret = qlt_fill_plogi_req(port, rp, login);
3185 } else {
3186 ret = qlt_fill_plogi_resp(port, rp, login);
3187 }
3188
3189 if (ret != FCT_SUCCESS) {
3190 EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3191 return (ret);
3192 }
3193
3194 if (h == FCT_HANDLE_NONE)
3195 return (FCT_SUCCESS);
3196
3197 if (rp->rp_handle == FCT_HANDLE_NONE) {
3198 rp->rp_handle = h;
3199 return (FCT_SUCCESS);
3200 }
3201
3202 if (rp->rp_handle == h)
3203 return (FCT_SUCCESS);
3204
3205 EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3206 return (FCT_FAILURE);
3207 }
3208 /* invoked in single thread */
3209 fct_status_t
3210 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3211 {
3212 uint8_t *req;
3213 qlt_state_t *qlt;
3214 clock_t dereg_req_timer;
3215 fct_status_t ret;
3216
3217 qlt = (qlt_state_t *)port->port_fca_private;
3218
3219 if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3220 (qlt->qlt_state == FCT_STATE_OFFLINING))
3221 return (FCT_SUCCESS);
3222 ASSERT(qlt->rp_id_in_dereg == 0);
3223
3224 mutex_enter(&qlt->preq_lock);
3225 req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3226 if (req == NULL) {
3227 mutex_exit(&qlt->preq_lock);
3228 return (FCT_BUSY);
3229 }
3230 bzero(req, IOCB_SIZE);
3231 req[0] = 0x52; req[1] = 1;
3232 /* QMEM_WR32(qlt, (&req[4]), 0xffffffff); */
3233 QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3234 QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3235 QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3236 qlt->rp_id_in_dereg = rp->rp_id;
3237 qlt_submit_preq_entries(qlt, 1);
3238
3239 dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3240 if (cv_timedwait(&qlt->rp_dereg_cv,
3241 &qlt->preq_lock, dereg_req_timer) > 0) {
3242 ret = qlt->rp_dereg_status;
3243 } else {
3244 ret = FCT_BUSY;
3245 }
3246 qlt->rp_dereg_status = 0;
3247 qlt->rp_id_in_dereg = 0;
3248 mutex_exit(&qlt->preq_lock);
3249 return (ret);
3250 }
3251
3252 /*
3253 * Pass received ELS up to framework.
3254 */
static void
qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
{
	fct_cmd_t *cmd;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	uint32_t payload_size;
	uint32_t remote_portid;
	uint8_t *pldptr, *bndrptr;
	int i, off;
	uint16_t iocb_flags;
	char info[QLT_INFO_LEN];

	/* 24-bit remote port id: low 16 bits plus one high byte. */
	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
	/* BIT_15 of the flags selects which length field is valid. */
	if (iocb_flags & BIT_15) {
		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
	} else {
		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
	}

	/* resp[1] is the IOCB entry count; bound the payload by it. */
	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
		EL(qlt, "payload is too large = %xh\n", payload_size);
		cmn_err(CE_WARN, "handle_purex: payload is too large");
		goto cmd_null;
	}

	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
	if (cmd == NULL) {
		EL(qlt, "fct_alloc cmd==NULL\n");
cmd_null:;
		/* Allocation failure while holding an unsolicited ELS is
		 * fatal for the port: shut it down. */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_purex: qlt-%p, "
		    "can't allocate space for fct_cmd", (void *)qlt);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	cmd->cmd_port = qlt->qlt_port;
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
	/* 0xFFFF from the firmware means "no handle". */
	if (cmd->cmd_rp_handle == 0xFFFF) {
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	}

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	els->els_req_size = (uint16_t)payload_size;
	/* Payload area lives directly after the qlt_cmd_t. */
	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
	    GET_STRUCT_SIZE(qlt_cmd_t));
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
	/*
	 * Copy the payload out of the response ring, un-swapping each
	 * 4-byte word, wrapping at the ring boundary and skipping 4 bytes
	 * at each IOCB boundary (apparently a continuation header --
	 * TODO confirm against the IOCB layout).
	 */
	pldptr = &resp[0x2C];
	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
		/* Take care of fw's swapping of payload */
		els->els_req_payload[i] = pldptr[3];
		els->els_req_payload[i+1] = pldptr[2];
		els->els_req_payload[i+2] = pldptr[1];
		els->els_req_payload[i+3] = pldptr[0];
		pldptr += 4;
		if (pldptr == bndrptr)
			pldptr = (uint8_t *)qlt->resp_ptr;
		off += 4;
		if (off >= IOCB_SIZE) {
			off = 4;
			pldptr += 4;
		}
	}
	fct_post_rcvd_cmd(cmd, 0);
}
3332
3333 fct_status_t
3334 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3335 {
3336 qlt_state_t *qlt;
3337 char info[QLT_INFO_LEN];
3338
3339 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3340
3341 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3342 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3343 EL(qlt, "ioflags = %xh\n", ioflags);
3344 goto fatal_panic;
3345 } else {
3346 return (qlt_send_status(qlt, cmd));
3347 }
3348 }
3349
3350 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3351 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3352 goto fatal_panic;
3353 } else {
3354 return (qlt_send_els_response(qlt, cmd));
3355 }
3356 }
3357
3358 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3359 cmd->cmd_handle = 0;
3360 }
3361
3362 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3363 return (qlt_send_abts_response(qlt, cmd, 0));
3364 } else {
3365 EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3366 ASSERT(0);
3367 return (FCT_FAILURE);
3368 }
3369
3370 fatal_panic:;
3371 (void) snprintf(info, sizeof (info),
3372 "qlt_send_cmd_response: can not handle "
3373 "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3374 ioflags);
3375 (void) fct_port_shutdown(qlt->qlt_port,
3376 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3377 return (FCT_FAILURE);
3378 }
3379
/*
 * Queue a CTIO type 7 IOCB (opcode 0x12) to move SCSI data for a task.
 * Scatter/gather buffers from an LU (DB_LU_DATA_BUF) may need extra
 * continuation type 1 IOCBs (opcode 0x0a) for the additional DMA
 * cookies; a plain port-allocated buffer is one contiguous segment.
 * Returns FCT_BUSY when the request ring cannot hold the entries.
 */
/* ARGSUSED */
fct_status_t
qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	uint8_t *req, rcnt;
	uint16_t flags;
	uint16_t cookie_count;

	/* Remember the first buffer (db_handle 0) on the command. */
	if (dbuf->db_handle == 0)
		qcmd->dbuf = dbuf;
	/* Carry the saved ATIO byte 3 attribute bits into the IOCB flags. */
	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		/* Data goes out to the remote port; flush CPU view first. */
		flags = (uint16_t)(flags | 2);
		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
	} else {
		flags = (uint16_t)(flags | 1);
	}

	/* BIT_15 asks the firmware to also send GOOD status afterwards. */
	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
		flags = (uint16_t)(flags | BIT_15);

	if (dbuf->db_flags & DB_LU_DATA_BUF) {
		/*
		 * Data bufs from LU are in scatter/gather list format.
		 */
		cookie_count = qlt_get_cookie_count(dbuf);
		rcnt = qlt_get_iocb_count(cookie_count);
	} else {
		cookie_count = 1;
		rcnt = 1;
	}
	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, rcnt);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE); /* XXX needed ? */
	req[0] = 0x12;		/* CTIO type 7 */
	req[1] = rcnt;		/* total entries incl. continuations */
	req[2] = dbuf->db_handle;
	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, req+10, 60); /* 60 seconds timeout */
	QMEM_WR16(qlt, req+12, cookie_count);
	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
	QMEM_WR16(qlt, req+0x1A, flags);
	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
	if (dbuf->db_flags & DB_LU_DATA_BUF) {
		uint8_t *qptr; /* qlt continuation segs */
		uint16_t cookie_resid;
		uint16_t cont_segs;
		ddi_dma_cookie_t cookie, *ckp;

		/*
		 * See if the dma cookies are in simple array format.
		 */
		ckp = qlt_get_cookie_array(dbuf);

		/*
		 * Program the first segment into main record.
		 */
		if (ckp) {
			ASSERT(ckp->dmac_size);
			QMEM_WR64(qlt, req+0x34, ckp->dmac_laddress);
			QMEM_WR32(qlt, req+0x3c, ckp->dmac_size);
		} else {
			qlt_ddi_dma_nextcookie(dbuf, &cookie);
			ASSERT(cookie.dmac_size);
			QMEM_WR64(qlt, req+0x34, cookie.dmac_laddress);
			QMEM_WR32(qlt, req+0x3c, cookie.dmac_size);
		}
		cookie_resid = cookie_count-1;

		/*
		 * Program remaining segments into continuation records.
		 */
		while (cookie_resid) {
			req += IOCB_SIZE;
			/*
			 * Wrap back to the start of the request ring;
			 * assumes the response ring begins immediately
			 * after it in queue memory -- TODO confirm.
			 */
			if (req >= (uint8_t *)qlt->resp_ptr) {
				req = (uint8_t *)qlt->req_ptr;
			}
			req[0] = 0x0a;	/* continuation type 1 IOCB */
			req[1] = 1;
			req[2] = req[3] = 0; /* tidy */
			qptr = &req[4];
			for (cont_segs = CONT_A64_DATA_SEGMENTS;
			    cont_segs && cookie_resid; cont_segs--) {

				if (ckp) {
					++ckp; /* next cookie */
					ASSERT(ckp->dmac_size != 0);
					QMEM_WR64(qlt, qptr,
					    ckp->dmac_laddress);
					qptr += 8; /* skip over laddress */
					QMEM_WR32(qlt, qptr, ckp->dmac_size);
					qptr += 4; /* skip over size */
				} else {
					qlt_ddi_dma_nextcookie(dbuf, &cookie);
					ASSERT(cookie.dmac_size != 0);
					QMEM_WR64(qlt, qptr,
					    cookie.dmac_laddress);
					qptr += 8; /* skip over laddress */
					QMEM_WR32(qlt, qptr, cookie.dmac_size);
					qptr += 4; /* skip over size */
				}
				cookie_resid--;
			}
			/*
			 * zero unused remainder of IOCB
			 */
			if (cont_segs) {
				size_t resid;
				resid = (size_t)((uintptr_t)(req+IOCB_SIZE) -
				    (uintptr_t)qptr);
				ASSERT(resid < IOCB_SIZE);
				bzero(qptr, resid);
			}
		}
	} else {
		/* Single, contiguous buffer */
		QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
		QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
	}

	qlt_submit_req_entries(qlt, rcnt);
	mutex_exit(&qlt->req_lock);

	/*
	 * NOTE(review): returns STMF_SUCCESS although the function is
	 * declared fct_status_t -- presumably both are 0; consider
	 * FCT_SUCCESS for consistency.
	 */
	return (STMF_SUCCESS);
}
3516
/*
 * We must construct a proper FCP_RSP_IU now. Here we only focus on
 * the handling of FCP_SNS_INFO. Any protocol failures (FCP_RSP_INFO)
 * would have been caught before we enter here.
 */
/*
 * Queue a CTIO type 7 IOCB (opcode 0x12) to send SCSI status for a task.
 * Non CHECK CONDITION status uses status mode 1 (status embedded in the
 * IOCB).  For CHECK CONDITION, sense data up to 24 bytes also fits in
 * mode 1; longer sense uses mode 2, where a full FCP_RSP_IU is placed in
 * a dma buffer referenced by the IOCB.
 * Returns FCT_BUSY when the request ring is full.
 */
fct_status_t
qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	scsi_task_t *task = (scsi_task_t *)cmd->cmd_specific;
	qlt_dmem_bctl_t *bctl;
	uint32_t size;
	uint8_t *req, *fcp_rsp_iu;
	uint8_t *psd, sensbuf[24]; /* sense data */
	uint16_t flags;
	uint16_t scsi_status;
	int use_mode2;
	int ndx;

	/*
	 * Enter fast channel for non check condition
	 */
	if (task->task_scsi_status != STATUS_CHECK) {
		/*
		 * We will use mode1
		 */
		flags = (uint16_t)(BIT_6 | BIT_15 |
		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
		scsi_status = (uint16_t)task->task_scsi_status;
		/* Report over/underrun via the residual-valid bits. */
		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
			scsi_status = (uint16_t)(scsi_status | BIT_10);
		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
			scsi_status = (uint16_t)(scsi_status | BIT_11);
		}
		qcmd->dbuf_rsp_iu = NULL;

		/*
		 * Fill out CTIO type 7 IOCB
		 */
		mutex_enter(&qlt->req_lock);
		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
		if (req == NULL) {
			mutex_exit(&qlt->req_lock);
			return (FCT_BUSY);
		}

		/*
		 * Common fields
		 */
		bzero(req, IOCB_SIZE);
		req[0x00] = 0x12;
		req[0x01] = 0x1;
		req[0x02] = BIT_7; /* indicate if it's a pure status req */
		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);

		/*
		 * Mode-specific fields
		 */
		QMEM_WR16(qlt, req + 0x1A, flags);
		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
		QMEM_WR16(qlt, req + 0x22, scsi_status);

		/*
		 * Trigger FW to send SCSI status out
		 */
		qlt_submit_req_entries(qlt, 1);
		mutex_exit(&qlt->req_lock);
		/*
		 * NOTE(review): STMF_SUCCESS returned for a fct_status_t
		 * function -- presumably equal to FCT_SUCCESS (0).
		 */
		return (STMF_SUCCESS);
	}

	ASSERT(task->task_scsi_status == STATUS_CHECK);
	/*
	 * Decide the SCSI status mode, that should be used
	 */
	use_mode2 = (task->task_sense_length > 24);

	/*
	 * Prepare required information per the SCSI status mode
	 */
	flags = (uint16_t)(BIT_15 |
	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
	if (use_mode2) {
		flags = (uint16_t)(flags | BIT_7);

		size = task->task_sense_length;
		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
		    task->task_sense_length, &size, 0);
		/*
		 * NOTE(review): the allocation result is not checked and
		 * the FCP_RSP_IU payload is not visibly populated before
		 * the sync below -- confirm against the full source.
		 */
		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
	} else {
		flags = (uint16_t)(flags | BIT_6);

		scsi_status = (uint16_t)task->task_scsi_status;
		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
			scsi_status = (uint16_t)(scsi_status | BIT_10);
		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
			scsi_status = (uint16_t)(scsi_status | BIT_11);
		}
		/* BIT_9: sense data is valid/present in the IOCB. */
		if (task->task_sense_length) {
			scsi_status = (uint16_t)(scsi_status | BIT_9);
		}
		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
		qcmd->dbuf_rsp_iu = NULL;
	}

	/*
	 * Fill out CTIO type 7 IOCB
	 */
	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		/* Ring full: release the mode-2 response buffer. */
		if (use_mode2) {
			qlt_dmem_free(cmd->cmd_port->port_fds,
			    qcmd->dbuf_rsp_iu);
			qcmd->dbuf_rsp_iu = NULL;
		}
		return (FCT_BUSY);
	}

	/*
	 * Common fields
	 */
	bzero(req, IOCB_SIZE);
	req[0x00] = 0x12;
	req[0x01] = 0x1;
	req[0x02] = BIT_7; /* to indicate if it's a pure status req */
	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, req + 0x0A, 0); /* not timed by FW */
	if (use_mode2) {
		QMEM_WR16(qlt, req+0x0C, 1); /* FCP RSP IU data field */
	}
	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);

	/*
	 * Mode-specific fields
	 */
	if (!use_mode2) {
		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
	}
	QMEM_WR16(qlt, req + 0x1A, flags);
	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
	if (use_mode2) {
		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
		/* 24 = fixed FCP_RSP_IU header ahead of the sense bytes. */
		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
	} else {
		QMEM_WR16(qlt, req + 0x22, scsi_status);
		psd = req+0x28;

		/*
		 * Data in sense buf is always big-endian, data in IOCB
		 * should always be little-endian, so we must do swapping.
		 */
		size = ((task->task_sense_length + 3) & (~3));
		for (ndx = 0; ndx < size; ndx += 4) {
			psd[ndx + 0] = sensbuf[ndx + 3];
			psd[ndx + 1] = sensbuf[ndx + 2];
			psd[ndx + 2] = sensbuf[ndx + 1];
			psd[ndx + 3] = sensbuf[ndx + 0];
		}
	}

	/*
	 * Trigger FW to send SCSI status out
	 */
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (STMF_SUCCESS);
}
3751
/*
 * Send the response to an unsolicited ELS.  The response payload is
 * copied into a driver dma buffer and an ELS pass-through IOCB
 * (opcode 0x53) is queued to transmit it.
 * Returns FCT_BUSY when no dma buffer or ring space is available;
 * FCT_SUCCESS once the IOCB is on the request ring.
 */
fct_status_t
qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t *qcmd;
	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
	uint8_t *req, *addr;
	qlt_dmem_bctl_t *bctl;
	uint32_t minsize;
	uint8_t elsop, req1f;

	addr = els->els_resp_payload;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;

	minsize = els->els_resp_size;
	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
	if (qcmd->dbuf == NULL)
		return (FCT_BUSY);

	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;

	/* Stage the response payload for DMA to the chip. */
	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);

	/* First payload byte distinguishes LS_ACC (0x02) from a reject. */
	if (addr[0] == 0x02) { /* ACC */
		req1f = BIT_5;
	} else {
		req1f = BIT_6;
	}
	elsop = els->els_req_payload[0];
	/*
	 * Session-affecting ELS ops get BIT_4 as well -- presumably so
	 * the firmware updates its login state; confirm against the
	 * firmware interface spec.
	 */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	/* ELS pass-through IOCB, opcode 0x53, single entry. */
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	/* In point-to-point topology the local port id is set explicitly. */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
3815
/*
 * Respond to (or, with 'terminate' set, terminate) a received ABTS.
 * The raw ABTS IOCB saved at receive time (qcmd->buf, see
 * qlt_handle_rcvd_abts) is replayed to the firmware as an ABTS
 * response IOCB (opcode 0x55) with the port ids swapped for the
 * reply direction.  Returns FCT_BUSY when the request ring is full.
 */
fct_status_t
qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
{
	qlt_abts_cmd_t *qcmd;
	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
	uint8_t *req;
	uint32_t lportid;
	uint32_t fctl;
	int i;

	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	/* Start from the original ABTS IOCB saved when it was received. */
	bcopy(qcmd->buf, req, IOCB_SIZE);
	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
	fctl = QMEM_RD32(qlt, req+0x1C);
	/* Presumably adjusts F_CTL for the reply direction -- confirm. */
	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp)
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	else
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
	if (terminate) {
		QMEM_WR16(qlt, (&req[0xC]), 1);
	}
	/* Swap direction: destination is the remote port, source is us. */
	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
	req[0x17] = abts->abts_resp_rctl;
	QMEM_WR32(qlt, req+0x18, lportid);
	QMEM_WR32(qlt, req+0x1C, fctl);
	req[0x23]++;	/* bump sequence id byte -- TODO confirm field */
	for (i = 0; i < 12; i += 4) {
		/* Take care of firmware's LE requirement */
		req[0x2C+i] = abts->abts_resp_payload[i+3];
		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
		req[0x2C+i+3] = abts->abts_resp_payload[i];
	}
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
3864
3865 static void
3866 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3867 {
3868 int i;
3869 uint32_t d;
3870 caddr_t req;
3871 /* Just put it on the request queue */
3872 mutex_enter(&qlt->req_lock);
3873 req = qlt_get_req_entries(qlt, 1);
3874 if (req == NULL) {
3875 mutex_exit(&qlt->req_lock);
3876 /* XXX handle this */
3877 return;
3878 }
3879 for (i = 0; i < 16; i++) {
3880 d = QMEM_RD32(qlt, inot);
3881 inot += 4;
3882 QMEM_WR32(qlt, req, d);
3883 req += 4;
3884 }
3885 req -= 64;
3886 req[0] = 0x0e;
3887 qlt_submit_req_entries(qlt, 1);
3888 mutex_exit(&qlt->req_lock);
3889 }
3890
/*
 * Lookup table indexed by the low 3 bits of the FCP_CNTL task
 * attribute byte (see qlt_handle_atio) -- presumably mapping FCP task
 * attributes to STMF task_flags values; confirm against scsi_task(9S).
 */
uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
/*
 * Handle an incoming ATIO: a new FCP command from an initiator.
 * Allocates a scsi_task through FCT, fills it from the FCP_CMND
 * payload and posts it.  If no task can be allocated (or the firmware
 * exchange address is invalid) the exchange is terminated with a
 * CTIO carrying BIT_14 in its flags.
 */
static void
qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
{
	fct_cmd_t *cmd;
	scsi_task_t *task;
	qlt_cmd_t *qcmd;
	uint32_t rportid, fw_xchg_addr;
	uint8_t *p, *q, *req, tm;
	uint16_t cdb_size, flags, oxid;
	char info[QLT_INFO_LEN];

	/*
	 * If either a bidirectional xfer is requested or there is an
	 * extended CDB, atio[0x20 + 11] will be greater than or equal to 3.
	 */
	cdb_size = 16;
	if (atio[0x20 + 11] >= 3) {
		uint8_t b = atio[0x20 + 11];
		uint16_t b1;
		if ((b & 3) == 3) {
			EL(qlt, "bidirectional I/O not supported\n");
			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
			    "received, dropping the cmd as bidirectional "
			    " transfers are not yet supported", qlt->instance);
			/* XXX abort the I/O */
			return;
		}
		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
		/*
		 * Verify that we have enough entries. Without additional CDB
			return;
		}
	}

	rportid = (((uint32_t)atio[8 + 5]) << 16) |
	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);

	if (fw_xchg_addr == 0xFFFFFFFF) {
		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
		cmd = NULL;
	} else {
		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
		if (cmd == NULL) {
			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
		}
	}
	if (cmd == NULL) {
		/* NOTE(review): duplicates the EL() logged just above. */
		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
		/* Abort this IO */
		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));

		mutex_enter(&qlt->req_lock);
		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
		if (req == NULL) {
			mutex_exit(&qlt->req_lock);

			(void) snprintf(info, sizeof (info),
			    "qlt_handle_atio: qlt-%p, can't "
			    "allocate space for scsi_task", (void *)qlt);
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
			return;
		}
		/* Terminate the exchange with a minimal CTIO. */
		bzero(req, IOCB_SIZE);
		req[0] = 0x12; req[1] = 0x1;
		QMEM_WR32(qlt, req+4, 0);
		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
		    rportid));
		QMEM_WR16(qlt, req+10, 60);
		QMEM_WR32(qlt, req+0x10, rportid);
		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
		QMEM_WR16(qlt, req+0x1A, flags);
		QMEM_WR16(qlt, req+0x20, oxid);
		qlt_submit_req_entries(qlt, 1);
		mutex_exit(&qlt->req_lock);

		return;
	}

	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->fw_xchg_addr = fw_xchg_addr;
	qcmd->param.atio_byte3 = atio[3];
	cmd->cmd_oxid = oxid;
	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
	    atio[8+19]);
	cmd->cmd_rportid = rportid;
	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	/* Dont do a 64 byte read as this is IOMMU */
	q = atio+0x28;
	/* XXX Handle fcp_cntl */
	task->task_cmd_seq_no = (uint32_t)(*q++);
	task->task_csn_size = 8;
	task->task_flags = qlt_task_flags[(*q++) & 7];
	tm = *q++;
	/* Decode FCP task-management flags, if any. */
	if (tm) {
		if (tm & BIT_1)
			task->task_mgmt_function = TM_ABORT_TASK_SET;
		else if (tm & BIT_2)
			task->task_mgmt_function = TM_CLEAR_TASK_SET;
		else if (tm & BIT_4)
			/*
			 * NOTE(review): the source appears truncated
			 * between here and the ATIO-queue wrap handling
			 * below -- verify against the complete file.
			 */
			q = (uint8_t *)qlt->queue_mem_ptr +
			    ATIO_QUEUE_OFFSET;
			}
		}
		for (i = 0; i < 4; i++) {
			cb[i] = *q++;
			/* Wrap q when it runs off the end of the ATIO ring. */
			if (q == ((uint8_t *)qlt->queue_mem_ptr +
			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
				q = (uint8_t *)qlt->queue_mem_ptr +
				    ATIO_QUEUE_OFFSET;
			}
		}
		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
		    (((uint32_t)cb[1]) << 16) |
		    (((uint32_t)cb[2]) << 8) | cb[3];
	} else {
		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
		    (((uint32_t)q[1]) << 16) |
		    (((uint32_t)q[2]) << 8) | q[3];
	}
	fct_post_rcvd_cmd(cmd, 0);
}
4059
4060 static void
4061 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
4062 {
4063 uint16_t status;
4064 uint32_t portid;
4065 uint32_t subcode1, subcode2;
4066
4067 status = QMEM_RD16(qlt, rsp+8);
4068 portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
4069 subcode1 = QMEM_RD32(qlt, rsp+0x14);
4070 subcode2 = QMEM_RD32(qlt, rsp+0x18);
4071
4072 mutex_enter(&qlt->preq_lock);
4073 if (portid != qlt->rp_id_in_dereg) {
4074 int instance = ddi_get_instance(qlt->dip);
4075
4076 EL(qlt, "implicit logout reveived portid = %xh\n", portid);
4077 cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
4078 " received when driver wasn't waiting for it",
4079 instance, portid);
4080 mutex_exit(&qlt->preq_lock);
4081 return;
4082 }
4083
4084 if (status != 0) {
4085 EL(qlt, "implicit logout completed for %xh with status %xh, "
4086 "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
4087 subcode2);
4088 if (status == 0x31 && subcode1 == 0x0a) {
4089 qlt->rp_dereg_status = FCT_SUCCESS;
4090 } else {
4091 EL(qlt, "implicit logout portid=%xh, status=%xh, "
4092 "subcode1=%xh, subcode2=%xh\n", portid, status,
4093 subcode1, subcode2);
4094 qlt->rp_dereg_status =
4095 QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
4096 }
4097 } else {
4098 qlt->rp_dereg_status = FCT_SUCCESS;
4099 }
4100 cv_signal(&qlt->rp_dereg_cv);
4101 mutex_exit(&qlt->preq_lock);
4102 }
4103
4104 /*
4105 * Note that when an ELS is aborted, the regular or aborted completion
4106 * (if any) gets posted before the abort IOCB comes back on response queue.
4107 */
/*
 * Completion handler for the ELS pass-through IOCB that carried our
 * response to an unsolicited ELS (queued by qlt_send_els_response).
 * Frees the response dma buffer and completes the command with FCT.
 */
static void
qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[QLT_INFO_LEN];
	fct_cmd_t *cmd;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint32_t subcode1, subcode2;
	uint16_t status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * This cannot happen for unsol els completion. This can
		 * only happen when abort for an unsol els completes.
		 * This condition indicates a firmware bug.
		 */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_unsol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	if (status == 5) {
		/*
		 * When an unsolicited els is aborted, the abort is done
		 * by a ELSPT iocb with abort control. This is the aborted IOCB
		 * and not the abortee. We will do the cleanup when the
		 * IOCB which caused the abort, returns.
		 */
		EL(qlt, "status = %xh\n", status);
		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		/*
		 * Now why would this happen ???
		 */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_unsol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * This is the same case as "if (status == 5)" above. The
		 * only difference is that in this case the firmware actually
		 * finished sending the response. So the abort attempt will
		 * come back with status ?. We will handle it there.
		 */
		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
		    "abort it");
		return;
	}

	/* Response payload dma buffer is no longer needed. */
	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	if (status == 0) {
		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_response_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
4194
/*
 * Completion handler for the ELSPT IOCB issued with abort control to
 * abort an unsolicited ELS exchange.  Frees the pending response
 * buffer and reports the abort outcome to FCT.
 */
static void
qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[QLT_INFO_LEN];
	fct_cmd_t *cmd;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint32_t subcode1, subcode2;
	uint16_t status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		if ((status != 0) && (status != 8)) {
			EL(qlt, "status = %xh\n", status);
			/*
			 * There could be exchange resource leakage, so
			 * throw HBA fatal error event now
			 */
			(void) snprintf(info, sizeof (info),
			    "qlt_handle_unsol_els_abort_completion: "
			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
			    hndl, status, subcode1, subcode2, (void *)rsp);
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
			return;
		}

		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		/*
		 * Why would this happen ??
		 */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_unsol_els_abort_completion: can not get "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	ASSERT(qcmd->flags & QLT_CMD_ABORTING);

	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	/* Status 8 is mapped to FCT_NOT_FOUND (exchange already gone). */
	if (status == 0) {
		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
	} else if (status == 8) {
		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
	} else {
		fct_cmd_fca_aborted(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
4270
/*
 * Completion handler for a solicited ELS that this port transmitted.
 */
static void
qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[QLT_INFO_LEN];
	fct_cmd_t *cmd;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint32_t subcode1, subcode2;
	uint16_t status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * This cannot happen for sol els completion.
		 */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_sol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_sol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when the ABORT IO IOCB returns.
		 */
		return;
	}

	if (qcmd->dbuf != NULL) {
		if (status == 0) {
			/*
			 * NOTE(review): the success path that would copy
			 * the ELS response out of dbuf appears to be
			 * elided here -- verify against the full source.
			 */
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
4345
/*
 * Completion handler for a solicited CT pass-through command.  On
 * success the CT response is copied out of the dma buffer into the
 * caller's ct_resp_payload; the dma buffer is freed either way.
 */
static void
qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	fct_cmd_t *cmd;
	fct_sol_ct_t *ct;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint16_t status;
	char info[QLT_INFO_LEN];

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * Solicited commands will always have a valid handle.
		 */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_ct_completion: "
		    "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_ct_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when ABORT IO IOCB returns;
		 */
		return;
	}

	ASSERT(qcmd->dbuf);
	if (status == 0) {
		/* Pull the response out of the dma buffer for the caller. */
		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
		    qcmd->param.resp_offset,
		    ct->ct_resp_payload, ct->ct_resp_size);
	}
	qlt_dmem_free(NULL, qcmd->dbuf);
	qcmd->dbuf = NULL;

	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}
4413
/*
 * Completion handler for CTIO (type 7) IOCBs: data transfers and
 * status transmissions for SCSI tasks, including aborted ones
 * (BIT_14 in the completion flags marks an abort completion).
 */
static void
qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	fct_cmd_t *cmd;
	scsi_task_t *task;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *dbuf;
	fct_status_t fc_st;
	uint32_t iof = 0;
	uint32_t hndl;
	uint16_t status;
	uint16_t flags;
	uint8_t abort_req;
	uint8_t n;
	char info[QLT_INFO_LEN];

	/* XXX: Check validity of the IOCB by checking 4th byte. */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	flags = QMEM_RD16(qlt, rsp+0x1a);
	n = rsp[2];

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
		    (void *)rsp);
		if ((status != 1) && (status != 2)) {
			EL(qlt, "status = %xh\n", status);
			/*
			 * There could be exchange resource leakage, so
			 * throw HBA fatal error event now
			 */
			(void) snprintf(info, sizeof (info),
			    "qlt_handle_ctio_completion: hndl-"
			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		}

		return;
	}

	if (flags & BIT_14) {
		abort_req = 1;
		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
		    (void *)rsp);
	} else {
		abort_req = 0;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_ctio_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* A mode-2 FCP_RSP_IU buffer is done once its CTIO completes. */
	if (qcmd->dbuf_rsp_iu) {
		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
		qcmd->dbuf_rsp_iu = NULL;
	}

	/* Status 1/2 are the firmware's success indications here. */
	if ((status == 1) || (status == 2)) {
		if (abort_req) {
			fc_st = FCT_ABORT_SUCCESS;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = FCT_SUCCESS;
			if (flags & BIT_15) {
				iof = FCT_IOF_FCA_DONE;
	/*
	 * NOTE(review): the source appears truncated here (error-status
	 * decoding and dbuf lookup elided) -- verify against the full file.
	 */
	if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
	if (flags & BIT_15) {
		dbuf->db_flags = (uint16_t)(dbuf->db_flags |
		    DB_STATUS_GOOD_SENT);
	}

	dbuf->db_xfer_status = fc_st;
	fct_scsi_data_xfer_done(cmd, dbuf, iof);
	return;
	}
	if (!abort_req) {
		/*
		 * This was just a pure status xfer.
		 */
		fct_send_response_done(cmd, fc_st, iof);
		return;
	}

	fct_cmd_fca_aborted(cmd, fc_st, iof);
}
4541
/*
 * Completion handler for a solicited "abort IO" IOCB issued to abort
 * one of our own solicited ELS/CT commands.  Frees the command's dma
 * buffer and reports the abort outcome to FCT.
 */
static void
qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[QLT_INFO_LEN];
	fct_cmd_t *cmd;
	qlt_cmd_t *qcmd;
	uint32_t h;
	uint16_t status;

	h = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(h)) {
		EL(qlt, "handle = %xh\n", h);
		/*
		 * Solicited commands always have a valid handle.
		 */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_sol_abort_completion: hndl-"
		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}
	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
		/*
		 * What happened to the cmd ??
		 */
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_sol_abort_completion: cannot "
		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
		    (void *)rsp);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
	    (cmd->cmd_type == FCT_CMD_SOL_CT));
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}
	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
	/* Status 0x31 is mapped to FCT_NOT_FOUND (nothing left to abort). */
	if (status == 0) {
		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
	} else if (status == 0x31) {
		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
	} else {
		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}
4599
/*
 * Handle an ABTS (abort sequence) IOCB received from the firmware.
 * Allocates an FCT command to represent the ABTS, stashes the raw IOCB
 * in the command's FCA-private area (NOTE(review): presumably consulted
 * later when building the ABTS response — verify against
 * qlt_send_abts_response), extracts the addressing fields, and posts the
 * command to the FCT layer.  If the allocation fails the ABTS would be
 * lost, so the port is shut down instead.
 */
static void
qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
{
	qlt_abts_cmd_t *qcmd;
	fct_cmd_t *cmd;
	uint32_t remote_portid;
	char info[QLT_INFO_LEN];

	/* 24-bit remote port id: low 16 bits at 0x18, high byte at 0x1A */
	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
	    sizeof (qlt_abts_cmd_t), 0);
	if (cmd == NULL) {
		EL(qlt, "fct_alloc cmd==NULL\n");
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_rcvd_abts: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	/* Clear bytes 0xC-0xE of the IOCB before saving a copy of it. */
	resp[0xC] = resp[0xD] = resp[0xE] = 0;
	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
	bcopy(resp, qcmd->buf, IOCB_SIZE);
	cmd->cmd_port = qlt->qlt_port;
	/* A handle of 0xFFFF means the firmware has no login for the port. */
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
	if (cmd->cmd_rp_handle == 0xFFFF)
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;

	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
	fct_post_rcvd_cmd(cmd, 0);
}
4637
4638 static void
4639 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4640 {
4641 uint16_t status;
4642 char info[QLT_INFO_LEN];
4643
4644 status = QMEM_RD16(qlt, resp+8);
4645
4646 if ((status == 0) || (status == 5)) {
4647 return;
4648 }
4649 EL(qlt, "status = %xh\n", status);
4650 (void) snprintf(info, sizeof (info),
4651 "ABTS completion failed %x/%x/%x resp_off %x",
4652 status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4653 ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4654 (void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4655 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4656 }
4657
4658 #ifdef DEBUG
4659 uint32_t qlt_drop_abort_counter = 0;
4660 #endif
4661
4662 fct_status_t
4663 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4664 {
4665 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4666
4667 if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4668 (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4669 return (FCT_NOT_FOUND);
4670 }
4671
4672 #ifdef DEBUG
4673 if (qlt_drop_abort_counter > 0) {
4674 if (atomic_dec_32_nv(&qlt_drop_abort_counter) == 1)
4675 return (FCT_SUCCESS);
4676 }
4677 #endif
4678
4679 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4680 return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4681 }
4682
4683 if (flags & FCT_IOF_FORCE_FCA_DONE) {
4684 cmd->cmd_handle = 0;
4685 }
4686
4687 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4688 return (qlt_send_abts_response(qlt, cmd, 1));
4689 }
4690
4691 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4692 return (qlt_abort_purex(qlt, cmd));
4693 }
4694
4695 if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4696 (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4697 return (qlt_abort_sol_cmd(qlt, cmd));
4698 }
4699 EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4700
4701 ASSERT(0);
4702 return (FCT_FAILURE);
4703 }
4704
4705 fct_status_t
4706 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4707 {
4708 uint8_t *req;
4709 qlt_cmd_t *qcmd;
4710
4711 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4712 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4713 EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
4714
4715 mutex_enter(&qlt->req_lock);
4716 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4717 if (req == NULL) {
4718 mutex_exit(&qlt->req_lock);
4719
4720 return (FCT_BUSY);
4721 }
4722 bzero(req, IOCB_SIZE);
4723 req[0] = 0x33; req[1] = 1;
4724 QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4725 if (cmd->cmd_rp) {
4726 QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4727 } else {
4728 QMEM_WR16(qlt, req+8, 0xFFFF);
4729 }
4730
4731 QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4732 QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4733 qlt_submit_req_entries(qlt, 1);
4734 mutex_exit(&qlt->req_lock);
4735
4736 return (FCT_SUCCESS);
4737 }
4738
/*
 * Abort an unsolicited (received) ELS by sending an ELS passthru IOCB
 * (type 0x53) in terminate-exchange mode against the firmware exchange
 * that delivered the ELS.
 *
 * Returns FCT_BUSY if no request-queue slot is available (caller may
 * retry), FCT_SUCCESS once the IOCB is queued; the final disposition is
 * reported asynchronously through the abort-completion path.
 */
fct_status_t
qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	qlt_cmd_t *qcmd;
	fct_els_t *els;
	uint8_t elsop, req1f;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	elsop = els->els_req_payload[0];	/* ELS command code */
	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
	    elsop);
	req1f = 0x60; /* Terminate xchg */
	/* For the (pr)login/logout-related ELS ops BIT_4 is set as well. */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);

		return (FCT_BUSY);
	}

	/* Mark the command aborting only once queue space is guaranteed. */
	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp) {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
	} else {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
	}

	/* Firmware exchange address of the ELS being terminated. */
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4786
4787 fct_status_t
4788 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4789 {
4790 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4791 uint8_t *req;
4792 uint16_t flags;
4793
4794 flags = (uint16_t)(BIT_14 |
4795 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
4796 EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4797
4798 mutex_enter(&qlt->req_lock);
4799 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4800 if (req == NULL) {
4801 mutex_exit(&qlt->req_lock);
4802
4803 return (FCT_BUSY);
4804 }
4805
4806 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4807 bzero(req, IOCB_SIZE);
4808 req[0] = 0x12; req[1] = 0x1;
4809 QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4810 QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4811 QMEM_WR16(qlt, req+10, 60); /* 60 seconds timeout */
4812 QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4813 QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4814 QMEM_WR16(qlt, req+0x1A, flags);
4815 QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4816 qlt_submit_req_entries(qlt, 1);
4817 mutex_exit(&qlt->req_lock);
4818
4819 return (FCT_SUCCESS);
4820 }
4821
4822 fct_status_t
4823 qlt_send_cmd(fct_cmd_t *cmd)
4824 {
4825 qlt_state_t *qlt;
4826
4827 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4828 if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4829 return (qlt_send_els(qlt, cmd));
4830 } else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4831 return (qlt_send_ct(qlt, cmd));
4832 }
4833 EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4834
4835 ASSERT(0);
4836 return (FCT_FAILURE);
4837 }
4838
/*
 * Transmit a solicited ELS request using an ELS passthru IOCB (0x53).
 *
 * A single DMA buffer holds both the outgoing payload and the landing
 * area for the response: the request occupies the front, the response
 * starts at the next 8-byte boundary (saved in param.resp_offset for
 * the completion path).
 *
 * Returns FCT_BUSY if the DMA buffer or a request-queue slot cannot be
 * obtained, FCT_SUCCESS once the IOCB is queued.
 */
fct_status_t
qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area begins at the 8-byte-aligned end of the request. */
	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
	    els->els_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		/* No IOCB space; release the buffer, caller may retry. */
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1;	/* ELS passthru, one entry */
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);		/* one xmit DSD (at 0x28) */
	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
	QMEM_WR16(qlt, (&req[0x14]), 1);	/* one recv DSD (at 0x34) */
	req[0x16] = els->els_req_payload[0];	/* ELS command code */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		/*
		 * Point-to-point: supply our 24-bit local port id
		 * explicitly (byte layout: 0x1b=bits 16-23, 0x1c=bits 0-7,
		 * 0x1d=bits 8-15).
		 */
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);	/* req DSD */
	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset));				/* resp DSD */
	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4898
/*
 * Transmit a solicited CT request using a CT passthru IOCB (0x29).
 * The DMA buffer layout mirrors qlt_send_els(): request payload at the
 * front, response landing area at the following 8-byte boundary
 * (saved in param.resp_offset for the completion path).
 *
 * Returns FCT_BUSY when out of DMA memory or request-queue space,
 * FCT_SUCCESS once queued; completion is reported asynchronously.
 */
fct_status_t
qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_sol_ct_t *ct;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area begins at the 8-byte-aligned end of the request. */
	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
	    ct->ct_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		/* No IOCB space; release the buffer, caller may retry. */
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x29; req[1] = 1;	/* CT passthru, one entry */
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);		/* one cmd DSD (at 0x28) */
	QMEM_WR16(qlt, (&req[0x10]), 0x20); /* > (2 * RA_TOV) */
	QMEM_WR16(qlt, (&req[0x14]), 1);	/* one resp DSD (at 0x34) */

	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);

	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset)); /* RESPONSE DSD */
	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);

	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4954
4955
4956 /*
4957 * All QLT_FIRMWARE_* will mainly be handled in this function
4958 * It can not be called in interrupt context
4959 *
4960 * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4961 * and qlt_ioctl_lock
4962 */
4963 static fct_status_t
4964 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4965 {
4966 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4967 int i;
4968 int retries, n;
4969 uint_t size_left;
4970 char c = ' ';
4971 uint32_t addr, endaddr, words_to_read;
4972 caddr_t buf;
4973 fct_status_t ret;
4974
4975 mutex_enter(&qlt->qlt_ioctl_lock);
4976 /*
4977 * To make sure that there's no outstanding dumping task
4978 */
4979 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4980 mutex_exit(&qlt->qlt_ioctl_lock);
4981 EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4982 qlt->qlt_ioctl_flags);
4983 EL(qlt, "outstanding\n");
4984 return (FCT_FAILURE);
4985 }
4986
4987 /*
4988 * To make sure not to overwrite existing dump
4989 */
4990 if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4991 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4992 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4993 /*
4994 * If we have alreay one dump, but it's not triggered by user
4995 * and the user hasn't fetched it, we shouldn't dump again.
4996 */
4997 mutex_exit(&qlt->qlt_ioctl_lock);
4998 EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4999 qlt->qlt_ioctl_flags);
5000 cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
5001 "is one already outstanding.", qlt->instance);
5002 return (FCT_FAILURE);
5003 }
5004 qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
5005 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
5006 qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
5007 } else {
5008 qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
5009 }
5010 mutex_exit(&qlt->qlt_ioctl_lock);
5011
5012 size_left = QLT_FWDUMP_BUFSIZE;
5013 if (!qlt->qlt_fwdump_buf) {
5014 ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
5015 /*
5016 * It's the only place that we allocate buf for dumping. After
5017 * it's allocated, we will use it until the port is detached.
5018 */
5019 qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
5020 }
5021
5022 /*
5023 * Start to dump firmware
5024 */
5025 buf = (caddr_t)qlt->qlt_fwdump_buf;
5026
5027 /*
5028 * Print the ISP firmware revision number and attributes information
5029 * Read the RISC to Host Status register
5030 */
5031 n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
5032 "Attributes %04x\n\nR2H Status Register\n%08x",
5033 qlt->fw_major, qlt->fw_minor,
5034 qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
5035 buf += n; size_left -= n;
5036
5037 /*
5038 * Before pausing the RISC, make sure no mailbox can execute
5039 */
5040 mutex_enter(&qlt->mbox_lock);
5041 if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
5042 /*
5043 * Wait to grab the mailboxes
5044 */
5045 for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
5046 (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
5047 (void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
5048 ddi_get_lbolt() + drv_usectohz(1000000));
5049 if (retries > 5) {
5050 mutex_exit(&qlt->mbox_lock);
5051 EL(qlt, "can't drain out mailbox commands\n");
5052 goto dump_fail;
5053 }
5054 }
5055 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
5056 cv_broadcast(&qlt->mbox_cv);
5057 }
5058 mutex_exit(&qlt->mbox_lock);
5059
5060 /*
5061 * Pause the RISC processor
5062 */
5063 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
5064
5065 /*
5066 * Wait for the RISC processor to pause
5067 */
5068 for (i = 0; i < 200; i++) {
5069 if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
5070 break;
5071 }
5072 drv_usecwait(1000);
5073 }
5074 if (i == 200) {
5075 EL(qlt, "can't pause\n");
5076 return (FCT_FAILURE);
5077 }
5078
5079 if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
5080 goto over_25xx_specific_dump;
5081 }
5082 n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
5083 buf += n; size_left -= n;
5084 REG_WR32(qlt, 0x54, 0x7000);
5085 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5086 buf += n; size_left -= n;
5087 REG_WR32(qlt, 0x54, 0x7010);
5088 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5089 buf += n; size_left -= n;
5090 REG_WR32(qlt, 0x54, 0x7C00);
5091
5092 n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
5093 buf += n; size_left -= n;
5094 REG_WR32(qlt, 0xC0, 0x1);
5095 n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
5096 buf += n; size_left -= n;
5097 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
5098 buf += n; size_left -= n;
5099 REG_WR32(qlt, 0xC0, 0x0);
5100
5101 over_25xx_specific_dump:;
5102 n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
5103 buf += n; size_left -= n;
5104 /*
5105 * Capture data from 32 regsiters
5106 */
5107 n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
5108 buf += n; size_left -= n;
5109
5110 /*
5111 * Disable interrupts
5112 */
5113 REG_WR32(qlt, 0xc, 0);
5114
5115 /*
5116 * Shadow registers
5117 */
5118 n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
5119 buf += n; size_left -= n;
5120
5121 REG_WR32(qlt, 0x54, 0xF70);
5122 addr = 0xb0000000;
5123 for (i = 0; i < 0xb; i++) {
5124 if ((!qlt->qlt_25xx_chip) &&
5125 (!qlt->qlt_81xx_chip) &&
5126 (i >= 7)) {
5127 break;
5128 }
5129 if (i && ((i & 7) == 0)) {
5130 n = (int)snprintf(buf, size_left, "\n");
5131 buf += n; size_left -= n;
5132 }
5133 REG_WR32(qlt, 0xF0, addr);
5134 n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
5135 buf += n; size_left -= n;
5136 addr += 0x100000;
5137 }
5138
5139 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5140 REG_WR32(qlt, 0x54, 0x10);
5141 n = (int)snprintf(buf, size_left,
5142 "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
5143 buf += n; size_left -= n;
5144 }
5145
5146 /*
5147 * Mailbox registers
5148 */
5149 n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
5150 buf += n; size_left -= n;
5151 for (i = 0; i < 32; i += 2) {
5152 if ((i + 2) & 15) {
5153 c = ' ';
5154 } else {
5155 c = '\n';
5156 }
5157 n = (int)snprintf(buf, size_left, "%04x %04x%c",
5158 REG_RD16(qlt, 0x80 + (i << 1)),
5159 REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
5160 buf += n; size_left -= n;
5161 }
5162
5163 /*
5164 * Transfer sequence registers
5165 */
5166 n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
5167 buf += n; size_left -= n;
5168
5169 REG_WR32(qlt, 0x54, 0xBF00);
5170 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5171 buf += n; size_left -= n;
5172 REG_WR32(qlt, 0x54, 0xBF10);
5173 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5174 buf += n; size_left -= n;
5175 REG_WR32(qlt, 0x54, 0xBF20);
5176 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5177 buf += n; size_left -= n;
5178 REG_WR32(qlt, 0x54, 0xBF30);
5179 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5180 buf += n; size_left -= n;
5181 REG_WR32(qlt, 0x54, 0xBF40);
5182 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5183 buf += n; size_left -= n;
5184 REG_WR32(qlt, 0x54, 0xBF50);
5185 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5186 buf += n; size_left -= n;
5187 REG_WR32(qlt, 0x54, 0xBF60);
5188 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5189 buf += n; size_left -= n;
5190 REG_WR32(qlt, 0x54, 0xBF70);
5191 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5192 buf += n; size_left -= n;
5193 n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5194 buf += n; size_left -= n;
5195 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5196 REG_WR32(qlt, 0x54, 0xBFC0);
5197 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5198 buf += n; size_left -= n;
5199 REG_WR32(qlt, 0x54, 0xBFD0);
5200 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5201 buf += n; size_left -= n;
5202 }
5203 REG_WR32(qlt, 0x54, 0xBFE0);
5204 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5205 buf += n; size_left -= n;
5206 n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5207 buf += n; size_left -= n;
5208 REG_WR32(qlt, 0x54, 0xBFF0);
5209 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5210 buf += n; size_left -= n;
5211
5212 /*
5213 * Receive sequence registers
5214 */
5215 n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5216 buf += n; size_left -= n;
5217 REG_WR32(qlt, 0x54, 0xFF00);
5218 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5219 buf += n; size_left -= n;
5220 REG_WR32(qlt, 0x54, 0xFF10);
5221 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5222 buf += n; size_left -= n;
5223 REG_WR32(qlt, 0x54, 0xFF20);
5224 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5225 buf += n; size_left -= n;
5226 REG_WR32(qlt, 0x54, 0xFF30);
5227 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5228 buf += n; size_left -= n;
5229 REG_WR32(qlt, 0x54, 0xFF40);
5230 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5231 buf += n; size_left -= n;
5232 REG_WR32(qlt, 0x54, 0xFF50);
5233 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5234 buf += n; size_left -= n;
5235 REG_WR32(qlt, 0x54, 0xFF60);
5236 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5237 buf += n; size_left -= n;
5238 REG_WR32(qlt, 0x54, 0xFF70);
5239 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5240 buf += n; size_left -= n;
5241 n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5242 buf += n; size_left -= n;
5243 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5244 REG_WR32(qlt, 0x54, 0xFFC0);
5245 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5246 buf += n; size_left -= n;
5247 }
5248 REG_WR32(qlt, 0x54, 0xFFD0);
5249 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5250 buf += n; size_left -= n;
5251 n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5252 buf += n; size_left -= n;
5253 REG_WR32(qlt, 0x54, 0xFFE0);
5254 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5255 buf += n; size_left -= n;
5256 n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5257 buf += n; size_left -= n;
5258 REG_WR32(qlt, 0x54, 0xFFF0);
5259 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5260 buf += n; size_left -= n;
5261
5262 if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5263 goto over_aseq_regs;
5264
5265 /*
5266 * Auxiliary sequencer registers
5267 */
5268 n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5269 buf += n; size_left -= n;
5270 REG_WR32(qlt, 0x54, 0xB000);
5271 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5272 buf += n; size_left -= n;
5273 REG_WR32(qlt, 0x54, 0xB010);
5274 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5275 buf += n; size_left -= n;
5276 REG_WR32(qlt, 0x54, 0xB020);
5277 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5278 buf += n; size_left -= n;
5279 REG_WR32(qlt, 0x54, 0xB030);
5280 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5281 buf += n; size_left -= n;
5282 REG_WR32(qlt, 0x54, 0xB040);
5283 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5284 buf += n; size_left -= n;
5285 REG_WR32(qlt, 0x54, 0xB050);
5286 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5287 buf += n; size_left -= n;
5288 REG_WR32(qlt, 0x54, 0xB060);
5289 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5290 buf += n; size_left -= n;
5291 REG_WR32(qlt, 0x54, 0xB070);
5292 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5293 buf += n; size_left -= n;
5294 n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5295 buf += n; size_left -= n;
5296 REG_WR32(qlt, 0x54, 0xB0C0);
5297 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5298 buf += n; size_left -= n;
5299 REG_WR32(qlt, 0x54, 0xB0D0);
5300 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5301 buf += n; size_left -= n;
5302 n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5303 buf += n; size_left -= n;
5304 REG_WR32(qlt, 0x54, 0xB0E0);
5305 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5306 buf += n; size_left -= n;
5307 n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5308 buf += n; size_left -= n;
5309 REG_WR32(qlt, 0x54, 0xB0F0);
5310 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5311 buf += n; size_left -= n;
5312
5313 over_aseq_regs:;
5314
5315 /*
5316 * Command DMA registers
5317 */
5318 n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5319 buf += n; size_left -= n;
5320 REG_WR32(qlt, 0x54, 0x7100);
5321 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5322 buf += n; size_left -= n;
5323
5324 /*
5325 * Queues
5326 */
5327 n = (int)snprintf(buf, size_left,
5328 "\nRequest0 Queue DMA Channel registers\n");
5329 buf += n; size_left -= n;
5330 REG_WR32(qlt, 0x54, 0x7200);
5331 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5332 buf += n; size_left -= n;
5333 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5334 buf += n; size_left -= n;
5335
5336 n = (int)snprintf(buf, size_left,
5337 "\n\nResponse0 Queue DMA Channel registers\n");
5338 buf += n; size_left -= n;
5339 REG_WR32(qlt, 0x54, 0x7300);
5340 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5341 buf += n; size_left -= n;
5342 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5462 buf += n; size_left -= n;
5463 REG_WR32(qlt, 0x54, 0x3010);
5464 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5465 buf += n; size_left -= n;
5466 REG_WR32(qlt, 0x54, 0x3020);
5467 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5468 buf += n; size_left -= n;
5469 REG_WR32(qlt, 0x54, 0x3030);
5470 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5471 buf += n; size_left -= n;
5472 REG_WR32(qlt, 0x54, 0x3040);
5473 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5474 buf += n; size_left -= n;
5475 REG_WR32(qlt, 0x54, 0x3050);
5476 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5477 buf += n; size_left -= n;
5478 REG_WR32(qlt, 0x54, 0x3060);
5479 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5480 buf += n; size_left -= n;
5481
5482 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5483 REG_WR32(qlt, 0x54, 0x3070);
5484 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5485 buf += n; size_left -= n;
5486 }
5487
5488 /*
5489 * Fibre protocol module regsiters
5490 */
5491 n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5492 buf += n; size_left -= n;
5493 REG_WR32(qlt, 0x54, 0x4000);
5494 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5495 buf += n; size_left -= n;
5496 REG_WR32(qlt, 0x54, 0x4010);
5497 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5498 buf += n; size_left -= n;
5499 REG_WR32(qlt, 0x54, 0x4020);
5500 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5501 buf += n; size_left -= n;
5502 REG_WR32(qlt, 0x54, 0x4030);
5503 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5504 buf += n; size_left -= n;
5505 REG_WR32(qlt, 0x54, 0x4040);
5506 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5507 buf += n; size_left -= n;
5508 REG_WR32(qlt, 0x54, 0x4050);
5509 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5510 buf += n; size_left -= n;
5511 REG_WR32(qlt, 0x54, 0x4060);
5512 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5513 buf += n; size_left -= n;
5514 REG_WR32(qlt, 0x54, 0x4070);
5515 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5516 buf += n; size_left -= n;
5517 REG_WR32(qlt, 0x54, 0x4080);
5518 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5519 buf += n; size_left -= n;
5520 REG_WR32(qlt, 0x54, 0x4090);
5521 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5522 buf += n; size_left -= n;
5523 REG_WR32(qlt, 0x54, 0x40A0);
5524 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5525 buf += n; size_left -= n;
5526 REG_WR32(qlt, 0x54, 0x40B0);
5527 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5528 buf += n; size_left -= n;
5529 if (qlt->qlt_81xx_chip) {
5530 REG_WR32(qlt, 0x54, 0x40C0);
5531 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5532 buf += n; size_left -= n;
5533 REG_WR32(qlt, 0x54, 0x40D0);
5534 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5535 buf += n; size_left -= n;
5536 }
5537
5538 /*
5539 * Fibre buffer registers
5540 */
5541 n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5542 buf += n; size_left -= n;
5543 REG_WR32(qlt, 0x54, 0x6000);
5544 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5545 buf += n; size_left -= n;
5546 REG_WR32(qlt, 0x54, 0x6010);
5547 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5548 buf += n; size_left -= n;
5549 REG_WR32(qlt, 0x54, 0x6020);
5550 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5551 buf += n; size_left -= n;
5552 REG_WR32(qlt, 0x54, 0x6030);
5553 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5554 buf += n; size_left -= n;
5555 REG_WR32(qlt, 0x54, 0x6040);
5556 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5557 buf += n; size_left -= n;
5558 REG_WR32(qlt, 0x54, 0x6100);
5559 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5560 buf += n; size_left -= n;
5561 REG_WR32(qlt, 0x54, 0x6130);
5562 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5563 buf += n; size_left -= n;
5564 REG_WR32(qlt, 0x54, 0x6150);
5565 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5566 buf += n; size_left -= n;
5567 REG_WR32(qlt, 0x54, 0x6170);
5568 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5569 buf += n; size_left -= n;
5570 REG_WR32(qlt, 0x54, 0x6190);
5571 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5572 buf += n; size_left -= n;
5573 REG_WR32(qlt, 0x54, 0x61B0);
5574 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5575 buf += n; size_left -= n;
5576 if (qlt->qlt_81xx_chip) {
5577 REG_WR32(qlt, 0x54, 0x61C0);
5578 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5579 buf += n; size_left -= n;
5580 }
5581 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5582 REG_WR32(qlt, 0x54, 0x6F00);
5583 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5584 buf += n; size_left -= n;
5585 }
5586
5587 qlt->intr_sneak_counter = 10;
5588 mutex_enter(&qlt->intr_lock);
5589 (void) qlt_reset_chip(qlt);
5590 drv_usecwait(20);
5591 qlt->intr_sneak_counter = 0;
5592 mutex_exit(&qlt->intr_lock);
5593
5594 /*
5595 * Memory
5596 */
5597 n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5598 buf += n; size_left -= n;
5599
5600 addr = 0x20000;
5601 endaddr = 0x22000;
5602 words_to_read = 0;
5603 while (addr < endaddr) {
5604 words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5605 if ((words_to_read + addr) > endaddr) {
5606 words_to_read = endaddr - addr;
5607 }
5608 if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5609 QLT_SUCCESS) {
5610 EL(qlt, "Error reading risc ram - CODE RAM status="
5611 "%llxh\n", ret);
5612 goto dump_fail;
5613 }
5614
5615 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5616 buf += n; size_left -= n;
5617
5618 if (size_left < 100000) {
5619 EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5620 size_left);
5621 goto dump_ok;
5648 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5649 buf += n; size_left -= n;
5650 if (size_left < 100000) {
5651 EL(qlt, "run out of space - EXT RAM\n");
5652 goto dump_ok;
5653 }
5654 addr += words_to_read;
5655 }
5656
5657 /*
5658 * Label the end tag
5659 */
5660 n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5661 buf += n; size_left -= n;
5662
5663 /*
5664 * Queue dumping
5665 */
5666 n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5667 buf += n; size_left -= n;
5668 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5669 REQUEST_QUEUE_ENTRIES, buf, size_left);
5670 buf += n; size_left -= n;
5671
5672 n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5673 buf += n; size_left -= n;
5674 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5675 PRIORITY_QUEUE_ENTRIES, buf, size_left);
5676 buf += n; size_left -= n;
5677
5678 n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5679 buf += n; size_left -= n;
5680 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5681 RESPONSE_QUEUE_ENTRIES, buf, size_left);
5682 buf += n; size_left -= n;
5683
5684 n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5685 buf += n; size_left -= n;
5686 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5687 ATIO_QUEUE_ENTRIES, buf, size_left);
5688 buf += n; size_left -= n;
5689
5690 /*
5691 * Label dump reason
5692 */
5693 n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5694 qlt->qlt_port_alias, ssci->st_additional_info);
5695 buf += n; size_left -= n;
5696
5697 dump_ok:
5698 EL(qlt, "left-%d\n", size_left);
5699
5700 mutex_enter(&qlt->qlt_ioctl_lock);
5701 qlt->qlt_ioctl_flags &=
5702 ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5703 qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5704 mutex_exit(&qlt->qlt_ioctl_lock);
5705 return (FCT_SUCCESS);
5706
5707 dump_fail:
5708 EL(qlt, "dump not done\n");
5709 mutex_enter(&qlt->qlt_ioctl_lock);
5710 qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5711 mutex_exit(&qlt->qlt_ioctl_lock);
5712 return (FCT_FAILURE);
5713 }
5714
5715 static int
5716 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5717 uint_t size_left)
5718 {
5719 int i;
5749 }
5750 if ((i + 1) & 7) {
5751 c = ' ';
5752 } else {
5753 c = '\n';
5754 }
5755 n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5756 "%08x%c", ptr[i], c));
5757 }
5758 return (n);
5759 }
5760
5761 static int
5762 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5763 uint_t size_left)
5764 {
5765 int i;
5766 int n;
5767 char c = ' ';
5768 int words;
5769 uint16_t *ptr;
5770 uint16_t w;
5771
5772 words = entries * 32;
5773 ptr = (uint16_t *)qadr;
5774 for (i = 0, n = 0; i < words; i++) {
5775 if ((i & 7) == 0) {
5776 n = (int)(n + (int)snprintf(&buf[n],
5777 (uint_t)(size_left - n), "%05x: ", i));
5778 }
5779 if ((i + 1) & 7) {
5780 c = ' ';
5781 } else {
5782 c = '\n';
5783 }
5784 w = QMEM_RD16(qlt, &ptr[i]);
5785 n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5786 w, c));
5787 }
5788 return (n);
5789 }
5790
/*
 * qlt_read_risc_ram
 *	Copy 'words' 32-bit words of RISC RAM, starting at RISC address
 *	'addr', into the host DMA scratch region at MBOX_DMA_MEM_OFFSET
 *	inside the queue memory, using the MBC_DUMP_RAM_EXTENDED mailbox
 *	command.
 *
 *	Only called by debug dump. Interrupts are disabled and mailboxes
 *	along with mailbox ram are available.
 *
 * Returns:	QLT_SUCCESS, or the raw mailbox failure status.
 * Context:	Kernel context.
 */
static fct_status_t
qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
{
	uint64_t da;
	fct_status_t ret;

	REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
	/* DMA destination = queue memory base + mailbox scratch offset. */
	da = qlt->queue_mem_cookie.dmac_laddress;
	da += MBOX_DMA_MEM_OFFSET;

	/* System destination address (64-bit, split across 4 mailboxes) */
	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));

	/* Length (in words) */
	REG_WR16(qlt, REG_MBOX(5), LSW(words));
	REG_WR16(qlt, REG_MBOX(4), MSW(words));

	/* RISC source address */
	REG_WR16(qlt, REG_MBOX(1), LSW(addr));
	REG_WR16(qlt, REG_MBOX(8), MSW(addr));

	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret == QLT_SUCCESS) {
		/* Make the DMA'd data visible to the CPU before use. */
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
	} else {
		EL(qlt, "qlt_raw_mailbox_command=ch status=%llxh\n", ret);
	}
	return (ret);
}
5830
/*
 * qlt_verify_fw
 *	Queue a verify-firmware IOCB (entry type 0x1b) on the request
 *	queue.  The completion is processed asynchronously by
 *	qlt_handle_verify_fw_completion().
 * Context:	Kernel context.
 */
static void
qlt_verify_fw(qlt_state_t *qlt)
{
	caddr_t req;
	/* Just put it on the request queue */
	mutex_enter(&qlt->req_lock);
	req = qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		/* XXX handle this */
		return;
	}

	bzero(req, IOCB_SIZE);

	req[0] = 0x1b;	/* IOCB entry type (verify firmware) */
	req[1] = 1;	/* presumably entry count — TODO confirm vs fw spec */

	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
	QMEM_WR16(qlt, (&req[0x8]), 1); /* options - don't update */
	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);

	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);
}
5856
/*
 * qlt_handle_verify_fw_completion
 *	Examine the response IOCB for the verify-firmware request issued
 *	by qlt_verify_fw() and format a diagnostic string on failure.
 *
 * NOTE(review): 'info' is formatted but never logged or passed anywhere;
 * presumably a trace/log call was intended after each snprintf — confirm.
 */
static void
qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	uint16_t status;
	char info[QLT_INFO_LEN];

	/* Completion status word lives at offset 8 of the response entry. */
	status = QMEM_RD16(qlt, rsp+8);
	if (status != 0) {
		(void) snprintf(info, sizeof (info),
		    "qlt_handle_verify_fw_completion: "
		    "status:%x, rsp:%p", status, (void *)rsp);
		if (status == 3) {
			uint16_t error_code;

			/* Extra error code at offset 0xA for status 3. */
			error_code = QMEM_RD16(qlt, rsp+0xA);
			(void) snprintf(info, sizeof (info),
			    "qlt_handle_verify_fw_completion: error code:%x",
			    error_code);
		}
	}
}
5878
5879 /*
5880 * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5881 *
5882 * Input: Pointer to the adapter state structure.
5883 * Returns: Success or Failure.
5884 * Context: Kernel context.
5885 */
5886 static int
5887 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5888 {
5889 int rval = DDI_SUCCESS;
5890
5891 qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5892 kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5893
5894 if (qlt->el_trace_desc == NULL) {
5895 cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5896 qlt->instance);
5897 rval = DDI_FAILURE;
5898 } else {
5899 qlt->el_trace_desc->next = 0;
5900 qlt->el_trace_desc->trace_buffer =
5901 (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5902
5903 if (qlt->el_trace_desc->trace_buffer == NULL) {
5904 cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5905 qlt->instance);
5906 kmem_free(qlt->el_trace_desc,
5907 sizeof (qlt_el_trace_desc_t));
5908 qlt->el_trace_desc = NULL;
5909 rval = DDI_FAILURE;
5910 } else {
5911 qlt->el_trace_desc->trace_buffer_size =
5912 EL_TRACE_BUF_SIZE;
5913 mutex_init(&qlt->el_trace_desc->mutex, NULL,
5914 MUTEX_DRIVER, NULL);
5915 }
5916 }
5917
5918 return (rval);
5919 }
5920
5921 /*
5922 * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5923 *
5924 * Input: Pointer to the adapter state structure.
5925 * Returns: Success or Failure.
5926 * Context: Kernel context.
5927 */
5928 static int
5929 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5930 {
5931 int rval = DDI_SUCCESS;
5932
5933 if (qlt->el_trace_desc == NULL) {
5934 cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5935 qlt->instance);
5936 rval = DDI_FAILURE;
5937 } else {
5938 if (qlt->el_trace_desc->trace_buffer != NULL) {
5939 kmem_free(qlt->el_trace_desc->trace_buffer,
5940 qlt->el_trace_desc->trace_buffer_size);
5941 }
5942 mutex_destroy(&qlt->el_trace_desc->mutex);
5943 kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5944 qlt->el_trace_desc = NULL;
5945 }
5946
5947 return (rval);
5948 }
5949
5950 /*
5951 * qlt_el_msg
5952 * Extended logging message
5953 *
5954 * Input:
5955 * qlt: adapter state pointer.
5956 * fn: function name.
5957 * ce: level
5958 * ...: Variable argument list.
5959 *
5960 * Context:
5961 * Kernel/Interrupt context.
5962 */
5963 void
5964 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5965 {
5966 char *s, *fmt = 0, *fmt1 = 0;
5967 char fmt2[EL_BUFFER_RESERVE];
5968 int rval, tmp;
5969 int tracing = 0;
5970 va_list vl;
5971
5972 /* Tracing is the default but it can be disabled. */
5973 if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5974 tracing = 1;
5975
5976 mutex_enter(&qlt->el_trace_desc->mutex);
5977
5978 /*
5979 * Ensure enough space for the string. Wrap to
5980 * start when default message allocation size
5981 * would overrun the end.
5982 */
5983 if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5984 qlt->el_trace_desc->trace_buffer_size) {
5985 fmt = qlt->el_trace_desc->trace_buffer;
5986 qlt->el_trace_desc->next = 0;
5987 } else {
5988 fmt = qlt->el_trace_desc->trace_buffer +
5989 qlt->el_trace_desc->next;
5990 }
5991 }
5992
5993 /* if no buffer use the stack */
5994 if (fmt == NULL) {
5995 fmt = fmt2;
5996 }
5997
5998 va_start(vl, ce);
5999
6000 s = va_arg(vl, char *);
6001
6002 rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
6003 "QEL qlt(%d): %s, ", qlt->instance, fn);
6004 fmt1 = fmt + rval;
6005 tmp = (int)vsnprintf(fmt1,
6006 (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
6007 rval += tmp;
6008
6009 /*
6010 * Calculate the offset where the next message will go,
6011 * skipping the NULL.
6012 */
6013 if (tracing) {
6014 uint16_t next = (uint16_t)(rval += 1);
6015 qlt->el_trace_desc->next += next;
6016 mutex_exit(&qlt->el_trace_desc->mutex);
6017 }
6018
6019 if (enable_extended_logging) {
6020 cmn_err(ce, fmt);
6021 }
6022
6023 va_end(vl);
6024 }
6025
6026 /*
6027 * qlt_dump_el_trace_buffer
6028 * Outputs extended logging trace buffer.
6029 *
6030 * Input:
6031 * qlt: adapter state pointer.
6032 */
6033 void
6034 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
6035 {
6036 char *dump_start = NULL;
6037 char *dump_current = NULL;
6038 char *trace_start;
6039 char *trace_end;
6040 int wrapped = 0;
6041 int rval;
6042
6043 mutex_enter(&qlt->el_trace_desc->mutex);
6044
6045 rval = qlt_validate_trace_desc(qlt);
6046 if (rval != NULL) {
6047 cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
6048 qlt->instance);
6049 } else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
6050 dump_current = dump_start;
6051 trace_start = qlt->el_trace_desc->trace_buffer;
6052 trace_end = trace_start +
6053 qlt->el_trace_desc->trace_buffer_size;
6054
6055 cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
6056 qlt->instance,
6057 (void *)dump_start, (void *)trace_start);
6058
6059 while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
6060 (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
6061 /* Show it... */
6062 cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
6063 dump_current);
6064 /* Make the next the current */
6065 dump_current += (strlen(dump_current) + 1);
6066 /* check for wrap */
6067 if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
6068 dump_current = trace_start;
6069 wrapped = 1;
6070 } else if (wrapped) {
6071 /* Don't go past next. */
6072 if ((trace_start + qlt->el_trace_desc->next) <=
6073 dump_current) {
6074 break;
6075 }
6076 } else if (*dump_current == NULL) {
6077 break;
6078 }
6079 }
6080 }
6081 mutex_exit(&qlt->el_trace_desc->mutex);
6082 }
6083
6084 /*
6085 * qlt_validate_trace_desc
6086 * Ensures the extended logging trace descriptor is good.
6087 *
6088 * Input:
6089 * qlt: adapter state pointer.
6090 *
6091 * Returns:
6092 * ql local function return status code.
6093 */
6094 static int
6095 qlt_validate_trace_desc(qlt_state_t *qlt)
6096 {
6097 int rval = DDI_SUCCESS;
6098
6099 if (qlt->el_trace_desc == NULL) {
6100 rval = DDI_FAILURE;
6101 } else if (qlt->el_trace_desc->trace_buffer == NULL) {
6102 rval = DDI_FAILURE;
6103 }
6104 return (rval);
6105 }
6106
6107 /*
6108 * qlt_find_trace_start
6109 * Locate the oldest extended logging trace entry.
6110 *
6111 * Input:
6112 * qlt: adapter state pointer.
6113 *
6114 * Returns:
6115 * Pointer to a string.
6116 *
6117 * Context:
6118 * Kernel/Interrupt context.
6119 */
6120 static char *
6121 qlt_find_trace_start(qlt_state_t *qlt)
6122 {
6123 char *trace_start = 0;
6124 char *trace_next = 0;
6125
6126 trace_next = qlt->el_trace_desc->trace_buffer +
6127 qlt->el_trace_desc->next;
6128
6129 /*
6130 * If the buffer has not wrapped next will point at a null so
6131 * start is the beginning of the buffer. If next points at a char
6132 * then we must traverse the buffer until a null is detected and
6133 * that will be the beginning of the oldest whole object in the buffer
6134 * which is the start.
6135 */
6136
6137 if ((trace_next + EL_BUFFER_RESERVE) >=
6138 (qlt->el_trace_desc->trace_buffer +
6139 qlt->el_trace_desc->trace_buffer_size)) {
6140 trace_start = qlt->el_trace_desc->trace_buffer;
6141 } else if (*trace_next != NULL) {
6142 trace_start = trace_next + (strlen(trace_next) + 1);
6143 } else {
6144 trace_start = qlt->el_trace_desc->trace_buffer;
6145 }
6146 return (trace_start);
6147 }
6148
6149
/*
 * qlt_read_int_prop
 *	Fetch an integer property from this instance's dev node only
 *	(DDI_PROP_DONTPASS: no inheritance from parent nodes), returning
 *	'defval' when the property is absent.
 */
static int
qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
{
	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
}
6156
/*
 * qlt_read_string_prop
 *	Look up a string property on this instance's dev node only
 *	(DDI_PROP_DONTPASS).  On DDI_PROP_SUCCESS the caller owns
 *	*prop_val and must release it with ddi_prop_free().
 */
static int
qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
{
	return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
	    DDI_PROP_DONTPASS, prop, prop_val));
}
6163
6164 static int
6165 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
6166 {
6167 char inst_prop[256];
6168 int val;
6169
6291 }
6292
/*
 * ql_mps_reset
 *	Reset MPS for FCoE functions.
 *
 * Input:	ha = virtual adapter state pointer.
 *
 * Context:	Kernel context.
 */
static void
qlt_mps_reset(qlt_state_t *qlt)
{
	uint32_t data, dctl = 1000;

	/*
	 * Acquire the lock word at RISC RAM 0x7c00: write 1 and poll
	 * until bit 0 reads back set (presumably a hardware semaphore —
	 * confirm against firmware docs).  Give up after 1000 attempts
	 * or on any RAM access failure.
	 */
	do {
		if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
		    QLT_SUCCESS) {
			return;
		}
		if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
		    QLT_SUCCESS) {
			/* Release the lock word before bailing out. */
			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
			return;
		}
	} while (!(data & BIT_0));

	/*
	 * Sync bits 5-7 of RISC RAM word 0x7A15 with bits 5-7 of PCI
	 * config space register 0x54, rewriting the word only when they
	 * differ.
	 */
	if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
		dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
		if ((data & 0xe0) != (dctl & 0xe0)) {
			data &= 0xff1f;
			data |= dctl & 0xe0;
			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
		}
	}
	/* Release the lock word. */
	(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
}
6330
6331 /*
6332 * qlt_raw_wrt_risc_ram_word
6333 * Write RISC RAM word.
6334 *
6335 * Input: qlt: adapter state pointer.
6336 * risc_address: risc ram word address.
6337 * data: data.
6338 *
6339 * Returns: qlt local function return status code.
6340 *
6341 * Context: Kernel context.
6342 */
6343 static fct_status_t
6344 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6345 uint32_t data)
6346 {
6385 *data = REG_RD16(qlt, REG_MBOX(2));
6386 *data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
6387 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6388 if (ret != QLT_SUCCESS) {
6389 EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
6390 "=%llxh\n", ret);
6391 }
6392 return (ret);
6393 }
6394
6395 static void
6396 qlt_properties(qlt_state_t *qlt)
6397 {
6398 int32_t cnt = 0;
6399 int32_t defval = 0xffff;
6400
6401 if (qlt_wwn_overload_prop(qlt) == TRUE) {
6402 EL(qlt, "wwnn overloaded.\n");
6403 }
6404
6405 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
6406 defval) {
6407 qlt->qlt_bucketcnt[0] = cnt;
6408 EL(qlt, "2k bucket o/l=%d\n", cnt);
6409 }
6410
6411 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
6412 defval) {
6413 qlt->qlt_bucketcnt[1] = cnt;
6414 EL(qlt, "8k bucket o/l=%d\n", cnt);
6415 }
6416
6417 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
6418 defval) {
6419 qlt->qlt_bucketcnt[2] = cnt;
6420 EL(qlt, "64k bucket o/l=%d\n", cnt);
6421 }
6422
6423 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
6424 defval) {
6425 qlt->qlt_bucketcnt[3] = cnt;
6426 EL(qlt, "128k bucket o/l=%d\n", cnt);
6427 }
6428
6429 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
6430 defval) {
6431 qlt->qlt_bucketcnt[4] = cnt;
6432 EL(qlt, "256k bucket o/l=%d\n", cnt);
6433 }
6434 }
|
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009-2015 QLogic Corporation. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
29 */
30
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/stat.h>
34 #include <sys/pci.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/file.h>
38 #include <sys/cred.h>
39 #include <sys/byteorder.h>
40 #include <sys/atomic.h>
41 #include <sys/scsi/scsi.h>
42 #include <sys/time.h>
43 #ifdef __x86
44 #include <sys/x86_archext.h>
45 #endif
46
47 #include <sys/stmf_defines.h>
48 #include <sys/stmf_ioctl.h>
49 #include <sys/fct_defines.h>
50 #include <sys/stmf.h>
51 #include <sys/portif.h>
52 #include <sys/fct.h>
53
54 #include "qlt.h"
55 #include "qlt_dma.h"
56 #include "qlt_ioctl.h"
57 #include "qlt_open.h"
58
59 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
60 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
61 static uint8_t *qlt_vpd_findtag(qlt_state_t *qlt, uint8_t *vpdbuf,
62 int8_t *opcode);
63 static int qlt_vpd_lookup(qlt_state_t *qlt, uint8_t *opcode, uint8_t *bp,
64 int32_t bplen);
65 static void qlt_enable_intr(qlt_state_t *);
66 static void qlt_disable_intr(qlt_state_t *);
67 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
68 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
69 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
70 uint32_t word_count, uint32_t risc_addr);
71 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
72 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
73 uint32_t dma_size);
74 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
75 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
76 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
77 static uint_t qlt_msix_resp_handler(caddr_t arg, caddr_t arg2);
78 static uint_t qlt_msix_default_handler(caddr_t arg, caddr_t arg2);
79 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
80 stmf_state_change_info_t *ssci);
81 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
82 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
83 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
84 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp,
85 uint16_t qi);
86 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
87 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
88 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
89 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
90 uint8_t *rsp);
91 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
92 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp, uint16_t qi);
93 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp,
94 uint16_t qi);
95 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
96 static fct_status_t qlt_read_vpd(qlt_state_t *qlt);
97 static fct_status_t qlt_read_rom_image(qlt_state_t *qlt);
98 static void qlt_verify_fw(qlt_state_t *qlt);
99 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
100 fct_status_t qlt_port_start(caddr_t arg);
101 fct_status_t qlt_port_stop(caddr_t arg);
102 fct_status_t qlt_port_online(qlt_state_t *qlt);
103 fct_status_t qlt_port_offline(qlt_state_t *qlt);
104 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
105 fct_link_info_t *li);
106 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
107 static fct_status_t qlt_force_lip(qlt_state_t *);
108 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
109 fct_flogi_xchg_t *fx);
110 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
111 void qlt_handle_resp_queue_update(qlt_state_t *qlt, uint16_t qi);
112 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
113 fct_remote_port_t *rp, fct_cmd_t *login);
114 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
115 fct_remote_port_t *rp);
116 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
117 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
118 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
119 fct_cmd_t *cmd, int terminate);
120 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
121 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
122 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
123 fct_cmd_t *cmd, uint32_t flags);
124 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
125 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
126 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
127 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
128 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
129 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
130 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
131 stmf_data_buf_t *dbuf, uint32_t ioflags);
132 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
133 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
134 static void qlt_release_intr(qlt_state_t *qlt);
135 static int qlt_setup_interrupts(qlt_state_t *qlt);
136 static void qlt_destroy_mutex(qlt_state_t *qlt);
137
138 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
139 uint32_t words);
140 static fct_status_t qlt_mbx_mpi_ram(qlt_state_t *qlt, uint32_t addr,
141 uint32_t words, uint16_t direction);
142 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
143 caddr_t buf, uint_t size_left);
144 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
145 caddr_t buf, uint_t size_left);
146 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
147 int count, uint_t size_left);
148 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
149 cred_t *credp, int *rval);
150 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
151 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
152
153 static int qlt_setup_msi(qlt_state_t *qlt);
154 static int qlt_setup_msix(qlt_state_t *qlt);
155
156 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
157 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
158
159 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
160 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
161 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
162 char **prop_val);
163 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
164 static int qlt_convert_string_to_ull(char *prop, int radix,
165 u_longlong_t *result);
166 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
167 static int qlt_quiesce(dev_info_t *dip);
168 static void qlt_disable_intr(qlt_state_t *qlt);
169 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
170 uint32_t);
171 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
172 uint32_t *);
173 static void qlt_mps_reset(qlt_state_t *qlt);
174 static void qlt_properties(qlt_state_t *qlt);
175
176 static fct_status_t qlt_mq_create(qlt_state_t *qlt, int idx);
177 static fct_status_t qlt_mq_destroy(qlt_state_t *qlt);
178
179 static fct_status_t qlt_27xx_get_dmp_template(qlt_state_t *);
180 static uint32_t qlt_27xx_dmp_parse_template(qlt_state_t *, qlt_dt_hdr_t *,
181 uint8_t *, uint32_t);
182 static int qlt_27xx_dump_ram(qlt_state_t *, uint16_t, uint32_t,
183 uint32_t, uint8_t *);
184
/*
 * SETELSBIT - set the bit for ELS opcode 'els' in the 32-byte ELS
 * bitmap 'bmp': bit (els & 7) of byte ((els >> 3) & 0x1F).
 */
#define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
187
188 int qlt_enable_msix = 1;
189 int qlt_enable_msi = 1;
190
191
192 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
193
194 /* Array to quickly calculate next free buf index to use */
195 #if 0
196 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
197 #endif
198
199 static struct cb_ops qlt_cb_ops = {
200 qlt_open,
201 qlt_close,
202 nodev,
203 nodev,
204 nodev,
205 nodev,
206 nodev,
207 qlt_ioctl,
208 nodev,
212 ddi_prop_op,
213 0,
214 D_MP | D_NEW
215 };
216
static struct dev_ops qlt_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	qlt_attach,		/* devo_attach */
	qlt_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&qlt_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	qlt_quiesce		/* devo_quiesce */
};
231
232 #ifndef PORT_SPEED_16G
233 #define PORT_SPEED_16G 32
234 #endif
235
236 #ifndef PORT_SPEED_32G
237 #define PORT_SPEED_32G 64
238 #endif
239
240 #ifndef QL_NAME
241 #define QL_NAME "qlt"
242 #endif
243
static struct modldrv modldrv = {
	&mod_driverops,		/* this is a device driver module */
	QLT_NAME" "QLT_VERSION,	/* module description for modinfo(1M) */
	&qlt_ops,		/* driver dev_ops */
};

/* Single-driver module linkage, terminated by NULL. */
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
253
254 void *qlt_state = NULL;
255 kmutex_t qlt_global_lock;
256 static uint32_t qlt_loaded_counter = 0;
257 uint8_t qlt_reprocess_attempt_cnt = 5;
258 uint32_t qlt_reprocess_delay = 75; /* default 75 microseconds */
259
260 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
261 "-X Mode 1 133", "--Invalid--",
262 "-X Mode 2 66", "-X Mode 2 100",
263 "-X Mode 2 133", " 66" };
264
/* Always use 64 bit DMA. */
static ddi_dma_attr_t qlt_queue_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};


/*
 * Always use 64 bit DMA.
 * NOTE(review): the two attribute structures below are currently
 * identical to qlt_queue_dma_attr; presumably kept separate so the
 * MQ request/response queues can be tuned independently — confirm
 * before consolidating.
 */
static ddi_dma_attr_t qlt_queue_dma_attr_mq_req1 = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};

/* Always use 64 bit DMA. */
static ddi_dma_attr_t qlt_queue_dma_attr_mq_rsp1 = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};
313
314
315 /* qlogic logging */
316 int enable_extended_logging = 0;
317 static char qlt_provider_name[] = "qlt";
318 static struct stmf_port_provider *qlt_pp;
319
320 int
321 _init(void)
322 {
323 int ret;
324
325 ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
326 if (ret == 0) {
327 mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
328 qlt_pp = (stmf_port_provider_t *)stmf_alloc(
329 STMF_STRUCT_PORT_PROVIDER, 0, 0);
330 qlt_pp->pp_portif_rev = PORTIF_REV_1;
331 qlt_pp->pp_name = qlt_provider_name;
332 if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
333 stmf_free(qlt_pp);
334 mutex_destroy(&qlt_global_lock);
335 ddi_soft_state_fini(&qlt_state);
336 return (EIO);
352 int ret;
353
354 if (qlt_loaded_counter)
355 return (EBUSY);
356 ret = mod_remove(&modlinkage);
357 if (ret == 0) {
358 (void) stmf_deregister_port_provider(qlt_pp);
359 stmf_free(qlt_pp);
360 mutex_destroy(&qlt_global_lock);
361 ddi_soft_state_fini(&qlt_state);
362 }
363 return (ret);
364 }
365
/*
 * _info - Return module information via mod_info(9F) for modinfo(1M).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
371
372 static int
373 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
374 {
375 int instance;
376 qlt_state_t *qlt;
377 ddi_device_acc_attr_t dev_acc_attr;
378 uint16_t did;
379 uint16_t val;
380 uint16_t mr;
381 size_t discard;
382 uint_t ncookies;
383 int max_read_size;
384 int max_payload_size;
385 fct_status_t ret;
386
387 /* No support for suspend resume yet */
388 if (cmd != DDI_ATTACH)
389 return (DDI_FAILURE);
390 instance = ddi_get_instance(dip);
391
392 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
393 QLT_NAME, instance, QLT_VERSION);
394
395 if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
396 cmn_err(CE_WARN, "qlt(%d): soft state alloc failed", instance);
397 return (DDI_FAILURE);
398 }
399
400 if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
401 NULL) {
402 cmn_err(CE_WARN, "qlt(%d): can't get soft state", instance);
403 goto attach_fail_1;
404 }
405
406 qlt->instance = instance;
407
408 qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
409 qlt->vpd = (uint32_t *)kmem_zalloc(QL_24XX_VPD_SIZE, KM_SLEEP);
410 qlt->dip = dip;
411
412 if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
413 cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
414 goto attach_fail_2;
415 }
416
417 EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);
418
419 if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
420 cmn_err(CE_WARN, "qlt(%d): pci_config_setup failed", instance);
421 goto attach_fail_3;
422 }
423
424 did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
425 if ((did != 0x2422) && (did != 0x2432) &&
426 (did != 0x8432) && (did != 0x2532) &&
427 (did != 0x8001) && (did != 0x2031) &&
428 (did != 0x2071) && (did != 0x2261)) {
429 cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
430 instance, did);
431 goto attach_fail_4;
432 }
433
434 if ((did & 0xFFFF) == 0x2071) {
435 qlt->qlt_27xx_chip = 1;
436 qlt->qlt_fcoe_enabled = 0;
437 } else if ((did & 0xFFFF) == 0x2261) {
438 qlt->qlt_27xx_chip = 1;
439 qlt->qlt_fcoe_enabled = 0;
440 } else if ((did & 0xFFFF) == 0x2031) {
441 qlt->qlt_83xx_chip = 1;
442 qlt->qlt_fcoe_enabled = 0;
443 } else if ((did & 0xFFF0) == 0x8000) {
444 qlt->qlt_81xx_chip = 1;
445 qlt->qlt_fcoe_enabled = 1;
446 } else if ((did & 0xFF00) == 0x2500)
447 qlt->qlt_25xx_chip = 1;
448
449 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
450 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
451 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
452
453 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
454 int stat_1;
455 off_t regsize_1;
456
457 stat_1 = ddi_dev_regsize(dip, 1, ®size_1);
458 if (stat_1 != DDI_SUCCESS) {
459 stmf_trace(qlt->qlt_port_alias,
460 "instance=%d, reg 1 regsize failed,"
461 " stat %x", instance, stat_1);
462 goto attach_fail_4;
463 }
464
465 if (ddi_regs_map_setup(dip, 1, &qlt->regs, 0, regsize_1,
466 &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
467 cmn_err(CE_NOTE, "qlt(%d) ddi_regs_map_setup failed\n",
468 instance);
469 goto attach_fail_4;
470 }
471 } else {
472 /*
473 * 24xx and 25xx: rnumber 0 is config space
474 * rnumber 1 is for IO space
475 * rnumber 2 is for MBAR0: ISP, MSIX, PBA
476 */
477 if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
478 &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
479 goto attach_fail_4;
480 }
481 }
482
483 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
484
485 uint32_t w32h;
486 uint32_t w32l;
487 int stat;
488 off_t regsize;
489
490 w32l = PCICFG_RD32(qlt, PCI_CONF_BASE2);
491 w32h = PCICFG_RD32(qlt, PCI_CONF_BASE3);
492
493 if ((w32h > 0) || w32l > 0) {
494 stat = ddi_dev_regsize(dip, 2, ®size);
495 if (stat != DDI_SUCCESS) {
496 stmf_trace(qlt->qlt_port_alias,
497 "instance=%d, MSI-X regsize failed,"
498 " stat %x", instance, stat);
499 }
500 stmf_trace(qlt->qlt_port_alias,
501 "instance=%d, MSI-X MEM Bar size %x",
502 instance, regsize);
503
504 stat = ddi_regs_map_setup(dip, 2, &qlt->msix_base, 0,
505 /* ((MQ_MAX_QUEUES * 2) +1) << 2, */
506 regsize,
507 &dev_acc_attr, &qlt->msix_acc_handle);
508
509 if (stat != DDI_SUCCESS || qlt->msix_base == NULL ||
510 qlt->msix_acc_handle == NULL) {
511
512 cmn_err(CE_WARN,
513 "qlt(%d): can't map MBar for MSI-X",
514 instance);
515 stmf_trace(qlt->qlt_port_alias,
516 "instance=%d, MSI-X MEM Bar map fail",
517 instance);
518
519 if (qlt->msix_acc_handle != NULL) {
520 ddi_regs_map_free(
521 &qlt->msix_acc_handle);
522 }
523 goto attach_fail_5;
524 }
525 } else {
526 cmn_err(CE_WARN, "qlt(%d): can't setup MBar for MSI-X",
527 instance);
528 stmf_trace(qlt->qlt_port_alias,
529 "instance=%d, No MSI-X MEM Bar", instance);
530 goto attach_fail_5;
531 }
532
533 w32l = PCICFG_RD32(qlt, PCI_CONF_BASE4);
534 w32h = PCICFG_RD32(qlt, PCI_CONF_BASE5);
535
536 if ((w32h > 0) || w32l > 0) {
537 stat = ddi_dev_regsize(dip, 3, ®size);
538 if (stat != DDI_SUCCESS) {
539 stmf_trace(qlt->qlt_port_alias,
540 "instance=%d, MQ regsize failed, stat %x",
541 instance, stat);
542 }
543 stmf_trace(qlt->qlt_port_alias,
544 "instance=%d, MQ MEM Bar size %x",
545 instance, regsize);
546
547 /* for 83xx the QP pointers are in the 3rd MBar */
548 stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
549 (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
550 &dev_acc_attr, &qlt->mq_reg_acc_handle);
551
552 if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
553 qlt->mq_reg_acc_handle == NULL) {
554
555 cmn_err(CE_WARN, "qlt(%d): can't map QP MBar",
556 instance);
557 stmf_trace(qlt->qlt_port_alias,
558 "instance=%d, QP MEM Bar map fail st:%x",
559 instance, stat);
560
561 if (qlt->msix_acc_handle != NULL) {
562 ddi_regs_map_free(
563 &qlt->msix_acc_handle);
564 }
565 if (qlt->mq_reg_acc_handle != NULL) {
566 ddi_regs_map_free(
567 &qlt->mq_reg_acc_handle);
568 }
569 goto attach_fail_5;
570 } else {
571 qlt->qlt_mq_enabled = 1;
572 }
573 } else {
574 cmn_err(CE_WARN, "qlt(%d): can't setup MBar for QPs",
575 instance);
576 stmf_trace(qlt->qlt_port_alias,
577 "instance=%d, No QPs MEM Bar", instance);
578
579 if (qlt->msix_acc_handle != NULL) {
580 ddi_regs_map_free(
581 &qlt->msix_acc_handle);
582 }
583 goto attach_fail_5;
584 }
585 } else if (qlt->qlt_81xx_chip) {
586
587 uint32_t w32;
588 int stat;
589
590 w32 = PCICFG_RD32(qlt, PCI_CONF_BASE3);
591 if (w32 == 0) {
592
593 cmn_err(CE_WARN, "qlt(%d): can't setup MBar2",
594 instance);
595 stmf_trace(qlt->qlt_port_alias,
596 "instance=%d, No MEM Bar2", instance);
597 goto attach_fail_5;
598 }
599
600 stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
601 (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
602 &dev_acc_attr, &qlt->mq_reg_acc_handle);
603
604 if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
605 qlt->mq_reg_acc_handle == NULL) {
606
607 cmn_err(CE_WARN, "qlt(%d): can't map MBar2",
608 instance);
609 stmf_trace(qlt->qlt_port_alias,
610 "instance=%d, MEM Bar2 map fail", instance);
611
612 if (qlt->mq_reg_acc_handle != NULL) {
613 ddi_regs_map_free(&qlt->mq_reg_acc_handle);
614 }
615 goto attach_fail_5;
616 } else {
617 qlt->qlt_mq_enabled = 1;
618 }
619 } else if (qlt->qlt_25xx_chip) {
620 uint32_t w32h;
621 uint32_t w32l;
622 int stat;
623 off_t regsize;
624
625 /* MBAR2 rnumber 3 */
626 w32l = PCICFG_RD32(qlt, PCI_CONF_BASE3);
627 w32h = PCICFG_RD32(qlt, PCI_CONF_BASE4);
628
629 if ((w32h > 0) || (w32l > 0)) {
630 stat = ddi_dev_regsize(dip, 3, ®size);
631 if (stat != DDI_SUCCESS) {
632 stmf_trace(qlt->qlt_port_alias,
633 "ISP25xx inst=%d, MQ regsize failed, stat %x",
634 instance, stat);
635 EL(qlt, "ISP25xx MQ regsize failed, stat %x\n",
636 stat);
637
638 }
639 stmf_trace(qlt->qlt_port_alias,
640 "ISP25xx instance=%d, MQ MEM Bar size %lx",
641 instance, regsize);
642 EL(qlt, "ISP25xx MQ MEM Bar (MBAR2) size: %x\n",
643 regsize);
644
645 stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
646 (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
647 &dev_acc_attr, &qlt->mq_reg_acc_handle);
648 if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
649 qlt->mq_reg_acc_handle == NULL) {
650 cmn_err(CE_WARN,
651 "qlt(%d): ISP25xx can't map QP MBar",
652 instance);
653 stmf_trace(qlt->qlt_port_alias,
654 "instance=%d, QP MEM Bar map fail st:%x",
655 instance, stat);
656 if (qlt->mq_reg_acc_handle != NULL) {
657 ddi_regs_map_free(
658 &qlt->mq_reg_acc_handle);
659 }
660 } else {
661 qlt->qlt_mq_enabled = 1;
662 }
663 } else {
664 stmf_trace(qlt->qlt_port_alias,
665 "instance=%d, No QPs MEM Bar", instance);
666 EL(qlt,
667 "ISP25xx can't setup MBar QPs, use baseq\n");
668 }
669 }
670
671 if (qlt->qlt_mq_enabled) {
672 qlt->mq_req = kmem_zalloc(
673 ((sizeof (qlt_mq_req_ptr_blk_t)) * MQ_MAX_QUEUES),
674 KM_SLEEP);
675 qlt->mq_resp = kmem_zalloc(
676 ((sizeof (qlt_mq_rsp_ptr_blk_t)) * MQ_MAX_QUEUES),
677 KM_SLEEP);
678 } else {
679 qlt->mq_req = kmem_zalloc(
680 (sizeof (qlt_mq_req_ptr_blk_t)), KM_SLEEP);
681 qlt->mq_resp = kmem_zalloc(
682 (sizeof (qlt_mq_rsp_ptr_blk_t)), KM_SLEEP);
683 }
684
685 if (did == 0x2422) {
686 uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
687 uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
688 pci_bits >>= 8;
689 pci_bits &= 0xf;
690 if ((pci_bits == 3) || (pci_bits == 7)) {
691 cmn_err(CE_NOTE,
692 "!qlt(%d): HBA running at PCI%sMHz (%d)",
693 instance, pci_speeds[pci_bits], pci_bits);
694 } else {
695 cmn_err(CE_WARN,
696 "qlt(%d): HBA running at PCI%sMHz %s(%d)",
697 instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
698 "(Invalid)", ((pci_bits == 0) ||
699 (pci_bits == 8)) ? (slot ? "64 bit slot " :
700 "32 bit slot ") : "", pci_bits);
701 }
702 }
703 if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
704 cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
705 (unsigned long long)ret);
706 goto attach_fail_5;
707 }
708 if ((ret = qlt_read_vpd(qlt)) != QLT_SUCCESS) {
709 cmn_err(CE_WARN, "qlt(%d): read vpd failure %llx", instance,
710 (unsigned long long)ret);
711 goto attach_fail_5;
712 }
713 if ((ret = qlt_read_rom_image(qlt)) != QLT_SUCCESS) {
714 cmn_err(CE_WARN, "qlt(%d): read rom image failure %llx",
715 instance, (unsigned long long)ret);
716 goto attach_fail_5;
717 }
718
719 qlt_properties(qlt);
720
721 if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
722 0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
723 goto attach_fail_5;
724 }
725 if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
726 &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
727 &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
728 DDI_SUCCESS) {
729 goto attach_fail_6;
730 }
731 if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
732 qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
733 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
734 &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
735 goto attach_fail_7;
736 }
737 if (ncookies != 1)
738 goto attach_fail_8;
739
740 /*
741 * Base queue (0), alwasy available
742 */
743 qlt->mq_req[0].queue_mem_mq_base_addr =
744 qlt->mq_req[0].mq_ptr =
745 qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
746 qlt->mq_resp[0].queue_mem_mq_base_addr =
747 qlt->mq_resp[0].mq_ptr =
748 qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
749
750 qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
751 qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
752
753 /* mutex are inited in this function */
754 if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
755 goto attach_fail_8;
756
757 qlt->qlt_queue_cnt = 1;
758 if ((qlt->qlt_mq_enabled) && (qlt->intr_cnt > 1)) {
759 int i;
760
761 for (i = 1; i < qlt->intr_cnt; i++) {
762 if (qlt_mq_create(qlt, i) != QLT_SUCCESS) {
763 cmn_err(CE_WARN, "qlt(%d) mq create (%d) "
764 "failed\n", qlt->instance, i);
765 break;
766 }
767 qlt->qlt_queue_cnt++;
768 if (qlt->qlt_queue_cnt >= MQ_MAX_QUEUES)
769 break;
770 }
771 }
772 EL(qlt, "Queue count = %d\n", qlt->qlt_queue_cnt);
773
774 (void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
775 "qlt%d", instance);
776 (void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
777 "%s,0", qlt->qlt_minor_name);
778
779 if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
780 instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
781 goto attach_fail_9;
782 }
783
784 cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
785 cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
786 mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
787
788 /* Setup PCI cfg space registers */
789 max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
790 if (max_read_size == 11)
791 goto over_max_read_xfer_setting;
792 if (did == 0x2422) {
793 if (max_read_size == 512)
794 val = 0;
795 else if (max_read_size == 1024)
796 val = 1;
797 else if (max_read_size == 2048)
798 val = 2;
799 else if (max_read_size == 4096)
800 val = 3;
801 else {
802 cmn_err(CE_WARN, "qlt(%d) malformed "
803 "pci-max-read-request in qlt.conf. Valid values "
804 "for this HBA are 512/1024/2048/4096", instance);
805 goto over_max_read_xfer_setting;
806 }
807 mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
808 mr = (uint16_t)(mr & 0xfff3);
809 mr = (uint16_t)(mr | (val << 2));
810 PCICFG_WR16(qlt, 0x4E, mr);
811 } else if ((did == 0x2432) || (did == 0x8432) ||
812 (did == 0x2532) || (did == 0x8001) ||
813 (did == 0x2031) || (did == 0x2071) ||
814 (did == 0x2261)) {
815 if (max_read_size == 128)
816 val = 0;
817 else if (max_read_size == 256)
818 val = 1;
819 else if (max_read_size == 512)
820 val = 2;
821 else if (max_read_size == 1024)
822 val = 3;
823 else if (max_read_size == 2048)
824 val = 4;
825 else if (max_read_size == 4096)
826 val = 5;
827 else {
828 cmn_err(CE_WARN, "qlt(%d) malformed "
829 "pci-max-read-request in qlt.conf. Valid values "
830 "for this HBA are 128/256/512/1024/2048/4096",
831 instance);
832 goto over_max_read_xfer_setting;
833 }
834 mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
835 mr = (uint16_t)(mr & 0x8fff);
836 mr = (uint16_t)(mr | (val << 12));
837 PCICFG_WR16(qlt, 0x54, mr);
838 } else {
839 cmn_err(CE_WARN, "qlt(%d): dont know how to set "
840 "pci-max-read-request for this device (%x)",
841 instance, did);
842 }
843 over_max_read_xfer_setting:;
844
845 max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
846 if (max_payload_size == 11)
847 goto over_max_payload_setting;
848 if ((did == 0x2432) || (did == 0x8432) ||
849 (did == 0x2532) || (did == 0x8001) ||
850 (did == 0x2031) || (did == 0x2071) ||
851 (did == 0x2261)) {
852 if (max_payload_size == 128)
853 val = 0;
854 else if (max_payload_size == 256)
855 val = 1;
856 else if (max_payload_size == 512)
857 val = 2;
858 else if (max_payload_size == 1024)
859 val = 3;
860 else {
861 cmn_err(CE_WARN, "qlt(%d) malformed "
862 "pcie-max-payload-size in qlt.conf. Valid values "
863 "for this HBA are 128/256/512/1024",
864 instance);
865 goto over_max_payload_setting;
866 }
867 mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
868 mr = (uint16_t)(mr & 0xff1f);
869 mr = (uint16_t)(mr | (val << 5));
870 PCICFG_WR16(qlt, 0x54, mr);
871 } else {
872 cmn_err(CE_WARN, "qlt(%d): dont know how to set "
873 "pcie-max-payload-size for this device (%x)",
874 instance, did);
875 }
876
877 over_max_payload_setting:;
878
879 qlt_enable_intr(qlt);
880
881 if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS) {
882 EL(qlt, "qlt_port_start failed, tear down\n");
883 qlt_disable_intr(qlt);
884 goto attach_fail_10;
885 }
886
887 ddi_report_dev(dip);
888 return (DDI_SUCCESS);
889
890 attach_fail_10:;
891 mutex_destroy(&qlt->qlt_ioctl_lock);
892 cv_destroy(&qlt->mbox_cv);
893 cv_destroy(&qlt->rp_dereg_cv);
894 ddi_remove_minor_node(dip, qlt->qlt_minor_name);
895 attach_fail_9:;
896 qlt_destroy_mutex(qlt);
897 qlt_release_intr(qlt);
898 (void) qlt_mq_destroy(qlt);
899
900 attach_fail_8:;
901 (void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
902 attach_fail_7:;
903 ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
904 attach_fail_6:;
905 ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
906 attach_fail_5:;
907 if (qlt->mq_resp) {
908 kmem_free(qlt->mq_resp,
909 (qlt->qlt_mq_enabled ?
910 (sizeof (qlt_mq_rsp_ptr_blk_t) * MQ_MAX_QUEUES) :
911 (sizeof (qlt_mq_rsp_ptr_blk_t))));
912 }
913 qlt->mq_resp = NULL;
914 if (qlt->mq_req) {
915 kmem_free(qlt->mq_req,
916 (qlt->qlt_mq_enabled ?
917 (sizeof (qlt_mq_req_ptr_blk_t) * MQ_MAX_QUEUES) :
918 (sizeof (qlt_mq_req_ptr_blk_t))));
919 }
920 qlt->mq_req = NULL;
921
922 ddi_regs_map_free(&qlt->regs_acc_handle);
923 attach_fail_4:;
924 pci_config_teardown(&qlt->pcicfg_acc_handle);
925 attach_fail_3:;
926 (void) qlt_el_trace_desc_dtor(qlt);
927 attach_fail_2:;
928 kmem_free(qlt->vpd, QL_24XX_VPD_SIZE);
929 kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
930 attach_fail_1:;
931 ddi_soft_state_free(qlt_state, instance);
932 return (DDI_FAILURE);
933 }
934
935 #define FCT_I_EVENT_BRING_PORT_OFFLINE 0x83
936
937 /* ARGSUSED */
938 static int
939 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
940 {
941 qlt_state_t *qlt;
942
943 int instance;
944
945 instance = ddi_get_instance(dip);
946 if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
947 NULL) {
948 return (DDI_FAILURE);
949 }
950
951 if (qlt->fw_code01) {
952 return (DDI_FAILURE);
953 }
954
955 if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
956 qlt->qlt_state_not_acked) {
957 return (DDI_FAILURE);
958 }
959 if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
960 return (DDI_FAILURE);
961 }
962
963 qlt_disable_intr(qlt);
964
965 if (qlt->dmp_template_addr != NULL) {
966 (void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
967 ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
968 ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
969 }
970
971 if (qlt->fw_bin_dump_buf != NULL) {
972 kmem_free(qlt->fw_bin_dump_buf, qlt->fw_bin_dump_size);
973 qlt->fw_bin_dump_buf = NULL;
974 qlt->fw_bin_dump_size = 0;
975 qlt->fw_ascii_dump_size = 0;
976 }
977
978 if (qlt->qlt_fwdump_buf) {
979 kmem_free(qlt->qlt_fwdump_buf, qlt->fw_dump_size);
980 qlt->qlt_fwdump_buf = NULL;
981 }
982
983 ddi_remove_minor_node(dip, qlt->qlt_minor_name);
984 qlt_destroy_mutex(qlt);
985 qlt_release_intr(qlt);
986 if (qlt->qlt_mq_enabled == 1) {
987 (void) qlt_mq_destroy(qlt);
988 }
989
990 (void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
991 ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
992 ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
993 ddi_regs_map_free(&qlt->regs_acc_handle);
994
995 if (qlt->mq_resp) {
996 kmem_free(qlt->mq_resp,
997 (qlt->qlt_mq_enabled ?
998 (sizeof (qlt_mq_rsp_ptr_blk_t) * MQ_MAX_QUEUES) :
999 (sizeof (qlt_mq_rsp_ptr_blk_t))));
1000 }
1001 qlt->mq_resp = NULL;
1002 if (qlt->mq_req) {
1003 kmem_free(qlt->mq_req,
1004 (qlt->qlt_mq_enabled ?
1005 (sizeof (qlt_mq_req_ptr_blk_t) * MQ_MAX_QUEUES) :
1006 (sizeof (qlt_mq_req_ptr_blk_t))));
1007 }
1008 qlt->mq_req = NULL;
1009
1010 if (qlt->qlt_mq_enabled == 1) {
1011 if ((qlt->msix_acc_handle != NULL) &&
1012 ((qlt->qlt_83xx_chip == 1) ||
1013 (qlt->qlt_27xx_chip == 1))) {
1014 ddi_regs_map_free(&qlt->msix_acc_handle);
1015 }
1016 ddi_regs_map_free(&qlt->mq_reg_acc_handle);
1017 }
1018 pci_config_teardown(&qlt->pcicfg_acc_handle);
1019 kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
1020 cv_destroy(&qlt->mbox_cv);
1021 cv_destroy(&qlt->rp_dereg_cv);
1022 (void) qlt_el_trace_desc_dtor(qlt);
1023 ddi_soft_state_free(qlt_state, instance);
1024
1025 return (DDI_SUCCESS);
1026 }
1027
1028 /*
1029 * qlt_quiesce quiesce a device attached to the system.
1030 */
static int
qlt_quiesce(dev_info_t *dip)
{
	qlt_state_t *qlt;
	uint32_t timer;
	uint32_t stat;

	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
	if (qlt == NULL) {
		/* Oh well.... */
		return (DDI_SUCCESS);
	}

	/*
	 * Ask the RISC firmware to stop: clear any pending host->RISC
	 * interrupt, load MBC_STOP_FIRMWARE into mailbox 0 (zeroing the
	 * remaining mailboxes), then raise the host-to-RISC interrupt to
	 * deliver the command.
	 */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
	REG_WR16(qlt, REG_MBOX(1), 0);
	REG_WR16(qlt, REG_MBOX(2), 0);
	REG_WR16(qlt, REG_MBOX(3), 0);
	REG_WR16(qlt, REG_MBOX(4), 0);
	REG_WR16(qlt, REG_MBOX(5), 0);
	REG_WR16(qlt, REG_MBOX(6), 0);
	REG_WR16(qlt, REG_MBOX(7), 0);
	REG_WR16(qlt, REG_MBOX(8), 0);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
	/*
	 * Poll (up to 30000 * 100us = 3s) for the firmware to respond;
	 * acknowledge each RISC-to-PCI interrupt while waiting.
	 */
	for (timer = 0; timer < 30000; timer++) {
		stat = REG_RD32(qlt, REG_RISC_STATUS);
		if (stat & RISC_HOST_INTR_REQUEST) {
			/* status codes below 0x12 indicate mbox completion */
			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
				REG_WR32(qlt, REG_HCCR,
				    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
				break;
			}
			REG_WR32(qlt, REG_HCCR,
			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		}
		drv_usecwait(100);
	}


	/* need to ensure no one accesses the hw during the reset 100us */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		REG_WR32(qlt, REG_INTR_CTRL, 0);
		/* take mbox_lock first, then every queue lock, base last */
		mutex_enter(&qlt->mbox_lock);
		if (qlt->qlt_mq_enabled == 1) {
			int i;
			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
				mutex_enter(&qlt->mq_req[i].mq_lock);
			}
		}
		mutex_enter(&qlt->mq_req[0].mq_lock);
		drv_usecwait(40);
	}

	/* Reset the chip. */
	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
	    PCI_X_XFER_CTRL);
	drv_usecwait(100);

	/* release the locks in the reverse order they were taken */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_exit(&qlt->mq_req[0].mq_lock);
		if (qlt->qlt_mq_enabled == 1) {
			int i;
			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
				mutex_exit(&qlt->mq_req[i].mq_lock);
			}
		}
		mutex_exit(&qlt->mbox_lock);
	}

	qlt_disable_intr(qlt);

	return (DDI_SUCCESS);
}
1104
1105 static void
1106 qlt_enable_intr(qlt_state_t *qlt)
1107 {
1108 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
1109 int stat;
1110
1111 stat = ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
1112 if (stat != DDI_SUCCESS) {
1113 stmf_trace(qlt->qlt_port_alias,
1114 "qlt_enable_intr: ddi_intr_block_enable failed:%x",
1115 stat);
1116
1117 cmn_err(CE_WARN, "!qlt(%d): qlt_enable_intr: "
1118 "ddi_intr_block_enable failed:%x",
1119 qlt->instance, stat);
1120 }
1121
1122 #ifndef __sparc
1123 else {
1124 /* Please see CR6840537, MSI isn't re-enabled x86 */
1125 off_t offset;
1126 uint8_t val8;
1127 ddi_intr_handle_impl_t *hdlp;
1128
1129 if (qlt->qlt_81xx_chip || qlt->qlt_25xx_chip) {
1130 offset = (off_t)0x8a;
1131 } else {
1132 offset = (off_t)0x66;
1133 }
1134
1135 hdlp = (ddi_intr_handle_impl_t *)qlt->htable[0];
1136 if ((hdlp->ih_state == DDI_IHDL_STATE_ENABLE) &&
1137 (hdlp->ih_type == DDI_INTR_TYPE_MSI)) {
1138
1139 /* get MSI control */
1140 val8 = pci_config_get8(qlt->pcicfg_acc_handle,
1141 offset);
1142
1143 if ((val8 & 1) == 0) {
1144 stmf_trace(qlt->qlt_port_alias,
1145 "qlt(%d): qlt_enable_intr: "
1146 "MSI enable failed (%x)",
1147 qlt->instance, val8);
1148
1149 /* write enable to MSI control */
1150 val8 = (uint8_t)(val8 | 1);
1151 pci_config_put8(qlt->pcicfg_acc_handle,
1152 offset, val8);
1153
1154 /* read back to veriy */
1155 val8 = pci_config_get8
1156 (qlt->pcicfg_acc_handle, offset);
1157
1158 if (val8 & 1) {
1159 stmf_trace(qlt->qlt_port_alias,
1160 "qlt(%d): qlt_enable_intr: "
1161 "MSI enabled kludge!(%x)",
1162 qlt->instance, val8);
1163 }
1164 }
1165 }
1166 }
1167 #endif /* x86 specific hack */
1168 } else {
1169 int i;
1170 int stat = DDI_SUCCESS;
1171
1172 for (i = 0;
1173 ((i < qlt->intr_cnt) && (stat == DDI_SUCCESS)); i++) {
1174 stat = ddi_intr_enable(qlt->htable[i]);
1175 }
1176 if (stat != DDI_SUCCESS) {
1177 stmf_trace(qlt->qlt_port_alias,
1178 "qlt_enable_intr: ddi_intr_enable failed:%x",
1179 stat);
1180
1181 cmn_err(CE_WARN, "!qlt(%d): qlt_enable_intr: "
1182 "ddi_intr_enable failed:%x", qlt->instance, stat);
1183 }
1184 }
1185 }
1186
1187 static void
1188 qlt_disable_intr(qlt_state_t *qlt)
1189 {
1190 if (qlt->qlt_intr_enabled == 0) {
1191 /* ---- If we've disabled it once, just return ---- */
1192 return;
1193 }
1194
1195 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
1196 (void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
1197 } else {
1198 int i;
1199 for (i = 0; i < qlt->intr_cnt; i++)
1200 (void) ddi_intr_disable(qlt->htable[i]);
1201 }
1202 qlt->qlt_intr_enabled = 0;
1203 }
1204
1205 static void
1206 qlt_release_intr(qlt_state_t *qlt)
1207 {
1208 if (qlt->htable) {
1209 int i;
1210 for (i = 0; i < qlt->intr_cnt; i++) {
1211 (void) ddi_intr_remove_handler(qlt->htable[i]);
1212 (void) ddi_intr_free(qlt->htable[i]);
1213 }
1214 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
1215 }
1216 qlt->htable = NULL;
1217 qlt->intr_pri = 0;
1218 qlt->intr_cnt = 0;
1219 qlt->intr_size = 0;
1220 qlt->intr_cap = 0;
1221 }
1222
1223 static void
1224 qlt_init_mutex(qlt_state_t *qlt)
1225 {
1226 if (qlt->qlt_mq_enabled == 1) {
1227 int i;
1228
1229 for (i = 1; i < MQ_MAX_QUEUES; i++) {
1230 mutex_init(&qlt->mq_req[i].mq_lock, 0, MUTEX_DRIVER,
1231 INT2PTR(qlt->intr_pri, void *));
1232 mutex_init(&qlt->mq_resp[i].mq_lock, 0, MUTEX_DRIVER,
1233 INT2PTR(qlt->intr_pri, void *));
1234 }
1235 }
1236 mutex_init(&qlt->mq_req[0].mq_lock, 0, MUTEX_DRIVER,
1237 INT2PTR(qlt->intr_pri, void *));
1238 mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
1239 INT2PTR(qlt->intr_pri, void *));
1240 mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
1241 INT2PTR(qlt->intr_pri, void *));
1242 mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
1243 INT2PTR(qlt->intr_pri, void *));
1244 }
1245
1246 static void
1247 qlt_destroy_mutex(qlt_state_t *qlt)
1248 {
1249 if (qlt->qlt_mq_enabled == 1) {
1250 int i;
1251
1252 for (i = 1; i < MQ_MAX_QUEUES; i++) {
1253 mutex_destroy(&qlt->mq_req[i].mq_lock);
1254 mutex_destroy(&qlt->mq_resp[i].mq_lock);
1255 }
1256 }
1257 mutex_destroy(&qlt->mq_req[0].mq_lock);
1258 mutex_destroy(&qlt->preq_lock);
1259 mutex_destroy(&qlt->mbox_lock);
1260 mutex_destroy(&qlt->intr_lock);
1261 }
1262
/*
 * qlt_setup_msix
 *	Try to configure MSI-X interrupts: query available vectors,
 *	allocate them, initialize the driver mutexes at the vector's
 *	priority and attach a handler per vector (vector 0 gets the
 *	default handler, the rest the response-queue handler).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE so the caller can fall back to
 * MSI/fixed interrupts.
 */
static int
qlt_setup_msix(qlt_state_t *qlt)
{
	int count, avail, actual;
	int ret;
	int itype = DDI_INTR_TYPE_MSIX;
	int i;

#ifdef __x86
	/* MSI-X is known-problematic under VMware; fall back */
	if (get_hwenv() == HW_VMWARE) {
		EL(qlt, "running under hypervisor, disabling MSI-X\n");
		return (DDI_FAILURE);
	}
#endif

	/* check 24xx revision */
	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
	    (!qlt->qlt_83xx_chip) && (!qlt->qlt_27xx_chip)) {
		uint8_t rev_id;
		rev_id = (uint8_t)
		    pci_config_get8(qlt->pcicfg_acc_handle, PCI_CONF_REVID);
		/* early 24xx silicon (rev < 3) does not do MSI-X */
		if (rev_id < 3) {
			return (DDI_FAILURE);
		}
	}

	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	if (ret != DDI_SUCCESS || count == 0) {
		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
		    count);
		return (DDI_FAILURE);
	}
	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
		    avail);
		return (DDI_FAILURE);
	}
	if (avail < count) {
		stmf_trace(qlt->qlt_port_alias,
		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
	}

	/* 25xx without multiqueue only needs default + response vectors */
	if ((qlt->qlt_25xx_chip) && (qlt->qlt_mq_enabled == 0)) {
		count = 2;
	}

	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);

	EL(qlt, "qlt_setup_msix: count=%d,avail=%d,actual=%d\n", count,
	    avail, actual);

	/* we need at least 2 interrupt vectors */
	if (((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) &&
	    (ret != DDI_SUCCESS || actual < 2)) {
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto release_intr;
	} else if ((qlt->qlt_81xx_chip) && (ret != DDI_SUCCESS || actual < 3)) {
		/* 81xx needs a third vector (minimum of 3) */
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto release_intr;
	} else if (ret != DDI_SUCCESS || actual < 2) {
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	if (actual < count) {
		EL(qlt, "requested: %d, received: %d\n", count, actual);
	}

	qlt->intr_cnt = actual;
	/* mutexes must be created at the interrupt priority */
	ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);
	/* vector 0: default handler; vectors 1..n: response handler */
	for (i = 0; i < qlt->intr_cnt; i++) {
		ret = ddi_intr_add_handler(qlt->htable[i],
		    (i != 0) ? qlt_msix_resp_handler :
		    qlt_msix_default_handler,
		    qlt, INT2PTR((uint_t)i, void *));
		if (ret != DDI_SUCCESS) {
			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
			goto release_mutex;
		}
	}

	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
	qlt->intr_flags |= QLT_INTR_MSIX;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	/*
	 * Free the vectors allocated so far, then clear htable before
	 * qlt_release_intr() so it only resets the bookkeeping fields.
	 */
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(qlt->htable[i]);

	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}
1374
1375 static int
1376 qlt_setup_msi(qlt_state_t *qlt)
1377 {
1378 int count, avail, actual;
1379 int itype = DDI_INTR_TYPE_MSI;
1380 int ret;
1381 int i;
1382
1383 /* 83xx and 27xx doesn't do MSI - don't even bother? */
1384 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1385 return (DDI_FAILURE);
1386 }
1387
1388 /* get the # of interrupts */
1389 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
1390 if (ret != DDI_SUCCESS || count == 0) {
1391 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
1392 count);
1393 return (DDI_FAILURE);
1394 }
1395 ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
1396 if (ret != DDI_SUCCESS || avail == 0) {
1397 EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
1398 avail);
1399 return (DDI_FAILURE);
1400 }
1401 if (avail < count) {
1402 EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
1403 }
1404 /* MSI requires only 1 interrupt. */
1405 count = 1;
1406
1407 /* allocate interrupt */
1504 release_mutex:
1505 qlt_destroy_mutex(qlt);
1506 release_intr:
1507 (void) ddi_intr_free(qlt->htable[0]);
1508 free_mem:
1509 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
1510 qlt->htable = NULL;
1511 qlt_release_intr(qlt);
1512 return (ret);
1513 }
1514
/*
 * qlt_setup_interrupts
 *	Choose an interrupt mechanism in order of preference:
 *	MSI-X, then MSI (each gated by its qlt_enable_* tunable and the
 *	types the platform reports), falling back to fixed interrupts.
 *	On SPARC the MSI/MSI-X path is attempted unconditionally; see the
 *	CR reference below for the x86 gating.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (from qlt_setup_fixed()).
 */
static int
qlt_setup_interrupts(qlt_state_t *qlt)
{
	int itypes = 0;

/*
 * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
 */
#ifndef __sparc
	if ((qlt_enable_msi != 0) || (qlt_enable_msix != 0)) {
#endif
	/* if the query fails, assume only fixed interrupts exist */
	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
		itypes = DDI_INTR_TYPE_FIXED;
	}
	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
	if (qlt_enable_msi && (itypes & DDI_INTR_TYPE_MSI)) {
		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
#ifndef __sparc
	}
#endif
	/* last resort: legacy fixed (INTx) interrupts */
	return (qlt_setup_fixed(qlt));
}
1542
/*
 * qlt_vpd_findtag
 *	Walk the VPD buffer looking for the tag named by 'opcode'.
 *
 *	vpdbuf = VPD data buffer (QL_24XX_VPD_SIZE bytes).
 *	opcode = NUL-terminated tag string to search for (may also be the
 *		 single-byte VPD_TAG_END value).
 *
 * Returns a pointer to the start of the matching tag within vpdbuf,
 * or NULL if the tag is not present (or a parameter was NULL).
 */
static uint8_t *
qlt_vpd_findtag(qlt_state_t *qlt, uint8_t *vpdbuf, int8_t *opcode)
{
	uint8_t *vpd = vpdbuf;
	uint8_t *end = vpdbuf + QL_24XX_VPD_SIZE;
	uint32_t found = 0;

	if (vpdbuf == NULL || opcode == NULL) {
		EL(qlt, "null parameter passed!\n");
		return (NULL);
	}

	while (vpd < end) {
		/* END tag terminates the list; it matches only itself */
		if (vpd[0] == VPD_TAG_END) {
			if (opcode[0] == VPD_TAG_END) {
				found = 1;
			} else {
				found = 0;
			}
			break;
		}

		/* compare the leading strlen(opcode) bytes of this tag */
		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
			found = 1;
			break;
		}

		/*
		 * Advance to the next tag.  PRODID carries a 16-bit
		 * little-endian length in bytes 1-2; LRT/LRTC tags are
		 * assumed to be 3 bytes; other tags use a 1-byte length
		 * at offset 2.  (Layout per the tag encoding this driver
		 * writes -- NOTE(review): verify against the VPD format
		 * used by the firmware if tags are ever added.)
		 */
		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
			vpd += (vpd[2] << 8) + vpd[1] + 3;
		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
			vpd += 3;
		} else {
			vpd += vpd[2] +3;
		}
	}
	return (found == 1 ? vpd : NULL);
}
1580
1581 /*
1582 * qlt_vpd_lookup
1583 * Return the VPD data for the request VPD tag
1584 *
1585 * Input:
1586 * qlt = adapter state pointer.
1587 * opcode = VPD opcode to find (must be NULL terminated).
1588 * bp = Pointer to returned data buffer.
1589 * bplen = Length of returned data buffer.
1590 *
1591 * Returns:
1592 * Length of data copied into returned data buffer.
1593 * >0 = VPD data field (NULL terminated)
1594 * 0 = no data.
1595 * -1 = Could not find opcode in vpd buffer / error.
1596 *
1597 * Context:
1598 * Kernel context.
1599 *
1600 * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1601 *
1602 */
1603 static int
1604 qlt_vpd_lookup(qlt_state_t *qlt, uint8_t *opcode, uint8_t *bp,
1605 int32_t bplen)
1606 {
1607 uint8_t *vpd = NULL;
1608 uint8_t *vpdbuf = NULL;
1609 int32_t len = -1;
1610
1611 if (opcode == NULL || bp == NULL || bplen < 1) {
1612 EL(qlt, "invalid parameter passed: opcode=%ph, "
1613 "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1614 return (len);
1615 }
1616
1617 vpdbuf = (uint8_t *)qlt->vpd;
1618 if ((vpd = qlt_vpd_findtag(qlt, vpdbuf, (int8_t *)opcode)) != NULL) {
1619 /*
1620 * Found the tag
1621 */
1622 if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1623 *opcode == VPD_TAG_LRTC) {
1624 /*
1625 * We found it, but the tag doesn't have a data
1626 * field.
1627 */
1628 len = 0;
1629 } else if (!(strncmp((char *)vpd, (char *)
1630 VPD_TAG_PRODID, 1))) {
1631 len = vpd[2] << 8;
1632 len += vpd[1];
1633 } else {
1634 len = vpd[2];
1635 }
1636
1637 /*
1638 * Make sure that the vpd len does not exceed the
1639 * vpd end
1640 */
1641 if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) {
1642 EL(qlt, "vpd tag len (%xh) exceeds vpd buffer "
1643 "length\n", len);
1644 len = -1;
1645 }
1646 } else {
1647 EL(qlt, "Cna't find vpd tag \n");
1648 return (-1);
1649 }
1650
1651 if (len >= 0) {
1652 /*
1653 * make sure we don't exceed callers buffer space len
1654 */
1655 if (len > bplen) {
1656 len = bplen - 1;
1657 }
1658 /* copy the data back */
1659 (void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len);
1660 bp[len] = NULL;
1661 } else {
1662 /* error -- couldn't find tag */
1663 bp[0] = NULL;
1664 if (opcode[1] != NULL) {
1665 EL(qlt, "unable to find tag '%s'\n", opcode);
1666 } else {
1667 EL(qlt, "unable to find tag '%xh'\n", opcode[0]);
1668 }
1669 }
1670 return (len);
1671 }
1672
1673 void
1674 qlt_get_rom_version(qlt_state_t *qlt, caddr_t orv)
1675 {
1676 int i;
1677 char bios0_str[32];
1678 char fcode_str[32];
1679 char efi_str[32];
1680 char hppa_str[32];
1681 char tmp[80];
1682 uint32_t bios_cnt = 0;
1683 uint32_t fcode_cnt = 0;
1684 boolean_t last_image = FALSE;
1685
1686 /* collect right rom_version from image[] */
1687 i = 0;
1688 do {
1689 if (qlt->rimage[0].header.signature[0] != PCI_HEADER0) {
1690 break;
1691 }
1692
1693 if (qlt->rimage[i].data.codetype == PCI_CODE_X86PC) {
1694 /* BIOS */
1695 if (bios_cnt == 0) {
1696 (void) snprintf(bios0_str,
1697 32,
1698 "%d.%02d",
1699 qlt->rimage[i].data.
1700 revisionlevel[1],
1701 qlt->rimage[i].data.
1702 revisionlevel[0]);
1703 (void) snprintf(tmp, 80,
1704 " BIOS: %s;", bios0_str);
1705 (void) strcat(orv, tmp);
1706 }
1707 bios_cnt++;
1708 } else if (qlt->rimage[i].data.codetype == PCI_CODE_FCODE) {
1709 /* FCode */
1710 if (fcode_cnt == 0) {
1711 (void) snprintf(fcode_str,
1712 32,
1713 "%d.%02d",
1714 qlt->rimage[i].data.revisionlevel[1],
1715 qlt->rimage[i].data.revisionlevel[0]);
1716 (void) snprintf(tmp, 80,
1717 " FCode: %s;", fcode_str);
1718 (void) strcat(orv, tmp);
1719 }
1720 fcode_cnt++;
1721 } else if (qlt->rimage[i].data.codetype == PCI_CODE_EFI) {
1722 /* EFI */
1723 (void) snprintf(efi_str,
1724 32,
1725 "%d.%02d",
1726 qlt->rimage[i].data.revisionlevel[1],
1727 qlt->rimage[i].data.revisionlevel[0]);
1728 (void) snprintf(tmp, 80, " EFI: %s;", efi_str);
1729 (void) strcat(orv, tmp);
1730 } else if (qlt->rimage[i].data.codetype == PCI_CODE_HPPA) {
1731 /* HPPA */
1732 (void) snprintf(hppa_str,
1733 32,
1734 "%d.%02d",
1735 qlt->rimage[i].data.revisionlevel[1],
1736 qlt->rimage[i].data.revisionlevel[0]);
1737 (void) snprintf(orv, 80, " HPPA: %s;", hppa_str);
1738 (void) strcat(orv, tmp);
1739 } else if (qlt->rimage[i].data.codetype == PCI_CODE_FW) {
1740 EL(qlt, "fw infor skip\n");
1741 } else {
1742 /* Unknown */
1743 EL(qlt, "unknown image\n");
1744 break;
1745 }
1746
1747 if (qlt->rimage[i].data.indicator == PCI_IND_LAST_IMAGE) {
1748 last_image = TRUE;
1749 break;
1750 }
1751
1752 i ++;
1753 } while ((last_image != TRUE) && (i < 6));
1754
1755 if (last_image != TRUE) {
1756 /* No boot image detected */
1757 (void) snprintf(orv, FCHBA_OPTION_ROM_VERSION_LEN, "%s",
1758 "No boot image detected");
1759 }
1760 }
1761
1762 /*
1763 * Filling the hba attributes
1764 */
1765 void
1766 qlt_populate_hba_fru_details(struct fct_local_port *port,
1767 struct fct_port_attrs *port_attrs)
1768 {
1769 int len;
1770 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1771
1772 (void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
1773 "QLogic Corp.");
1774 (void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
1775 "%s", QLT_NAME);
1776 (void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
1777 "%s", QLT_VERSION);
1778 /* get serial_number from vpd data */
1779 if (qlt_vpd_lookup(qlt, (uint8_t *)VPD_TAG_SN, (uint8_t *)
1780 port_attrs->serial_number, FCHBA_SERIAL_NUMBER_LEN) == -1) {
1781 port_attrs->serial_number[0] = '\0';
1782 }
1783 port_attrs->hardware_version[0] = '\0';
1784
1785 (void) snprintf(port_attrs->firmware_version,
1786 FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
1787 qlt->fw_minor, qlt->fw_subminor);
1788
1789 /* Get FCode version */
1790 qlt_get_rom_version(qlt, (caddr_t)&port_attrs->option_rom_version[0]);
1791
1792 port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
1793 qlt->nvram->subsystem_vendor_id[1] << 8;
1794
1795 port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1796 qlt->nvram->max_frame_length[0];
1797
1798 port_attrs->supported_cos = 0x10000000;
1799
1800 if (qlt->qlt_fcoe_enabled) {
1801 port_attrs->supported_speed = PORT_SPEED_10G;
1802 } else if (qlt->qlt_27xx_chip) {
1803 if ((qlt->qlt_27xx_speed & MAX_SPEED_MASK) == MAX_SPEED_32G) {
1804 port_attrs->supported_speed = PORT_SPEED_8G |
1805 PORT_SPEED_16G | PORT_SPEED_32G;
1806 } else {
1807 port_attrs->supported_speed = PORT_SPEED_4G |
1808 PORT_SPEED_8G | PORT_SPEED_16G;
1809 }
1810 } else if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1811 port_attrs->supported_speed = PORT_SPEED_4G |
1812 PORT_SPEED_8G | PORT_SPEED_16G;
1813 } else if (qlt->qlt_25xx_chip) {
1814 port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
1815 PORT_SPEED_8G;
1816 } else {
1817 port_attrs->supported_speed = PORT_SPEED_1G |
1818 PORT_SPEED_2G | PORT_SPEED_4G;
1819 }
1820
1821 /* limit string length to nvr model_name length */
1822 len = ((qlt->qlt_81xx_chip) || (qlt->qlt_83xx_chip) ||
1823 (qlt->qlt_27xx_chip)) ? 16 : 8;
1824 (void) snprintf(port_attrs->model,
1825 (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1826 "%s", qlt->nvram->model_name);
1827
1828 (void) snprintf(port_attrs->model_description,
1829 (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1830 FCHBA_MODEL_DESCRIPTION_LEN),
1831 "%s", qlt->nvram->model_name);
1832 }
1833
1834 /* ARGSUSED */
1835 fct_status_t
1836 qlt_info(uint32_t cmd, fct_local_port_t *port,
1837 void *arg, uint8_t *buf, uint32_t *bufsizep)
1838 {
1839 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1840 mbox_cmd_t *mcp;
1841 fct_status_t ret = FCT_SUCCESS;
1842 uint8_t *p;
1843 fct_port_link_status_t *link_status;
1844
1845 switch (cmd) {
1846 case FC_TGT_PORT_RLS:
1847 if (qlt->qlt_state != FCT_STATE_ONLINE) {
1848 break;
1849 }
1850 if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1851 EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1852 "fct_port_link_status_t=%xh\n", *bufsizep,
1853 sizeof (fct_port_link_status_t));
1854 ret = FCT_FAILURE;
1855 break;
1856 }
1857 /* send mailbox command to get link status */
1858 mcp = qlt_alloc_mailbox_command(qlt, 156);
1859 if (mcp == NULL) {
1860 EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1861 ret = FCT_ALLOC_FAILURE;
1862 break;
1863 }
1864
1865 /* GET LINK STATUS count */
1866 mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
1867 mcp->to_fw[8] = 156/4;
1868 mcp->to_fw_mask |= BIT_1 | BIT_8;
1869 mcp->from_fw_mask |= BIT_1 | BIT_2;
1870
1871 ret = qlt_mailbox_command(qlt, mcp);
1872 if (ret != QLT_SUCCESS) {
1873 EL(qlt, "qlt_mbox_command=6dh status=%llxh\n", ret);
1874 qlt_free_mailbox_command(qlt, mcp);
1875 break;
1876 }
1877 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1878
1879 p = mcp->dbuf->db_sglist[0].seg_addr;
1880 link_status = (fct_port_link_status_t *)buf;
1881 link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1882 link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1883 link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1884 link_status->PrimitiveSeqProtocolErrorCount =
1885 LE_32(*((uint32_t *)(p + 12)));
1886 link_status->InvalidTransmissionWordCount =
1887 LE_32(*((uint32_t *)(p + 16)));
1888 link_status->InvalidCRCCount =
1889 LE_32(*((uint32_t *)(p + 20)));
1890
1891 qlt_free_mailbox_command(qlt, mcp);
1892 break;
1893 default:
1894 EL(qlt, "Unknown cmd=%xh\n", cmd);
1895 ret = FCT_FAILURE;
1896 break;
1897 }
1898 return (ret);
1899 }
1900
/*
 * Create and register an FCT local port for this adapter instance.
 *
 * Allocation order (and the reverse unwind order on failure):
 *   1. dmem buffer pools		-> qlt_pstart_fail_1
 *   2. dma handle pool
 *   3. fct_local_port_t		-> qlt_pstart_fail_2
 *   4. fct_dbuf_store_t		-> qlt_pstart_fail_2_5
 *   5. fct_register_local_port()
 *
 * Returns QLT_SUCCESS, or FCT_FAILURE/QLT_FAILURE after unwinding.
 */
fct_status_t
qlt_port_start(caddr_t arg)
{
	qlt_state_t *qlt = (qlt_state_t *)arg;
	fct_local_port_t *port;
	fct_dbuf_store_t *fds;
	fct_status_t ret;

	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
		return (FCT_FAILURE);
	}

	/* Initialize the ddi_dma_handle free pool */
	qlt_dma_handle_pool_init(qlt);

	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
	if (port == NULL) {
		goto qlt_pstart_fail_1;
	}
	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
	if (fds == NULL) {
		goto qlt_pstart_fail_2;
	}
	qlt->qlt_port = port;
	/* dbuf store ops: route FCT data-buffer requests to our dmem pools */
	fds->fds_alloc_data_buf = qlt_dmem_alloc;
	fds->fds_free_data_buf = qlt_dmem_free;
	fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
	fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
	fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
	fds->fds_copy_threshold = (uint32_t)MMU_PAGESIZE;
	fds->fds_fca_private = (void *)qlt;
	/*
	 * Since we keep everything in the state struct and dont allocate any
	 * port private area, just use that pointer to point to the
	 * state struct.
	 */
	port->port_fca_private = qlt;
	port->port_fca_abort_timeout = 5 * 1000; /* 5 seconds */
	/* WWNs come straight from NVRAM */
	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
	port->port_default_alias = qlt->qlt_port_alias;
	port->port_pp = qlt_pp;
	port->port_fds = fds;
	port->port_max_logins = QLT_MAX_LOGINS;
	port->port_max_xchges = QLT_MAX_XCHGES;
	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
	/* FCT entry-point vector for this port */
	port->port_get_link_info = qlt_get_link_info;
	port->port_register_remote_port = qlt_register_remote_port;
	port->port_deregister_remote_port = qlt_deregister_remote_port;
	port->port_send_cmd = qlt_send_cmd;
	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
	port->port_send_cmd_response = qlt_send_cmd_response;
	port->port_abort_cmd = qlt_abort_cmd;
	port->port_ctl = qlt_ctl;
	port->port_flogi_xchg = qlt_do_flogi;
	port->port_populate_hba_details = qlt_populate_hba_fru_details;
	port->port_info = qlt_info;
	port->port_fca_version = FCT_FCA_MODREV_1;

	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
		goto qlt_pstart_fail_2_5;
	}

	EL(qlt, "Qlogic qlt(%d) "
	    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x:"
	    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
	    qlt->instance,
	    qlt->nvram->port_name[0],
	    qlt->nvram->port_name[1],
	    qlt->nvram->port_name[2],
	    qlt->nvram->port_name[3],
	    qlt->nvram->port_name[4],
	    qlt->nvram->port_name[5],
	    qlt->nvram->port_name[6],
	    qlt->nvram->port_name[7],
	    qlt->nvram->node_name[0],
	    qlt->nvram->node_name[1],
	    qlt->nvram->node_name[2],
	    qlt->nvram->node_name[3],
	    qlt->nvram->node_name[4],
	    qlt->nvram->node_name[5],
	    qlt->nvram->node_name[6],
	    qlt->nvram->node_name[7]);

	return (QLT_SUCCESS);
#if 0
qlt_pstart_fail_3:
	(void) fct_deregister_local_port(port);
#endif
qlt_pstart_fail_2_5:
	fct_free(fds);
qlt_pstart_fail_2:
	fct_free(port);
	qlt->qlt_port = NULL;
qlt_pstart_fail_1:
	qlt_dma_handle_pool_fini(qlt);
	qlt_dmem_fini(qlt);
	return (QLT_FAILURE);
}
2006
2007 fct_status_t
2008 qlt_port_stop(caddr_t arg)
2009 {
2010 qlt_state_t *qlt = (qlt_state_t *)arg;
2011 fct_status_t ret;
2012
2013 if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
2014 EL(qlt, "fct_register_local_port status=%llxh\n", ret);
2015 return (QLT_FAILURE);
2016 }
2017 fct_free(qlt->qlt_port->port_fds);
2018 fct_free(qlt->qlt_port);
2019 qlt_dma_handle_pool_fini(qlt);
2020 qlt->qlt_port = NULL;
2021 qlt_dmem_fini(qlt);
2022 return (QLT_SUCCESS);
2023 }
2024
2025 /*
2026 * Called by framework to init the HBA.
2027 * Can be called in the middle of I/O. (Why ??)
2028 * Should make sure sane state both before and after the initialization
2029 */
2030 fct_status_t
2031 qlt_port_online(qlt_state_t *qlt)
2032 {
2033 uint64_t da;
2034 int instance, i, j;
2035 fct_status_t ret;
2036 uint16_t rcount;
2037 caddr_t icb;
2038 mbox_cmd_t *mcp;
2039 uint8_t *elsbmp;
2040
2041 instance = ddi_get_instance(qlt->dip);
2042
2043 /* XXX Make sure a sane state */
2044
2045 if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
2046 cmn_err(CE_NOTE, "qlt(%d): reset chip failed %llx",
2047 qlt->instance, (long long)ret);
2048 return (ret);
2049 }
2050
2051 bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
2052
2053 /* Get resource count */
2054 REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
2055 ret = qlt_raw_mailbox_command(qlt);
2056 rcount = REG_RD16(qlt, REG_MBOX(3));
2057 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2058 if (ret != QLT_SUCCESS) {
2059 EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
2060 return (ret);
2061 }
2062
2063 /* Enable PUREX */
2064 REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
2065 REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
2066 REG_WR16(qlt, REG_MBOX(2), 0x0);
2067 REG_WR16(qlt, REG_MBOX(3), 0x0);
2070 if (ret != QLT_SUCCESS) {
2071 EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
2072 cmn_err(CE_NOTE, "Enable PUREX failed");
2073 return (ret);
2074 }
2075
2076 /* Pass ELS bitmap to fw */
2077 REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
2078 REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
2079 elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
2080 bzero(elsbmp, 32);
2081 da = qlt->queue_mem_cookie.dmac_laddress;
2082 da += MBOX_DMA_MEM_OFFSET;
2083 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2084 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2085 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2086 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2087 SETELSBIT(elsbmp, ELS_OP_PLOGI);
2088 SETELSBIT(elsbmp, ELS_OP_LOGO);
2089 SETELSBIT(elsbmp, ELS_OP_ABTX);
2090 /* SETELSBIT(elsbmp, ELS_OP_ECHO); till fct handles it */
2091 SETELSBIT(elsbmp, ELS_OP_PRLI);
2092 SETELSBIT(elsbmp, ELS_OP_PRLO);
2093 SETELSBIT(elsbmp, ELS_OP_SCN);
2094 SETELSBIT(elsbmp, ELS_OP_TPRLO);
2095 SETELSBIT(elsbmp, ELS_OP_PDISC);
2096 SETELSBIT(elsbmp, ELS_OP_ADISC);
2097 SETELSBIT(elsbmp, ELS_OP_RSCN);
2098 SETELSBIT(elsbmp, ELS_OP_RNID);
2099 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
2100 DDI_DMA_SYNC_FORDEV);
2101 ret = qlt_raw_mailbox_command(qlt);
2102 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2103 if (ret != QLT_SUCCESS) {
2104 EL(qlt, "qlt_raw_mailbox_command=59h status=llxh\n", ret);
2105 cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
2106 "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
2107 elsbmp[1]);
2108 return (ret);
2109 }
2110
2111 /* Init queue pointers */
2112 if (qlt->qlt_mq_enabled == 1) {
2113 uint16_t qi;
2114
2115 for (qi = 0; qi < MQ_MAX_QUEUES; qi++) {
2116 MQBAR_WR32(qlt,
2117 (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN, 0);
2118 MQBAR_WR32(qlt,
2119 (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT, 0);
2120 MQBAR_WR32(qlt,
2121 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN, 0);
2122 MQBAR_WR32(qlt,
2123 (qi * MQBAR_REG_OFFSET) +
2124 MQBAR_RESP_OUT, 0);
2125 }
2126 } else {
2127 REG_WR32(qlt, REG_REQ_IN_PTR, 0);
2128 REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
2129 REG_WR32(qlt, REG_RESP_IN_PTR, 0);
2130 REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
2131 }
2132
2133 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
2134 REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
2135 REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
2136 REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
2137 REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
2138 }
2139 qlt->mq_req[0].mq_ndx_to_fw = qlt->mq_req[0].mq_ndx_from_fw = 0;
2140 qlt->mq_req[0].mq_available = REQUEST_QUEUE_ENTRIES - 1;
2141
2142 if (qlt->qlt_mq_enabled == 1) {
2143 for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2144 qlt->mq_req[i].mq_ndx_to_fw = 0;
2145 qlt->mq_req[i].mq_ndx_from_fw = 0;
2146 qlt->mq_req[i].mq_available =
2147 REQUEST_QUEUE_MQ_ENTRIES - 1;
2148 }
2149 }
2150 qlt->mq_resp[0].mq_ndx_to_fw = qlt->mq_resp[0].mq_ndx_from_fw = 0;
2151
2152 if (qlt->qlt_mq_enabled == 1) {
2153 caddr_t resp;
2154
2155 for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2156 qlt->mq_resp[i].mq_ndx_to_fw = 0;
2157 qlt->mq_resp[i].mq_ndx_from_fw = 0;
2158 for (j = 0; j < RESPONSE_QUEUE_MQ_ENTRIES; j++) {
2159 resp = &qlt->mq_resp[i].mq_ptr[j << 6];
2160 QMEM_WR32_RSPQ(qlt, i, resp+0x3c, 0xdeadbeef);
2161 }
2162 }
2163 }
2164
2165 for (i = 0; i < ATIO_QUEUE_ENTRIES; i++) {
2166 caddr_t atio;
2167
2168 atio = &qlt->atio_ptr[i << 6];
2169 QMEM_WR32(qlt, atio+0x3c, 0xdeadbeef);
2170 }
2171
2172 qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
2173 qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
2174
2175 /*
2176 * XXX support for tunables. Also should we cache icb ?
2177 */
2178 if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2179 (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2180 (qlt->qlt_mq_enabled))) {
2181 /*
2182 * allocate extra 64 bytes for Extended init control block,
2183 * with separation to allow for a minimal MID section.
2184 */
2185 mcp = qlt_alloc_mailbox_command(qlt, 0xE0);
2186 } else {
2187 mcp = qlt_alloc_mailbox_command(qlt, 0x80);
2188 }
2189 if (mcp == NULL) {
2190 EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
2191 return (STMF_ALLOC_FAILURE);
2192 }
2193 icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
2194 if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2195 (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2196 (qlt->qlt_mq_enabled))) {
2197 bzero(icb, 0xE0);
2198 } else {
2199 bzero(icb, 0x80);
2200 }
2201 da = qlt->queue_mem_cookie.dmac_laddress;
2202 DMEM_WR16(qlt, icb, 1); /* Version */
2203 DMEM_WR16(qlt, icb+4, 2112); /* Max frame length */
2204 DMEM_WR16(qlt, icb+6, 16); /* Execution throttle */
2205 DMEM_WR16(qlt, icb+8, rcount); /* Xchg count */
2206 DMEM_WR16(qlt, icb+0x0a, 0x00); /* Hard address (not used) */
2207 bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
2208 bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
2209 DMEM_WR16(qlt, icb+0x20, 3); /* Login retry count */
2210 DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
2211 DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
2212 if ((!qlt->qlt_83xx_chip) && (!qlt->qlt_81xx_chip) &&
2213 (!qlt->qlt_27xx_chip)) {
2214 DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
2215 }
2216 if ((!qlt->qlt_83xx_chip) || (!qlt->qlt_27xx_chip)) {
2217 DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
2218 }
2219 DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
2220 DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
2221 if ((!qlt->qlt_83xx_chip) || (!qlt->qlt_27xx_chip)) {
2222 DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
2223 }
2224 /* XXX: all hba model atio/resp 0 use vector 0 */
2225 DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
2226 DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
2227 DMEM_WR16(qlt, icb+0x58, 2); /* Interrupt delay Timer */
2228 DMEM_WR16(qlt, icb+0x5a, 4); /* Login timeout (secs) */
2229 if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2230 (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2231 (qlt->qlt_mq_enabled))) {
2232 qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
2233
2234 /* fw options 1 */
2235 if (qlt->qlt_fcoe_enabled) {
2236 DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4);
2237 } else {
2238 DMEM_WR32(qlt, icb+0x5c,
2239 BIT_11 | BIT_5 | BIT_4 | BIT_2 | BIT_1 | BIT_0);
2240 }
2241 /* fw options 2 */
2242 if (qlt->qlt_mq_enabled) {
2243 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
2244 if (qlt->qlt_fcoe_enabled) {
2245 DMEM_WR32(qlt, icb+0x60,
2246 BIT_26 | BIT_23 | BIT_22);
2247 } else {
2248 DMEM_WR32(qlt, icb+0x60,
2249 BIT_26 | BIT_23 | BIT_22 | BIT_5);
2250 }
2251 } else {
2252 DMEM_WR32(qlt,
2253 icb+0x60, BIT_26 | BIT_23 | BIT_22 | BIT_5);
2254 }
2255 }
2256
2257 /* fw options 3 */
2258 if (qlt->qlt_fcoe_enabled) {
2259 DMEM_WR32(qlt, icb+0x64, BIT_4);
2260 } else {
2261 DMEM_WR32(qlt, icb+0x64,
2262 BIT_14 | BIT_8 | BIT_7 | BIT_4);
2263 }
2264
2265 if (qlt->qlt_mq_enabled) {
2266 DMEM_WR16(qlt, icb+0x68, 5); /* QoS priority = 5 */
2267 }
2268
2269 DMEM_WR32(qlt, icb+0x70,
2270 qlt81nvr->enode_mac[0] |
2271 (qlt81nvr->enode_mac[1] << 8) |
2272 (qlt81nvr->enode_mac[2] << 16) |
2273 (qlt81nvr->enode_mac[3] << 24));
2274 DMEM_WR16(qlt, icb+0x74,
2275 qlt81nvr->enode_mac[4] |
2276 (qlt81nvr->enode_mac[5] << 8));
2277 } else {
2278 DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
2279 BIT_2 | BIT_1 | BIT_0);
2280 DMEM_WR32(qlt, icb+0x60, BIT_5);
2281 DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
2282 BIT_4);
2283
2284 /* null MID setup */
2285 DMEM_WR16(qlt, icb+0x80, 1); /* VP count 1 */
2286 }
2287
2288 if (qlt->qlt_fcoe_enabled) {
2289 qlt_dmem_bctl_t *bctl;
2290 uint32_t index;
2291 caddr_t src;
2292 caddr_t dst;
2293 qlt_nvram_81xx_t *qlt81nvr;
2294
2295 dst = icb+0xA0;
2296 qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
2297 src = (caddr_t)&qlt81nvr->ext_blk;
2298 index = sizeof (qlt_ext_icb_81xx_t);
2299
2300 /* Use defaults for cases where we find nothing in NVR */
2301 if ((qlt->qlt_83xx_chip) || (*src == 0)) {
2302 if (*src == 0) {
2303 EL(qlt, "nvram eicb=null\n");
2304 cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
2305 instance);
2306 }
2307 qlt81nvr->ext_blk.version[0] = 1;
2308 /*
2309 * not yet, for !FIP firmware at least
2310 *
2311 * qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
2312 */
2313 #ifdef _LITTLE_ENDIAN
2314 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
2315 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
2316 #else
2317 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
2318 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
2319 #endif
2320 }
2321
2322 while (index--) {
2323 *dst++ = *src++;
2324 }
2325
2326 bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2327 da = bctl->bctl_dev_addr + 0xA0; /* base addr of eicb (phys) */
2328
2329 mcp->to_fw[11] = LSW(LSD(da));
2330 mcp->to_fw[10] = MSW(LSD(da));
2331 mcp->to_fw[13] = LSW(MSD(da));
2332 mcp->to_fw[12] = MSW(MSD(da));
2333 mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
2334 0xffff);
2335
2336 /* eicb enable */
2337 mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
2338 mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
2339 BIT_1;
2340 }
2341
2342 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
2343 if (((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2344 (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2345 (qlt->qlt_mq_enabled))) && (qlt->fw_attr & BIT_6)) {
2346 mcp->to_fw[0] = MBC_INITIALIZE_MULTI_ID_FW;
2347 } else {
2348 mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
2349 }
2350
2351 /*
2352 * This is the 1st command after adapter initialize which will
2353 * use interrupts and regular mailbox interface.
2354 */
2355 qlt->qlt_intr_enabled = 1;
2356 qlt->mbox_io_state = MBOX_STATE_READY;
2357 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2358 /* Issue mailbox to firmware */
2359 ret = qlt_mailbox_command(qlt, mcp);
2360 if (ret != QLT_SUCCESS) {
2361 EL(qlt, "qlt_mbox_command=48h/60h status=%llxh\n", ret);
2362 cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
2363 instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
2364 qlt_free_mailbox_command(qlt, mcp);
2365 return (ret);
2366 }
2367
2368 mcp->to_fw_mask = BIT_0;
2369 mcp->from_fw_mask = BIT_0 | BIT_1;
2370 mcp->to_fw[0] = 0x28;
2371 ret = qlt_mailbox_command(qlt, mcp);
2372 if (ret != QLT_SUCCESS) {
2373 EL(qlt, "qlt_mbox_command=28h status=%llxh\n", ret);
2374 cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
2375 (long long)ret);
2376 qlt_free_mailbox_command(qlt, mcp);
2377 return (ret);
2378 }
2379
2380 if (qlt->qlt_mq_enabled == 1) {
2381
2382 for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2383 da = qlt->mq_resp[i].queue_mem_mq_cookie.dmac_laddress;
2384
2385 mcp->to_fw_mask = BIT_14 | BIT_13 | BIT_12 | BIT_11 |
2386 BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 |
2387 BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0;
2388 mcp->from_fw_mask = BIT_0 | BIT_1;
2389
2390 /* msix vector setup */
2391 mcp->to_fw[14] = (uint16_t)(i);
2392
2393 mcp->to_fw[13] = 0;
2394 mcp->to_fw[12] = 0;
2395 mcp->to_fw[11] = 0;
2396 mcp->to_fw[10] = 0;
2397 mcp->to_fw[9] = 0;
2398 mcp->to_fw[8] = 0;
2399 mcp->to_fw[7] = LSW(MSD(da));
2400 mcp->to_fw[6] = MSW(MSD(da));
2401 mcp->to_fw[5] = RESPONSE_QUEUE_MQ_ENTRIES;
2402 mcp->to_fw[4] = (uint16_t)(i);
2403 mcp->to_fw[3] = LSW(LSD(da));
2404 mcp->to_fw[2] = MSW(LSD(da));
2405 mcp->to_fw[1] = BIT_6 | BIT_1;
2406 mcp->to_fw[0] = 0x1F;
2407 ret = qlt_mailbox_command(qlt, mcp);
2408
2409 if (ret != QLT_SUCCESS) {
2410 EL(qlt, "qlt_mbox_command=1fh status=%llxh\n",
2411 ret);
2412 cmn_err(CE_NOTE, "qlt(%d) queue manage %llx",
2413 instance, (long long)ret);
2414 qlt_free_mailbox_command(qlt, mcp);
2415 return (ret);
2416 }
2417
2418 da = qlt->mq_req[i].queue_mem_mq_cookie.dmac_laddress;
2419
2420 mcp->to_fw_mask = BIT_14 | BIT_13 | BIT_12 | BIT_11 |
2421 BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 |
2422 BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0;
2423 mcp->from_fw_mask = BIT_0 | BIT_1;
2424
2425 /*
2426 * msix vector does not apply for request queue create
2427 */
2428 mcp->to_fw[14] = 2;
2429 mcp->to_fw[13] = 0;
2430 mcp->to_fw[12] = 4;
2431 mcp->to_fw[11] = 0;
2432 mcp->to_fw[10] = (uint16_t)(i);
2433 mcp->to_fw[9] = 0;
2434 mcp->to_fw[8] = 0;
2435 mcp->to_fw[7] = LSW(MSD(da));
2436 mcp->to_fw[6] = MSW(MSD(da));
2437 mcp->to_fw[5] = REQUEST_QUEUE_MQ_ENTRIES;
2438 mcp->to_fw[4] = (uint16_t)(i);
2439 mcp->to_fw[3] = LSW(LSD(da));
2440 mcp->to_fw[2] = MSW(LSD(da));
2441 mcp->to_fw[1] = BIT_6;
2442 mcp->to_fw[0] = 0x1F;
2443 ret = qlt_mailbox_command(qlt, mcp);
2444
2445 if (ret != QLT_SUCCESS) {
2446 EL(qlt, "qlt_mbox_command=1fh status=%llxh\n",
2447 ret);
2448 cmn_err(CE_NOTE, "qlt(%d) queue manage %llx",
2449 instance, (long long)ret);
2450 qlt_free_mailbox_command(qlt, mcp);
2451 return (ret);
2452 }
2453 }
2454 }
2455
2456 /*
2457 * Report FW versions for 81xx - MPI rev is useful
2458 */
2459 /* if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) { */
2460 if (qlt->qlt_fcoe_enabled) {
2461 mcp->to_fw_mask = BIT_0;
2462 mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_6 | BIT_3 | BIT_2 |
2463 BIT_1 | BIT_0;
2464
2465 mcp->to_fw[0] = MBC_ABOUT_FIRMWARE;
2466 ret = qlt_mailbox_command(qlt, mcp);
2467 if (ret != QLT_SUCCESS) {
2468 EL(qlt, "about fw failed: %llx\n", (long long)ret);
2469 } else {
2470 EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
2471 mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
2472 mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
2473 mcp->from_fw[11] & 0xff);
2474 EL(qlt, "Firmware Attributes %x[h]\n",
2475 mcp->from_fw[6]);
2476 }
2477 }
2478
2479 qlt_free_mailbox_command(qlt, mcp);
2480
2481 for (i = 0; i < 5; i++) {
2482 qlt->qlt_bufref[i] = 0;
2483 }
2484 qlt->qlt_bumpbucket = 0;
2485 qlt->qlt_pmintry = 0;
2486 qlt->qlt_pmin_ok = 0;
2487
2488 if (ret != QLT_SUCCESS)
2489 return (ret);
2490
2491 return (FCT_SUCCESS);
2492 }
2493
/*
 * Take the port offline: quiesce the mailbox interface, then reset the
 * chip while holding intr_lock (and, in multi-queue mode, every
 * per-queue response lock) so no interrupt path touches the hardware
 * mid-reset.  intr_sneak_counter lets the ISR tolerate the few stray
 * interrupts a reset can raise.
 */
fct_status_t
qlt_port_offline(qlt_state_t *qlt)
{
	int retries;
	int i;

	mutex_enter(&qlt->mbox_lock);

	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
		/* mailbox interface already torn down */
		mutex_exit(&qlt->mbox_lock);
		goto poff_mbox_done;
	}

	/* Wait to grab the mailboxes */
	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
	    retries++) {
		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
		/* give up after a few wakeups rather than hang forever */
		if ((retries > 5) ||
		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
			mutex_exit(&qlt->mbox_lock);
			goto poff_mbox_done;
		}
	}
	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
	mutex_exit(&qlt->mbox_lock);
poff_mbox_done:;
	qlt->intr_sneak_counter = 10;
	mutex_enter(&qlt->intr_lock);
	if (qlt->qlt_mq_enabled == 1) {
		/* lock ordering: intr_lock first, then each mq_lock */
		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
			mutex_enter(&qlt->mq_resp[i].mq_lock);
		}
	}
	(void) qlt_reset_chip(qlt);
	drv_usecwait(20);
	qlt->intr_sneak_counter = 0;
	if (qlt->qlt_mq_enabled == 1) {
		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
			mutex_exit(&qlt->mq_resp[i].mq_lock);
		}
	}
	mutex_exit(&qlt->intr_lock);

	return (FCT_SUCCESS);
}
2540
2541 static fct_status_t
2542 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
2543 {
2544 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
2545 mbox_cmd_t *mcp;
2546 fct_status_t fc_ret;
2547 fct_status_t ret;
2548 clock_t et;
2549
2550 et = ddi_get_lbolt() + drv_usectohz(5000000);
2551 mcp = qlt_alloc_mailbox_command(qlt, 0);
2552 link_info_retry:
2553 mcp->to_fw[0] = MBC_GET_ID;
2554 mcp->to_fw[9] = 0;
2555 mcp->to_fw_mask |= BIT_0 | BIT_9;
2556 mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
2557 /* Issue mailbox to firmware */
2558 ret = qlt_mailbox_command(qlt, mcp);
2559 if (ret != QLT_SUCCESS) {
2560 EL(qlt, "qlt_mbox_command=20h status=%llxh\n", ret);
2561 if ((mcp->from_fw[0] == 0x4005) &&
2562 ((mcp->from_fw[1] == 7) || (mcp->from_fw[1] == 0x1b))) {
2563 /* Firmware is not ready */
2564 if (ddi_get_lbolt() < et) {
2565 delay(drv_usectohz(50000));
2566 goto link_info_retry;
2567 }
2568 }
2569 EL(qlt, "GET ID mbox failed, ret=%llx mb0=%x mb1=%x",
2570 ret, mcp->from_fw[0], mcp->from_fw[1]);
2571 stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
2572 "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
2573 fc_ret = FCT_FAILURE;
2574 } else {
2575 li->portid = ((uint32_t)(mcp->from_fw[2])) |
2576 (((uint32_t)(mcp->from_fw[3])) << 16);
2577
2578 li->port_speed = qlt->link_speed;
2579 switch (mcp->from_fw[6]) {
2580 case 1:
2581 li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
2582 li->port_fca_flogi_done = 1;
2583 break;
2584 case 0:
2585 li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
2586 li->port_no_fct_flogi = 1;
2587 break;
2588 case 3:
2589 li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
2590 li->port_fca_flogi_done = 1;
2591 break;
2592 case 2: /*FALLTHROUGH*/
2593 case 4:
2594 li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
2595 li->port_fca_flogi_done = 1;
2596 break;
2597 default:
2598 li->port_topology = PORT_TOPOLOGY_UNKNOWN;
2599 EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
2600 }
2601 qlt->cur_topology = li->port_topology;
2602 fc_ret = FCT_SUCCESS;
2603
2604 EL(qlt, "MBC_GET_ID done, Topology=%x, portid=%xh, "
2605 "port speed=%xh\n", li->port_topology, li->portid,
2606 li->port_speed);
2607 }
2608 qlt_free_mailbox_command(qlt, mcp);
2609
2610 if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
2611 mcp = qlt_alloc_mailbox_command(qlt, 64);
2612 mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
2613 mcp->to_fw[1] = 0x7FE;
2614 mcp->to_fw[9] = 0;
2615 mcp->to_fw[10] = 0;
2616 mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
2617 fc_ret = qlt_mailbox_command(qlt, mcp);
2618 if (fc_ret != QLT_SUCCESS) {
2619 EL(qlt, "qlt_mbox_command=64h status=%llxh\n",
2620 fc_ret);
2621 stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
2622 "database for F_port failed, ret = %llx", fc_ret);
2623 } else {
2624 uint8_t *p;
2625
2626 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
2627 p = mcp->dbuf->db_sglist[0].seg_addr;
2628 bcopy(p + 0x18, li->port_rpwwn, 8);
2629 bcopy(p + 0x20, li->port_rnwwn, 8);
2630 EL(qlt, "qlt_mbox_command=64h, GET_PORT_DATABASE "
2631 "complete\n");
2632 }
2633 qlt_free_mailbox_command(qlt, mcp);
2634 }
2635 return (fc_ret);
2636 }
2637
2638 static int
2639 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
2640 {
2641 int instance;
2642 qlt_state_t *qlt;
2643
2644 if (otype != OTYP_CHR) {
2645 return (EINVAL);
2646 }
2647
2648 /*
2649 * Since this is for debugging only, only allow root to issue ioctl now
2650 */
2651 if (drv_priv(credp)) {
2726 * uploaded firmware is not supported and is provided here for test
2727 * purposes only.
2728 */
2729 /* ARGSUSED */
2730 static int
2731 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
2732 cred_t *credp, int *rval)
2733 {
2734 qlt_state_t *qlt;
2735 int ret = 0;
2736 #ifdef _LITTLE_ENDIAN
2737 int i;
2738 #endif
2739 stmf_iocdata_t *iocd;
2740 void *ibuf = NULL;
2741 void *obuf = NULL;
2742 uint32_t *intp;
2743 qlt_fw_info_t *fwi;
2744 mbox_cmd_t *mcp;
2745 fct_status_t st;
2746 char info[80];
2747 fct_status_t ret2;
2748
2749 if (drv_priv(credp) != 0)
2750 return (EPERM);
2751
2752 qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
2753 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
2754 if (ret)
2755 return (ret);
2756 iocd->stmf_error = 0;
2757
2758 switch (cmd) {
2759 case QLT_IOCTL_FETCH_FWDUMP:
2760 if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
2761 EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
2762 iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
2763 ret = EINVAL;
2764 break;
2765 }
2766 mutex_enter(&qlt->qlt_ioctl_lock);
2780 }
2781 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
2782 mutex_exit(&qlt->qlt_ioctl_lock);
2783 ret = EEXIST;
2784 EL(qlt, "fwdump already fetched\n");
2785 iocd->stmf_error = QLTIO_ALREADY_FETCHED;
2786 break;
2787 }
2788 bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
2789 qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
2790 mutex_exit(&qlt->qlt_ioctl_lock);
2791
2792 break;
2793
2794 case QLT_IOCTL_TRIGGER_FWDUMP:
2795 if (qlt->qlt_state != FCT_STATE_ONLINE) {
2796 ret = EACCES;
2797 iocd->stmf_error = QLTIO_NOT_ONLINE;
2798 break;
2799 }
2800 (void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
2801 "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
2802 info[79] = 0;
2803 if ((ret2 = fct_port_shutdown(qlt->qlt_port,
2804 STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
2805 STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
2806 EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
2807 "%llxh\n", ret2);
2808 ret = EIO;
2809 }
2810 break;
2811 case QLT_IOCTL_UPLOAD_FW:
2812 if ((iocd->stmf_ibuf_size < 1024) ||
2813 (iocd->stmf_ibuf_size & 3)) {
2814 EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
2815 iocd->stmf_ibuf_size);
2816 ret = EINVAL;
2817 iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
2818 break;
2819 }
2820 intp = (uint32_t *)ibuf;
2821 #ifdef _LITTLE_ENDIAN
2822 for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
2823 intp[i] = BSWAP_32(intp[i]);
2824 }
2825 #endif
2826 if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
2827 (((intp[intp[3] + 3] + intp[3]) << 2) !=
2828 iocd->stmf_ibuf_size)) {
2829 EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
2830 iocd->stmf_ibuf_size);
2831 ret = EINVAL;
2832 iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
2833 break;
2834 }
2835 if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
2836 (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
2837 (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
2838 !qlt->qlt_83xx_chip && !qlt->qlt_27xx_chip &&
2839 ((intp[8] & 3) == 0))) {
2840 EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
2841 ret = EACCES;
2842 iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
2843 break;
2844 }
2845
2846 /* Everything looks ok, lets copy this firmware */
2847 if (qlt->fw_code01) {
2848 kmem_free(qlt->fw_code01, (qlt->fw_length01 +
2849 qlt->fw_length02) << 2);
2850 qlt->fw_code01 = NULL;
2851 } else {
2852 atomic_inc_32(&qlt_loaded_counter);
2853 }
2854 qlt->fw_length01 = intp[3];
2855 qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
2856 KM_SLEEP);
2857 bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
2858 qlt->fw_addr01 = intp[2];
2878 break;
2879 }
2880 fwi = (qlt_fw_info_t *)obuf;
2881 if (qlt->qlt_stay_offline) {
2882 fwi->fwi_stay_offline = 1;
2883 }
2884 if (qlt->qlt_state == FCT_STATE_ONLINE) {
2885 fwi->fwi_port_active = 1;
2886 }
2887 fwi->fwi_active_major = qlt->fw_major;
2888 fwi->fwi_active_minor = qlt->fw_minor;
2889 fwi->fwi_active_subminor = qlt->fw_subminor;
2890 fwi->fwi_active_attr = qlt->fw_attr;
2891 if (qlt->fw_code01) {
2892 fwi->fwi_fw_uploaded = 1;
2893 fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
2894 fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
2895 fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
2896 fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
2897 }
2898 if (qlt->qlt_27xx_chip) {
2899 fwi->fwi_default_major = (uint16_t)fw2700_code01[4];
2900 fwi->fwi_default_minor = (uint16_t)fw2700_code01[5];
2901 fwi->fwi_default_subminor = (uint16_t)fw2700_code01[6];
2902 fwi->fwi_default_attr = (uint16_t)fw2700_code01[7];
2903 } else if (qlt->qlt_83xx_chip) {
2904 fwi->fwi_default_major = (uint16_t)fw8300fc_code01[4];
2905 fwi->fwi_default_minor = (uint16_t)fw8300fc_code01[5];
2906 fwi->fwi_default_subminor =
2907 (uint16_t)fw8300fc_code01[6];
2908 fwi->fwi_default_attr = (uint16_t)fw8300fc_code01[7];
2909 } else if (qlt->qlt_81xx_chip) {
2910 fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
2911 fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
2912 fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
2913 fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
2914 } else if (qlt->qlt_25xx_chip) {
2915 fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
2916 fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
2917 fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
2918 fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
2919 } else {
2920 fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
2921 fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
2922 fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
2923 fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
2924 }
2925 break;
2926
2927 case QLT_IOCTL_STAY_OFFLINE:
2928 if (!iocd->stmf_ibuf_size) {
2929 EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
2964 EL(qlt, "IOCTL_MBOX status=%xh\n", st);
2965 ret = EIO;
2966 switch (st) {
2967 case QLT_MBOX_NOT_INITIALIZED:
2968 iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
2969 break;
2970 case QLT_MBOX_BUSY:
2971 iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
2972 break;
2973 case QLT_MBOX_TIMEOUT:
2974 iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
2975 break;
2976 case QLT_MBOX_ABORTED:
2977 iocd->stmf_error = QLTIO_MBOX_ABORTED;
2978 break;
2979 }
2980 }
2981 break;
2982
2983 case QLT_IOCTL_ELOG:
2984 EL(qlt, "Not support yet, ioctl-%xh\n", cmd);
2985 break;
2986
2987 default:
2988 EL(qlt, "Unknown ioctl-%xh\n", cmd);
2989 ret = ENOTTY;
2990 }
2991
2992 if (ret == 0) {
2993 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
2994 } else if (iocd->stmf_error) {
2995 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
2996 }
2997 if (obuf) {
2998 kmem_free(obuf, iocd->stmf_obuf_size);
2999 obuf = NULL;
3000 }
3001 if (ibuf) {
3002 kmem_free(ibuf, iocd->stmf_ibuf_size);
3003 ibuf = NULL;
3004 }
3005 kmem_free(iocd, sizeof (stmf_iocdata_t));
3006 return (ret);
3007 }
3008
3009 static fct_status_t
3010 qlt_force_lip(qlt_state_t *qlt)
3011 {
3012 mbox_cmd_t *mcp;
3013 fct_status_t rval;
3014
3015 mcp = qlt_alloc_mailbox_command(qlt, 0);
3016 if (qlt->qlt_fcoe_enabled) {
3017 mcp->to_fw[0] = MBC_PORT_RESET;
3018 } else {
3019 mcp->to_fw[0] = MBC_LIP_FULL_LOGIN;
3020 mcp->to_fw[1] = BIT_4;
3021 mcp->to_fw[3] = 1;
3022 mcp->to_fw_mask |= BIT_1 | BIT_3;
3023 }
3024 rval = qlt_mailbox_command(qlt, mcp);
3025 if (rval != FCT_SUCCESS) {
3026 EL(qlt, "qlt force lip MB failed: rval=%x\n", rval);
3027 } else {
3028 if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS) {
3029 QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
3030 mcp->from_fw[0]);
3031 rval = FCT_FAILURE;
3032 }
3033 }
3034 qlt_free_mailbox_command(qlt, mcp);
3035 return (rval);
3036 }
3037
/*
 * FCA control entry point invoked by the FCT framework.  Handles port
 * online requests, acknowledgement of offline completion, and forced
 * LIP requests (see the ASSERT below for the accepted command set).
 * For FCT_CMD_FORCE_LIP, 'arg' is a fct_status_t out-parameter.
 */
static void
qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
{
	stmf_change_status_t st;
	/* NOTE(review): ssci appears unused in the paths visible here */
	stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;
	qlt_state_t *qlt;
	fct_status_t ret;

	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
	    (cmd == FCT_CMD_PORT_OFFLINE) ||
	    (cmd == FCT_CMD_FORCE_LIP) ||
	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));

	qlt = (qlt_state_t *)port->port_fca_private;
	st.st_completion_status = FCT_SUCCESS;
	st.st_additional_info = NULL;

	EL(qlt, "port (%p) qlt_state (%xh) cmd (%xh) arg (%p)\n",
	    port, qlt->qlt_state, cmd, arg);

	switch (cmd) {
	case FCT_CMD_PORT_ONLINE:
		/* Only a currently-offline port may be brought online */
		if (qlt->qlt_state == FCT_STATE_ONLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
			st.st_completion_status = FCT_FAILURE;
		if (st.st_completion_status == FCT_SUCCESS) {
			qlt->qlt_state = FCT_STATE_ONLINING;
			qlt->qlt_state_not_acked = 1;
			st.st_completion_status = qlt_port_online(qlt);
			if (st.st_completion_status != STMF_SUCCESS) {
				EL(qlt, "PORT_ONLINE status=%xh\n",
				    st.st_completion_status);
				/* online failed: fall back to offline */
				qlt->qlt_state = FCT_STATE_OFFLINE;
				qlt->qlt_state_not_acked = 0;
			} else {
				qlt->qlt_state = FCT_STATE_ONLINE;
			}
		}
		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);

	case FCT_ACK_PORT_OFFLINE_COMPLETE:
		qlt->qlt_state_not_acked = 0;
		/*
		 * If the offline was part of a reset and we are not being
		 * held offline, kick off re-initialization of the port.
		 */
		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
		    (qlt->qlt_stay_offline == 0)) {
			if ((ret = fct_port_initialize(port,
			    qlt->qlt_change_state_flags,
			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
				EL(qlt, "fct_port_initialize status=%llxh\n",
				    ret);
				cmn_err(CE_WARN, "qlt_ctl: "
				    "fct_port_initialize failed, please use "
				    "stmfstate to start the port-%s manualy",
				    qlt->qlt_port_alias);
			}
		}
		break;

	case FCT_CMD_FORCE_LIP:
		if (qlt->qlt_fcoe_enabled) {
			EL(qlt, "force lip is an unsupported command "
			    "for this adapter type\n");
		} else {
			/* LIP only makes sense while the link is up */
			if (qlt->qlt_state == FCT_STATE_ONLINE) {
				*((fct_status_t *)arg) = qlt_force_lip(qlt);
				EL(qlt, "forcelip done\n");
			}
		}
		break;

	default:
		EL(qlt, "unsupport cmd - 0x%02X\n", cmd);
		break;
	}
}
3148
3149 /* ARGSUSED */
3150 static fct_status_t
3151 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
3152 {
3153 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3154
3155 EL(qlt, "FLOGI requested not supported\n");
3156 cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
3157 return (FCT_FAILURE);
3158 }
3159
3160 /*
3161 * Return a pointer to n entries in the request queue. Assumes that
3162 * request queue lock is held. Does a very short busy wait if
 * less/zero entries are available. Returns NULL if it still cannot
 * fulfill the request.
3165 * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
3166 */
caddr_t
qlt_get_req_entries(qlt_state_t *qlt, uint32_t n, uint16_t qi)
{
	int try = 0;

	while (qlt->mq_req[qi].mq_available < n) {
		uint32_t val1, val2, val3;

		if (qlt->qlt_mq_enabled) {
			/* debounce: read until three samples agree */
			val1 = MQBAR_RD32(qlt,
			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
			val2 = MQBAR_RD32(qlt,
			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
			val3 = MQBAR_RD32(qlt,
			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
		} else {
			val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
			val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
			val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
		}
		if ((val1 != val2) || (val2 != val3))
			continue;

		qlt->mq_req[qi].mq_ndx_from_fw = val1;
		/*
		 * Free slots = (ring size - 1) - (in - out) mod ring size.
		 * Queue 0 uses the base ring size; the other (MQ) queues
		 * use REQUEST_QUEUE_MQ_ENTRIES.
		 */
		if (qi != 0) {
			qlt->mq_req[qi].mq_available =
			    REQUEST_QUEUE_MQ_ENTRIES - 1 -
			    ((qlt->mq_req[qi].mq_ndx_to_fw -
			    qlt->mq_req[qi].mq_ndx_from_fw) &
			    (REQUEST_QUEUE_MQ_ENTRIES - 1));
		} else {
			qlt->mq_req[qi].mq_available =
			    REQUEST_QUEUE_ENTRIES - 1 -
			    ((qlt->mq_req[qi].mq_ndx_to_fw -
			    qlt->mq_req[qi].mq_ndx_from_fw) &
			    (REQUEST_QUEUE_ENTRIES - 1));
		}
		if (qlt->mq_req[qi].mq_available < n) {
			/* short busy-wait, then give up after 2 retries */
			if (try < 2) {
				drv_usecwait(100);
				try++;
				continue;
			} else {
				stmf_trace(qlt->qlt_port_alias,
				    "Req Q# %xh is full", qi);
				EL(qlt, "Req %xh is full (%d,%d) (%d,%d)\n",
				    qi, qlt->mq_req[qi].mq_ndx_to_fw,
				    qlt->mq_req[qi].mq_ndx_from_fw,
				    n, qlt->mq_req[qi].mq_available);
				return (NULL);
			}
		}
		break;
	}
	/* We don't change anything until the entries are submitted */
	/* (each request queue IOCB entry is 64 bytes, hence the << 6) */
	return (&qlt->mq_req[qi].mq_ptr[qlt->mq_req[qi].mq_ndx_to_fw << 6]);
}
3225
3226 /*
3227 * updates the req in ptr to fw. Assumes that req lock is held.
3228 */
3229 void
3230 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n, uint16_t qi)
3231 {
3232
3233 ASSERT(n >= 1);
3234
3235 qlt->mq_req[qi].mq_ndx_to_fw += n;
3236 if (qi != 0) {
3237 qlt->mq_req[qi].mq_ndx_to_fw &= REQUEST_QUEUE_MQ_ENTRIES - 1;
3238 } else {
3239 qlt->mq_req[qi].mq_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
3240 }
3241 qlt->mq_req[qi].mq_available -= n;
3242
3243 if (qlt->qlt_mq_enabled) {
3244 MQBAR_WR32(qlt, (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN,
3245 qlt->mq_req[qi].mq_ndx_to_fw);
3246 } else {
3247 REG_WR32(qlt, REG_REQ_IN_PTR, qlt->mq_req[0].mq_ndx_to_fw);
3248 }
3249 }
3250
3251 /*
3252 * Return a pointer to n entries in the priority request queue. Assumes that
3253 * priority request queue lock is held. Does a very short busy wait if
 * less/zero entries are available. Returns NULL if it still cannot
 * fulfill the request.
3256 * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
3257 */
3258 caddr_t
3259 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
3260 {
3261 int try = 0;
3262 uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
3263 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
3264 (PRIORITY_QUEUE_ENTRIES - 1));
3265
3266 while (req_available < n) {
3267 uint32_t val1, val2, val3;
3268 val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
3269 val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
3270 val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
3307 * - A very hardware specific function. Does not touch driver state.
3308 * - Assumes that interrupts are disabled or not there.
3309 * - Expects that the caller makes sure that all activity has stopped
3310 * and its ok now to go ahead and reset the chip. Also the caller
3311 * takes care of post reset damage control.
3312 * - called by initialize adapter() and dump_fw(for reset only).
3313 * - During attach() nothing much is happening and during initialize_adapter()
3314 * the function (caller) does all the housekeeping so that this function
3315 * can execute in peace.
3316 * - Returns 0 on success.
3317 */
static fct_status_t
qlt_reset_chip(qlt_state_t *qlt)
{
	int cntr;

	EL(qlt, "initiated\n");

	/* XXX: Switch off LEDs */

	qlt->qlt_intr_enabled = 0;
	/* Disable Interrupts */
	REG_WR32(qlt, REG_INTR_CTRL, 0);
	/* read back to flush the posted write */
	(void) REG_RD32(qlt, REG_INTR_CTRL);
	/* Stop DMA */
	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);

	/* Wait for DMA to be stopped */
	cntr = 0;
	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
		cntr++;
		/* 3 sec should be more than enough */
		if (cntr == 300)
			return (QLT_DMA_STUCK);
	}

	/* need to ensure no one accesses the hw during the reset 100us */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		/* take mbox lock first, then the per-queue locks in order */
		mutex_enter(&qlt->mbox_lock);
		if (qlt->qlt_mq_enabled == 1) {
			int i;

			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
				mutex_enter(&qlt->mq_req[i].mq_lock);
			}
		}
		mutex_enter(&qlt->mq_req[0].mq_lock);
		/*
		 * We need to give time for other threads to finish their
		 * interrupts (or we need another lock)
		 */
		drv_usecwait(40);
	}

	/* Reset the Chip */
	REG_WR32(qlt, REG_CTRL_STATUS,
	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);

	qlt->qlt_link_up = 0;

	drv_usecwait(100);

	/* release the locks in the reverse of the acquisition order */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_exit(&qlt->mq_req[0].mq_lock);
		if (qlt->qlt_mq_enabled == 1) {
			int i;

			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
				mutex_exit(&qlt->mq_req[i].mq_lock);
			}
		}
		mutex_exit(&qlt->mbox_lock);
	}

	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
	cntr = 0;
	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
		delay(drv_usectohz(10000));
		cntr++;
		/* 3 sec should be more than enough */
		if (cntr == 300)
			return (QLT_ROM_STUCK);
	}
	/* Disable Interrupts (Probably not needed) */
	REG_WR32(qlt, REG_INTR_CTRL, 0);

	return (QLT_SUCCESS);
}
3396
3397 /*
3398 * - Should not be called from Interrupt.
3399 * - A very hardware specific function. Does not touch driver state.
3400 * - Assumes that interrupts are disabled or not there.
3401 * - Expects that the caller makes sure that all activity has stopped
3402 * and its ok now to go ahead and reset the chip. Also the caller
3403 * takes care of post reset damage control.
3404 * - called by initialize adapter() and dump_fw(for reset only).
3405 * - During attach() nothing much is happening and during initialize_adapter()
3406 * the function (caller) does all the housekeeping so that this function
3407 * can execute in peace.
3408 * - Returns 0 on success.
3409 */
static fct_status_t
qlt_download_fw(qlt_state_t *qlt)
{
	uint32_t start_addr;
	fct_status_t ret;

	EL(qlt, "initiated\n");

	(void) qlt_reset_chip(qlt);

	if (qlt->qlt_81xx_chip) {
		qlt_mps_reset(qlt);
	}

	/*
	 * Load the two segments.  A user-uploaded image (fw_code01, via
	 * QLT_IOCTL_UPLOAD_FW) takes precedence; otherwise pick the
	 * built-in image matching the chip generation.
	 */
	if (qlt->fw_code01 != NULL) {
		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
		    qlt->fw_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
			    qlt->fw_length02, qlt->fw_addr02);
		}
		start_addr = qlt->fw_addr01;
	} else if (qlt->qlt_27xx_chip) {
		(void) qlt_27xx_get_dmp_template(qlt);
		ret = qlt_load_risc_ram(qlt, fw2700_code01,
		    fw2700_length01, fw2700_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2700_code02,
			    fw2700_length02, fw2700_addr02);
		}
		start_addr = fw2700_addr01;
	} else if (qlt->qlt_83xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw8300fc_code01,
		    fw8300fc_length01, fw8300fc_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw8300fc_code02,
			    fw8300fc_length02, fw8300fc_addr02);
		}
		start_addr = fw8300fc_addr01;
	} else if (qlt->qlt_81xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
		    fw8100_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw8100_code02,
			    fw8100_length02, fw8100_addr02);
		}
		start_addr = fw8100_addr01;
	} else if (qlt->qlt_25xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
		    fw2500_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2500_code02,
			    fw2500_length02, fw2500_addr02);
		}
		start_addr = fw2500_addr01;
	} else {
		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
		    fw2400_addr01);
		if (ret == QLT_SUCCESS) {
		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
		return (ret);
	}

	/* Verify Checksum */
	REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
		return (ret);
	}

	/* Execute firmware */
	REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
	REG_WR16(qlt, REG_MBOX(3), 0);
#ifdef EXTRA_CREDIT
	/* enable extra credits (reduces available buffers) */
	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
	    (qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		REG_WR16(qlt, REG_MBOX(4), 1);
	} else {
		REG_WR16(qlt, REG_MBOX(4), 0);
	}
#else
	REG_WR16(qlt, REG_MBOX(4), 0);
#endif
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
		return (ret);
	} else {
		if (qlt->qlt_27xx_chip) {
			/* 27xx reports negotiated speed in mbox 2/3 */
			qlt->qlt_27xx_speed = (uint32_t)
			    (REG_RD16(qlt, REG_MBOX(3)) << 16 |
			    REG_RD16(qlt, REG_MBOX(2)));

		}
	}

	/* Get revisions (About Firmware) */
	REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
	ret = qlt_raw_mailbox_command(qlt);
	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
		return (ret);
	}

	if (qlt->qlt_27xx_chip) {
		/* cache the 27xx memory layout for firmware dump support */
		qlt->fw_ext_memory_end = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(4)),
		    REG_RD16(qlt, REG_MBOX(5)));
		qlt->fw_shared_ram_start = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(18)),
		    REG_RD16(qlt, REG_MBOX(19)));
		qlt->fw_shared_ram_end = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(20)),
		    REG_RD16(qlt, REG_MBOX(21)));
		qlt->fw_ddr_ram_start = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(22)),
		    REG_RD16(qlt, REG_MBOX(23)));
		qlt->fw_ddr_ram_end = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(24)),
		    REG_RD16(qlt, REG_MBOX(25)));
	}


	return (QLT_SUCCESS);
}
3557
3558 /*
3559 * Used only from qlt_download_fw().
3560 */
static fct_status_t
qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
    uint32_t word_count, uint32_t risc_addr)
{
	uint32_t words_sent = 0;
	uint32_t words_being_sent;
	uint32_t *cur_host_addr;
	uint32_t cur_risc_addr;
	uint64_t da;
	fct_status_t ret;

	/* copy the image to RISC RAM in chunks via LOAD_RAM_EXTENDED */
	while (words_sent < word_count) {
		cur_host_addr = &(host_addr[words_sent]);
		cur_risc_addr = risc_addr + (words_sent << 2);
		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
		da = qlt->queue_mem_cookie.dmac_laddress;
		REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
		/*
		 * NOTE(review): MBOX(1) is loaded with risc_addr while
		 * MBOX(8) uses cur_risc_addr; confirm the low word should
		 * not also track cur_risc_addr for multi-chunk loads.
		 */
		REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
		REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
		REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
		REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
		REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
		REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
		REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
		REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
		ret = qlt_raw_mailbox_command(qlt);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		if (ret != QLT_SUCCESS) {
			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
			    ret);
			return (ret);
		}
		words_sent += words_being_sent;
	}
	EL(qlt, "qlt_raw_mailbox_command=0Bh, LOAD_RAM_EXTENDED complete\n");
	return (QLT_SUCCESS);
}
3604
3605 /*
3606 * Not used during normal operation. Only during driver init.
3607 * Assumes that interrupts are disabled and mailboxes are loaded.
 * Just triggers the mailbox command and waits for the completion.
 * Also expects that there is nothing else going on and we will only
3610 * get back a mailbox completion from firmware.
3611 * ---DOES NOT CLEAR INTERRUPT---
3612 * Used only from the code path originating from
3613 * qlt_reset_chip()
3614 */
static fct_status_t
qlt_raw_mailbox_command(qlt_state_t *qlt)
{
	int cntr = 0;
	uint32_t status;
	fct_local_port_t *port = qlt->qlt_port;

	/* ring the doorbell, then poll for the RISC-to-PCI interrupt */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
retry_raw:;
	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
		cntr++;
		/* give up after ~30 seconds (3000 * 10ms) of polling */
		if (cntr == 3000) {
			EL(qlt, "polling exhausted, dump fw now..\n");
			(void) qlt_firmware_dump(port,
			    (stmf_state_change_info_t *)NULL);
			return (QLT_MAILBOX_STUCK);
		}
		delay(drv_usectohz(10000));
	}
	status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);

	if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
	    (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
	    (status == MBX_CMD_SUCCESSFUL) ||
	    (status == MBX_CMD_NOT_SUCCESSFUL)) {
		/* mailbox completion: mbox 0 carries the firmware verdict */
		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
		if (mbox0 == QLT_MBX_CMD_SUCCESS) {
			return (QLT_SUCCESS);
		} else {
			EL(qlt, "mbx cmd failed, dump fw now..\n");
			(void) qlt_firmware_dump(port,
			    (stmf_state_change_info_t *)NULL);
			return (QLT_MBOX_FAILED | mbox0);
		}
	} else if (status == ASYNC_EVENT) {
		uint16_t mbox0, mbox1, mbox2, mbox3;
		uint16_t mbox4, mbox5, mbox6, mbox7;

		mbox0 = REG_RD16(qlt, REG_MBOX(0));
		mbox1 = REG_RD16(qlt, REG_MBOX(1));
		mbox2 = REG_RD16(qlt, REG_MBOX(2));
		mbox3 = REG_RD16(qlt, REG_MBOX(3));
		mbox4 = REG_RD16(qlt, REG_MBOX(4));
		mbox5 = REG_RD16(qlt, REG_MBOX(5));
		mbox6 = REG_RD16(qlt, REG_MBOX(6));
		mbox7 = REG_RD16(qlt, REG_MBOX(7));

		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x"
		    "mb3=%x mb4=%x mb5=%x mb6=%x mb7=%x",
		    qlt->instance, mbox0, mbox1, mbox2, mbox3,
		    mbox4, mbox5, mbox6, mbox7);
		/* 0x8002 is treated as fatal; anything else is retried */
		if (mbox0 == 0x8002) {
			(void) qlt_firmware_dump(port,
			    (stmf_state_change_info_t *)NULL);
			return (QLT_UNEXPECTED_RESPONSE);
		} else {
			/* ack the async event and resume polling */
			REG_WR32(qlt,
			    REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
			cntr = 0;
			goto retry_raw;
		}
	}

	/* This is unexpected, dump a message */
	cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx",
	    ddi_get_instance(qlt->dip), (unsigned long long)status);
	return (QLT_UNEXPECTED_RESPONSE);
}
3683
/*
 * Allocate a zeroed mailbox command structure; when dma_size is
 * non-zero, also attach a DMA buffer of (at least) that size.
 * Returns NULL only if the DMA buffer allocation fails.
 */
static mbox_cmd_t *
qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
{
	mbox_cmd_t *mcp;

	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
	if (dma_size) {
		qlt_dmem_bctl_t *bctl;
		uint64_t da;

		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
		if (mcp->dbuf == NULL) {
			kmem_free(mcp, sizeof (*mcp));
			return (NULL);
	/* always request mailbox 0 back from the firmware */
	mcp->from_fw_mask |= BIT_0;
	return (mcp);
}
3715
3716 void
3717 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
3718 {
3719 if (mcp->dbuf)
3720 qlt_i_dmem_free(qlt, mcp->dbuf);
3721 kmem_free(mcp, sizeof (*mcp));
3722 }
3723
3724 /*
3725 * This can sleep. Should never be called from interrupt context.
3726 */
3727 static fct_status_t
3728 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
3729 {
3730 int retries;
3731 int i;
3732 char info[80];
3733
3734 if (curthread->t_flag & T_INTR_THREAD) {
3735 ASSERT(0);
3736 return (QLT_MBOX_FAILED);
3737 }
3738
3739 EL(qlt, "mailbox:[0]=%xh [1]=%xh\n",
3740 mcp->to_fw[0], mcp->to_fw[1]);
3741
3742 mutex_enter(&qlt->mbox_lock);
3743 /* See if mailboxes are still uninitialized */
3744 if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
3745 mutex_exit(&qlt->mbox_lock);
3746 return (QLT_MBOX_NOT_INITIALIZED);
3747 }
3748
3749 /* Wait to grab the mailboxes */
3750 for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
3751 retries++) {
3752 cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
3753 if ((retries > 5) ||
3754 (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
3755 mutex_exit(&qlt->mbox_lock);
3756 return (QLT_MBOX_BUSY);
3757 }
3758 }
3759 /* Make sure we always ask for mailbox 0 */
3760 mcp->from_fw_mask |= BIT_0;
3761
3762 /* Load mailboxes, set state and generate RISC interrupt */
3763 qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
3764 qlt->mcp = mcp;
3765 for (i = 0; i < MAX_MBOXES; i++) {
3766 if (mcp->to_fw_mask & ((uint32_t)1 << i))
3767 REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
3768 }
3769 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
3770
3771 qlt_mbox_wait_loop:;
3772 /* Wait for mailbox command completion */
3773 if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
3774 + drv_usectohz(MBOX_TIMEOUT)) < 0) {
3775 (void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
3776 "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
3777 info[79] = 0;
3778 qlt->mcp = NULL;
3779 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
3780 mutex_exit(&qlt->mbox_lock);
3781
3782 /*
3783 * XXX Throw HBA fatal error event
3784 */
3785 (void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
3786 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3787 return (QLT_MBOX_TIMEOUT);
3788 }
3789 if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
3790 goto qlt_mbox_wait_loop;
3791
3792 qlt->mcp = NULL;
3793
3794 /* Make sure its a completion */
3795 if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
3796 ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
3797 mutex_exit(&qlt->mbox_lock);
3798 return (QLT_MBOX_ABORTED);
3799 }
3800
3801 /* MBox command completed. Clear state, retuen based on mbox 0 */
3802 /* Mailboxes are already loaded by interrupt routine */
3803 qlt->mbox_io_state = MBOX_STATE_READY;
3804 mutex_exit(&qlt->mbox_lock);
3805 if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS) {
3806 EL(qlt, "fw[0] = %xh\n", mcp->from_fw[0]);
3807 if ((mcp->from_fw[0] != 0x4005) &&
3808 (mcp->from_fw[1] != 0x7)) {
3809 (void) qlt_firmware_dump(qlt->qlt_port,
3810 (stmf_state_change_info_t *)NULL);
3811 }
3812 return (QLT_MBOX_FAILED | mcp->from_fw[0]);
3813 }
3814
3815 return (QLT_SUCCESS);
3816 }
3817
/*
 * State for the "invalid index" warning in qlt_msix_resp_handler():
 * the lbolt at which the message may next be logged (throttled to
 * avoid flooding the console), and a running count of occurrences.
 */
clock_t qlt_next_invalid_msg = 0;
int qlt_invalid_idx_cnt = 0;
3820
3821 /*
3822 * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
3823 */
3824 /* ARGSUSED */
3825 static uint_t
3826 qlt_msix_resp_handler(caddr_t arg, caddr_t arg2)
3827 {
3828 qlt_state_t *qlt = (qlt_state_t *)arg;
3829 uint32_t risc_status;
3830 uint16_t qi = 0;
3831 clock_t now;
3832
3833 risc_status = REG_RD32(qlt, REG_RISC_STATUS);
3834 if (qlt->qlt_mq_enabled) {
3835 /* XXX: */
3836 /* qi = (uint16_t)((unsigned long)arg2); */
3837 qi = (uint16_t)(risc_status >> 16);
3838 if (qi >= MQ_MAX_QUEUES) {
3839 /*
3840 * Two customers have reported panics in the call to
3841 * mutex_enter below. Analysis showed the address passed
3842 * in could only occur if 'qi' had a value of 0x4000.
3843 * We'll ignore the upper bits and see if an index which
3844 * at least within the range of possible produces some
3845 * sane results.
3846 */
3847 now = ddi_get_lbolt();
3848 if (now > qlt_next_invalid_msg) {
3849 /*
3850 * Since this issue has never been seen in the
3851 * lab it's unknown if once this bit gets set
3852 * does it remain until the next hardware reset?
3853 * If so, we don't want to flood the message
3854 * buffer or make it difficult to reboot the
3855 * system.
3856 */
3857 qlt_next_invalid_msg = now +
3858 drv_usectohz(MICROSEC * 10);
3859 cmn_err(CE_NOTE, "QLT: hardware reporting "
3860 "invalid index: 0x%x", qi);
3861 }
3862 qi &= MQ_MAX_QUEUES_MASK;
3863 qlt_invalid_idx_cnt++;
3864 }
3865
3866 mutex_enter(&qlt->mq_resp[qi].mq_lock);
3867 if (!qlt->qlt_intr_enabled) {
3868 /*
3869 * No further interrupt since intr disabled.
3870 */
3871 REG_WR32(qlt, REG_HCCR,
3872 HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3873 mutex_exit(&qlt->mq_resp[qi].mq_lock);
3874 return (DDI_INTR_UNCLAIMED);
3875 }
3876
3877 qlt->mq_resp[qi].mq_ndx_from_fw =
3878 (uint16_t)MQBAR_RD32(qlt,
3879 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
3880
3881 qlt_handle_resp_queue_update(qlt, qi);
3882 mutex_exit(&qlt->mq_resp[qi].mq_lock);
3883 } else {
3884 mutex_enter(&qlt->intr_lock);
3885 if (!qlt->qlt_intr_enabled) {
3886 /*
3887 * No further interrupt since intr disabled.
3888 */
3889 REG_WR32(qlt, REG_HCCR,
3890 HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3891 mutex_exit(&qlt->intr_lock);
3892 return (DDI_INTR_UNCLAIMED);
3893 }
3894
3895 qlt->atio_ndx_from_fw =
3896 (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
3897 qlt_handle_atio_queue_update(qlt);
3898
3899 qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
3900 qlt_handle_resp_queue_update(qlt, qi);
3901 mutex_exit(&qlt->intr_lock);
3902 }
3903
3904 if (risc_status & BIT_15) {
3905 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3906 }
3907 return (DDI_INTR_CLAIMED);
3908 }
3909
3910
3911 /*
3912 * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
3913 */
3914 /* ARGSUSED */
3915 static uint_t
3916 qlt_msix_default_handler(caddr_t arg, caddr_t arg2)
3917 {
3918 qlt_state_t *qlt = (qlt_state_t *)arg;
3919 uint32_t risc_status, intr_type;
3920 int i;
3921 char info[80];
3922
3923 risc_status = REG_RD32(qlt, REG_RISC_STATUS);
3924 if (!mutex_tryenter(&qlt->intr_lock)) {
3925 /*
3926 * Normally we will always get this lock. If tryenter is
3927 * failing then it means that driver is trying to do
3928 * some cleanup and is masking the intr but some intr
3929 * has sneaked in between. See if our device has generated
3930 * this intr. If so then wait a bit and return claimed.
3931 * If not then return claimed if this is the 1st instance
3932 * of a interrupt after driver has grabbed the lock.
3933 */
3934 if ((risc_status & BIT_15) == 0) {
3935 return (DDI_INTR_UNCLAIMED);
3936 } else {
3937 /* try again */
3938 drv_usecwait(10);
3939 if (!mutex_tryenter(&qlt->intr_lock)) {
3940 /* really bad! */
3941 return (DDI_INTR_CLAIMED);
3942 }
3943 }
3944 }
3945 if (((risc_status & BIT_15) == 0) ||
3946 (qlt->qlt_intr_enabled == 0)) {
3947 /*
3948 * This might be a pure coincedence that we are operating
3949 * in a interrupt disabled mode and another device
3950 * sharing the interrupt line has generated an interrupt
3951 * while an interrupt from our device might be pending. Just
3952 * ignore it and let the code handling the interrupt
3953 * disabled mode handle it.
3954 */
3955 mutex_exit(&qlt->intr_lock);
3956 return (DDI_INTR_UNCLAIMED);
3957 }
3958
3959 /* REG_WR32(qlt, REG_INTR_CTRL, 0); */
3960
3961 /* check for risc pause - unlikely */
3962 if (risc_status & BIT_8) {
3963 uint32_t hccsr;
3964
3965 hccsr = REG_RD32(qlt, REG_HCCR);
3966 EL(qlt, "Risc Pause status=%xh hccsr=%x\n",
3967 risc_status, hccsr);
3968 cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x hccsr:%x",
3969 qlt->instance, risc_status, hccsr);
3970 (void) snprintf(info, 80, "Risc Pause %08x hccsr:%x",
3971 risc_status, hccsr);
3972 info[79] = 0;
3973 (void) fct_port_shutdown(qlt->qlt_port,
3974 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3975 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3976 }
3977
3978 /* check most likely types first */
3979 intr_type = risc_status & 0xff;
3980 if (intr_type == 0x1D) {
3981 qlt->atio_ndx_from_fw =
3982 (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
3983 qlt_handle_atio_queue_update(qlt);
3984 qlt->mq_resp[0].mq_ndx_from_fw = risc_status >> 16;
3985 qlt_handle_resp_queue_update(qlt, 0);
3986 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3987 } else if (intr_type == 0x1C) {
3988 qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
3989 qlt_handle_atio_queue_update(qlt);
3990 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3991 } else if (intr_type == 0x1E) {
3992 /* 83xx */
3993 qlt->atio_ndx_from_fw =
3994 (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
3995 qlt_handle_atio_queue_update(qlt);
3996 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3997 } else if (intr_type == 0x13) {
3998 uint16_t qi;
3999
4000 qlt->atio_ndx_from_fw =
4001 (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
4002 qlt_handle_atio_queue_update(qlt);
4003
4004 if (qlt->qlt_mq_enabled) {
4005 qi = (uint16_t)(risc_status >> 16);
4006 qlt->mq_resp[qi].mq_ndx_from_fw =
4007 (uint16_t)MQBAR_RD32(qlt,
4008 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4009 /* FIX THIS to be optional */
4010 REG_WR32(qlt, REG_HCCR,
4011 HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4012 } else {
4013 qi = 0;
4014 REG_WR32(qlt, REG_HCCR,
4015 HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4016 qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
4017 }
4018 qlt_handle_resp_queue_update(qlt, qi);
4019
4020 } else if (intr_type == 0x14) {
4021 uint16_t qi = (uint16_t)(risc_status >> 16);
4022
4023 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4024 qlt->atio_ndx_from_fw =
4025 (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4026 } else {
4027 qlt->atio_ndx_from_fw = (uint16_t)
4028 REG_RD32(qlt, REG_ATIO_IN_PTR);
4029 }
4030 qlt_handle_atio_queue_update(qlt);
4031
4032 qlt->mq_resp[qi].mq_ndx_from_fw =
4033 (uint16_t)MQBAR_RD32(qlt,
4034 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4035 qlt_handle_resp_queue_update(qlt, qi);
4036
4037 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4038
4039 } else if (intr_type == 0x12) {
4040 uint16_t code, mbox1, mbox2, mbox3, mbox4, mbox5, mbox6;
4041
4042 REG_WR32(qlt, REG_INTR_CTRL, 0);
4043
4044 code = (uint16_t)(risc_status >> 16);
4045 mbox1 = REG_RD16(qlt, REG_MBOX(1));
4046 mbox2 = REG_RD16(qlt, REG_MBOX(2));
4047 mbox3 = REG_RD16(qlt, REG_MBOX(3));
4048 mbox4 = REG_RD16(qlt, REG_MBOX(4));
4049 mbox5 = REG_RD16(qlt, REG_MBOX(5));
4050 mbox6 = REG_RD16(qlt, REG_MBOX(6));
4051
4052 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4053 EL(qlt, "Async event: %x mb1=%x mb2=%x,"
4054 " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
4055 mbox3, mbox4, mbox5, mbox6);
4056 stmf_trace(qlt->qlt_port_alias, "Async event: %x mb1=%x mb2=%x,"
4057 " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
4058 mbox3, mbox4, mbox5, mbox6);
4059 cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
4060 " mb3=%x, mb4=%x, mb5=%x, mb6=%x", qlt->instance, code,
4061 mbox1, mbox2, mbox3, mbox4, mbox5, mbox6);
4062
4063 if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
4064 if (qlt->qlt_link_up) {
4065 fct_handle_event(qlt->qlt_port,
4066 FCT_EVENT_LINK_RESET, 0, 0);
4067 }
4068 } else if (code == 0x8012) {
4069 qlt->qlt_link_up = 0;
4070 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
4071 0, 0);
4072 } else if (code == 0x8014) {
4073 if (mbox1 == 0xffff) { /* global event */
4074 uint8_t reason_code;
4075
4076 reason_code = (uint8_t)(mbox3 >> 8);
4077
4078 switch (reason_code) {
4079 case 0x1d: /* FIP Clear Virtual Link received */
4080 case 0x1a: /* received FLOGO */
4081 case 0x1c: /* FCF configuration changed */
4082 case 0x1e: /* FKA timeout */
4083 if (mbox2 == 7) {
4084 qlt->qlt_link_up = 0;
4085 fct_handle_event(qlt->qlt_port,
4086 FCT_EVENT_LINK_DOWN, 0, 0);
4087 }
4088 break;
4089 case 0x12:
4090 if (mbox2 == 4) {
4091 qlt->qlt_link_up = 1;
4092 fct_handle_event(qlt->qlt_port,
4093 FCT_EVENT_LINK_UP, 0, 0);
4094 stmf_trace(qlt->qlt_port_alias,
4095 "SNS login and SCR done");
4096 }
4097 break;
4098 case 0:
4099 if ((mbox2 == 6) &&
4100 (!qlt->qlt_link_up)) {
4101 qlt->qlt_link_up = 1;
4102 fct_handle_event(qlt->qlt_port,
4103 FCT_EVENT_LINK_UP, 0, 0);
4104 stmf_trace(qlt->qlt_port_alias,
4105 "Link reinitialised");
4106 }
4107 break;
4108 default:
4109 stmf_trace(qlt->qlt_port_alias,
4110 "AEN ignored");
4111 break;
4112 }
4113 }
4114 } else if (code == 0x8011) {
4115 switch (mbox1) {
4116 case 0: qlt->link_speed = PORT_SPEED_1G;
4117 break;
4118 case 1: qlt->link_speed = PORT_SPEED_2G;
4119 break;
4120 case 3: qlt->link_speed = PORT_SPEED_4G;
4121 break;
4122 case 4: qlt->link_speed = PORT_SPEED_8G;
4123 break;
4124 case 5: qlt->link_speed = PORT_SPEED_16G;
4125 break;
4126 case 0x13: qlt->link_speed = PORT_SPEED_10G;
4127 break;
4128 default:
4129 qlt->link_speed = PORT_SPEED_UNKNOWN;
4130 }
4131 qlt->qlt_link_up = 1;
4132 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
4133 0, 0);
4134 } else if ((code == 0x8002) || (code == 0x8003) ||
4135 (code == 0x8004) || (code == 0x8005)) {
4136 (void) snprintf(info, 80,
4137 "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
4138 code, mbox1, mbox2, mbox5, mbox6);
4139 info[79] = 0;
4140 (void) fct_port_shutdown(qlt->qlt_port,
4141 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4142 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4143 } else if (code == 0x800F) {
4144 (void) snprintf(info, 80,
4145 "Got 800F, mb1=%x mb2=%x mb3=%x",
4146 mbox1, mbox2, mbox3);
4147
4148 if (mbox1 != 1) {
4149 /* issue "verify fw" */
4150 qlt_verify_fw(qlt);
4151 }
4152 } else if (code == 0x8101) {
4153 (void) snprintf(info, 80,
4154 "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
4155 code, mbox1, mbox2, mbox3);
4156 info[79] = 0;
4157
4158 /* check if "ACK" is required (timeout != 0) */
4159 if (mbox1 & 0x0f00) {
4160 caddr_t req;
4161
4162 /*
4163 * Ack the request (queue work to do it?)
4164 * using a mailbox iocb
4165 * (Only Queue #0 allowed)
4166 */
4167 mutex_enter(&qlt->mq_req[0].mq_lock);
4168 req = qlt_get_req_entries(qlt, 1, 0);
4169 if (req) {
4170 bzero(req, IOCB_SIZE);
4171 req[0] = 0x39; req[1] = 1;
4172 QMEM_WR16(qlt, req+8, 0x101);
4173 QMEM_WR16(qlt, req+10, mbox1);
4174 QMEM_WR16(qlt, req+12, mbox2);
4175 QMEM_WR16(qlt, req+14, mbox3);
4176 QMEM_WR16(qlt, req+16, mbox4);
4177 QMEM_WR16(qlt, req+18, mbox5);
4178 QMEM_WR16(qlt, req+20, mbox6);
4179 qlt_submit_req_entries(qlt, 1, 0);
4180 } else {
4181 (void) snprintf(info, 80,
4182 "IDC ACK failed");
4183 info[79] = 0;
4184 }
4185 mutex_exit(&qlt->mq_req[0].mq_lock);
4186 }
4187 } else {
4188 stmf_trace(qlt->qlt_port_alias,
4189 "Async event: 0x%x ignored",
4190 code);
4191 }
4192 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
4193 } else if ((intr_type == 0x10) || (intr_type == 0x11)) {
4194 /* Handle mailbox completion */
4195 mutex_enter(&qlt->mbox_lock);
4196 if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
4197 cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
4198 " when driver wasn't waiting for it %d",
4199 qlt->instance, qlt->mbox_io_state);
4200 } else {
4201 for (i = 0; i < MAX_MBOXES; i++) {
4202 if (qlt->mcp->from_fw_mask &
4203 (((uint32_t)1) << i)) {
4204 qlt->mcp->from_fw[i] =
4205 REG_RD16(qlt, REG_MBOX(i));
4206 }
4207 }
4208 qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
4209 }
4210 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4211 cv_broadcast(&qlt->mbox_cv);
4212 mutex_exit(&qlt->mbox_lock);
4213 } else {
4214 cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
4215 qlt->instance, intr_type);
4216 stmf_trace(qlt->qlt_port_alias,
4217 "%s: Unknown intr type 0x%x [%x]",
4218 __func__, intr_type, risc_status);
4219 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4220 }
4221
4222 /* REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR); */
4223 mutex_exit(&qlt->intr_lock);
4224
4225 return (DDI_INTR_CLAIMED);
4226 }
4227
4228 /*
4229 * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
4230 */
4231 /* ARGSUSED */
4232 static uint_t
4233 qlt_isr(caddr_t arg, caddr_t arg2)
4234 {
4235 qlt_state_t *qlt = (qlt_state_t *)arg;
4236 uint32_t risc_status, intr_type;
4237 int i;
4238 int intr_loop_count;
4239 char info[80];
4240
4241 risc_status = REG_RD32(qlt, REG_RISC_STATUS);
4242 if (!mutex_tryenter(&qlt->intr_lock)) {
4243 /*
4244 * Normally we will always get this lock. If tryenter is
4245 * failing then it means that driver is trying to do
4246 * some cleanup and is masking the intr but some intr
4247 * has sneaked in between. See if our device has generated
4248 * this intr. If so then wait a bit and return claimed.
4249 * If not then return claimed if this is the 1st instance
4250 * of a interrupt after driver has grabbed the lock.
4251 */
4252 if (risc_status & BIT_15) {
4253 drv_usecwait(10);
4254 return (DDI_INTR_CLAIMED);
4255 } else if (qlt->intr_sneak_counter) {
4256 qlt->intr_sneak_counter--;
4257 return (DDI_INTR_CLAIMED);
4258 } else {
4259 return (DDI_INTR_UNCLAIMED);
4260 }
4261 }
4262 if (((risc_status & BIT_15) == 0) ||
4263 (qlt->qlt_intr_enabled == 0)) {
4264 /*
4265 * This might be a pure coincedence that we are operating
4266 * in a interrupt disabled mode and another device
4267 * sharing the interrupt line has generated an interrupt
4268 * while an interrupt from our device might be pending. Just
4269 * ignore it and let the code handling the interrupt
4270 * disabled mode handle it.
4271 */
4272 mutex_exit(&qlt->intr_lock);
4273 return (DDI_INTR_UNCLAIMED);
4274 }
4275
4276 /*
4277 * XXX take care for MSI-X case. disable intrs
4278 * Its gonna be complicated because of the max iterations.
4279 * as hba will have posted the intr which did not go on PCI
4280 * but we did not service it either because of max iterations.
4281 * Maybe offload the intr on a different thread.
4282 */
4283 intr_loop_count = 0;
4284
4285 REG_WR32(qlt, REG_INTR_CTRL, 0);
4286
4287 intr_again:;
4288
4289 /* check for risc pause */
4290 if (risc_status & BIT_8) {
4291 EL(qlt, "Risc Pause status=%xh\n", risc_status);
4292 cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
4293 qlt->instance, risc_status);
4294 (void) snprintf(info, 80, "Risc Pause %08x", risc_status);
4295 info[79] = 0;
4296 (void) fct_port_shutdown(qlt->qlt_port,
4297 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4298 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4299 }
4300
4301 /* First check for high performance path */
4302 intr_type = risc_status & 0xff;
4303 if (intr_type == 0x1D) {
4304
4305 /* process the atio queue first */
4306 qlt->atio_ndx_from_fw =
4307 (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
4308 qlt_handle_atio_queue_update(qlt);
4309
4310 /* process the response queue next */
4311 qlt->mq_resp[0].mq_ndx_from_fw =
4312 (uint16_t)REG_RD32(qlt, REG_RESP_IN_PTR);
4313 qlt_handle_resp_queue_update(qlt, 0);
4314 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4315
4316 } else if (intr_type == 0x1C) {
4317 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4318 qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
4319 qlt_handle_atio_queue_update(qlt);
4320 } else if (intr_type == 0x1E) {
4321 /* 83xx Atio Queue update */
4322 qlt->atio_ndx_from_fw =
4323 (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4324 qlt_handle_atio_queue_update(qlt);
4325 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4326 } else if (intr_type == 0x13) {
4327 uint16_t qi;
4328
4329 qlt->atio_ndx_from_fw =
4330 (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
4331 qlt_handle_atio_queue_update(qlt);
4332
4333 if (qlt->qlt_mq_enabled) {
4334 qi = (uint16_t)(risc_status >> 16);
4335 qlt->mq_resp[0].mq_ndx_from_fw =
4336 (uint16_t)MQBAR_RD32(qlt,
4337 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4338 /* FIX THIS to be optional */
4339 REG_WR32(qlt, REG_HCCR,
4340 HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4341 } else {
4342 qi = 0;
4343 REG_WR32(qlt, REG_HCCR,
4344 HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4345 qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
4346 REG_WR32(qlt, REG_HCCR,
4347 HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4348 }
4349 qlt_handle_resp_queue_update(qlt, qi);
4350
4351 } else if (intr_type == 0x14) {
4352 /* MQ */
4353 uint16_t qi = 0;
4354
4355 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4356 qlt->atio_ndx_from_fw =
4357 (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4358 } else {
4359 qi = (uint16_t)(risc_status >> 16);
4360 qlt->atio_ndx_from_fw = (uint16_t)
4361 REG_RD32(qlt, REG_ATIO_IN_PTR);
4362 }
4363 qlt_handle_atio_queue_update(qlt);
4364
4365 qlt->mq_resp[qi].mq_ndx_from_fw =
4366 (uint16_t)MQBAR_RD32(qlt,
4367 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4368 qlt_handle_resp_queue_update(qlt, qi);
4369 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4370
4371 } else if (intr_type == 0x12) {
4372 uint16_t code = (uint16_t)(risc_status >> 16);
4373 uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
4374 uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
4375 uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
4376 uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
4377 uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
4378 uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
4379
4380 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4381 EL(qlt, "Async event %x mb1=%x, mb2=%x, mb3=%x, mb4=%x, "
4382 "mb5=%x, mb6=%x\n", code, mbox1, mbox2, mbox3, mbox4,
4383 mbox5, mbox6);
4384 stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
4385 " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
4386 mbox3, mbox4, mbox5, mbox6);
4387 cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
4388 " mb3=%x, mb4=%x, mb5=%x, mb6=%x", qlt->instance, code,
4389 mbox1, mbox2, mbox3, mbox4, mbox5, mbox6);
4390
4391 if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
4392 if (qlt->qlt_link_up) {
4393 fct_handle_event(qlt->qlt_port,
4394 FCT_EVENT_LINK_RESET, 0, 0);
4395 }
4396 } else if (code == 0x8012) {
4397 qlt->qlt_link_up = 0;
4398 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
4399 0, 0);
4400 } else if (code == 0x8014) {
4401 if (mbox1 == 0xffff) { /* global event */
4402 uint8_t reason_code;
4403
4404 reason_code = (uint8_t)(mbox3 >> 8);
4405
4406 switch (reason_code) {
4407 case 0x1d: /* FIP Clear Virtual Link received */
4408 case 0x1a: /* received FLOGO */
4409 case 0x1c: /* FCF configuration changed */
4410 case 0x1e: /* FKA timeout */
4411 if (mbox2 == 7) {
4412 qlt->qlt_link_up = 0;
4413 fct_handle_event(qlt->qlt_port,
4414 FCT_EVENT_LINK_DOWN, 0, 0);
4415 }
4416 break;
4417 case 0x12:
4418 if (mbox2 == 4) {
4419 qlt->qlt_link_up = 1;
4420 fct_handle_event(qlt->qlt_port,
4421 FCT_EVENT_LINK_UP, 0, 0);
4422 stmf_trace(qlt->qlt_port_alias,
4423 "SNS login and SCR done");
4424 }
4425 break;
4426 case 0:
4427 if ((mbox2 == 6) &&
4428 (!qlt->qlt_link_up)) {
4429 qlt->qlt_link_up = 1;
4430 fct_handle_event(qlt->qlt_port,
4431 FCT_EVENT_LINK_UP, 0, 0);
4432 stmf_trace(qlt->qlt_port_alias,
4433 "Link reinitialised");
4434 }
4435 break;
4436 default:
4437 stmf_trace(qlt->qlt_port_alias,
4438 "AEN ignored");
4439 break;
4440 }
4441 }
4442 } else if (code == 0x8011) {
4443 switch (mbox1) {
4444 case 0: qlt->link_speed = PORT_SPEED_1G;
4445 break;
4446 case 1: qlt->link_speed = PORT_SPEED_2G;
4447 break;
4448 case 3: qlt->link_speed = PORT_SPEED_4G;
4449 break;
4450 case 4: qlt->link_speed = PORT_SPEED_8G;
4451 break;
4452 case 5: qlt->link_speed = PORT_SPEED_16G;
4453 break;
4454 case 6: qlt->link_speed = PORT_SPEED_32G;
4455 break;
4456 case 0x13: qlt->link_speed = PORT_SPEED_10G;
4457 break;
4458 default:
4459 qlt->link_speed = PORT_SPEED_UNKNOWN;
4460 }
4461 qlt->qlt_link_up = 1;
4462 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
4463 0, 0);
4464 } else if ((code == 0x8002) || (code == 0x8003) ||
4465 (code == 0x8004) || (code == 0x8005)) {
4466 (void) snprintf(info, 80,
4467 "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
4468 code, mbox1, mbox2, mbox5, mbox6);
4469 info[79] = 0;
4470 (void) fct_port_shutdown(qlt->qlt_port,
4471 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4472 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4473 } else if (code == 0x800F) {
4474 (void) snprintf(info, 80,
4475 "Got 800F, mb1=%x mb2=%x mb3=%x",
4476 mbox1, mbox2, mbox3);
4477
4478 if (mbox1 != 1) {
4479 /* issue "verify fw" */
4480 qlt_verify_fw(qlt);
4481 }
4482 } else if (code == 0x8101) {
4483 (void) snprintf(info, 80,
4484 "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
4485 code, mbox1, mbox2, mbox3);
4486 info[79] = 0;
4487
4488 /* check if "ACK" is required (timeout != 0) */
4489 if (mbox1 & 0x0f00) {
4490 caddr_t req;
4491
4492 /*
4493 * Ack the request (queue work to do it?)
4494 * using a mailbox iocb (only Queue 0 allowed)
4495 */
4496 mutex_enter(&qlt->mq_req[0].mq_lock);
4497 req = qlt_get_req_entries(qlt, 1, 0);
4498 if (req) {
4499 bzero(req, IOCB_SIZE);
4500 req[0] = 0x39; req[1] = 1;
4501 QMEM_WR16(qlt, req+8, 0x101);
4502 QMEM_WR16(qlt, req+10, mbox1);
4503 QMEM_WR16(qlt, req+12, mbox2);
4504 QMEM_WR16(qlt, req+14, mbox3);
4505 QMEM_WR16(qlt, req+16, mbox4);
4506 QMEM_WR16(qlt, req+18, mbox5);
4507 QMEM_WR16(qlt, req+20, mbox6);
4508 qlt_submit_req_entries(qlt, 1, 0);
4509 } else {
4510 (void) snprintf(info, 80,
4511 "IDC ACK failed");
4512 info[79] = 0;
4513 }
4514 mutex_exit(&qlt->mq_req[0].mq_lock);
4515 }
4516 }
4517 } else if ((intr_type == 0x10) || (intr_type == 0x11)) {
4518 /* Handle mailbox completion */
4519 mutex_enter(&qlt->mbox_lock);
4520 if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
4521 cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
4522 " when driver wasn't waiting for it %d",
4523 qlt->instance, qlt->mbox_io_state);
4524 } else {
4525 for (i = 0; i < MAX_MBOXES; i++) {
4526 if (qlt->mcp->from_fw_mask &
4527 (((uint32_t)1) << i)) {
4528 qlt->mcp->from_fw[i] =
4529 REG_RD16(qlt, REG_MBOX(i));
4530 }
4531 }
4532 qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
4533 }
4534 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4535 cv_broadcast(&qlt->mbox_cv);
4536 mutex_exit(&qlt->mbox_lock);
4537 } else {
4538 cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
4539 qlt->instance, intr_type);
4540 stmf_trace(qlt->qlt_port_alias,
4541 "%s: Unknown intr type 0x%x [%x]",
4542 __func__, intr_type, risc_status);
4543 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4544 }
4545
4546 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting */
4547
4548 if ((qlt->intr_flags & QLT_INTR_MSIX) == 0) {
4549 risc_status = REG_RD32(qlt, REG_RISC_STATUS);
4550 if ((risc_status & BIT_15) &&
4551 (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
4552 goto intr_again;
4553 }
4554 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
4555 mutex_exit(&qlt->intr_lock);
4556 } else {
4557 mutex_exit(&qlt->intr_lock);
4558 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
4559 }
4560
4561 return (DDI_INTR_CLAIMED);
4562 }
4563
4564 /* **************** NVRAM Functions ********************** */
4565
4566 fct_status_t
4567 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
4568 {
4569 uint32_t timer;
4570
4571 /* Clear access error flag */
4572 REG_WR32(qlt, REG_CTRL_STATUS,
4573 REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
4574
4575 REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
4576
4577 /* Wait for READ cycle to complete. */
4578 for (timer = 3000; timer; timer--) {
4579 if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
4580 break;
4586 return (QLT_FLASH_TIMEOUT);
4587 } else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
4588 EL(qlt, "flash access error\n");
4589 return (QLT_FLASH_ACCESS_ERROR);
4590 }
4591
4592 *bp = REG_RD32(qlt, REG_FLASH_DATA);
4593
4594 return (QLT_SUCCESS);
4595 }
4596
4597 fct_status_t
4598 qlt_read_nvram(qlt_state_t *qlt)
4599 {
4600 uint32_t index, addr, chksum;
4601 uint32_t val, *ptr;
4602 fct_status_t ret;
4603 qlt_nvram_t *nv;
4604 uint64_t empty_node_name = 0;
4605
4606 if (qlt->qlt_27xx_chip) {
4607 int func;
4608
4609 func = ((REG_RD32(qlt, REG_CTRL_STATUS) & 0x0000f000) >> 12);
4610 switch (func) {
4611 case 0: addr = QLT27_NVRAM_FUNC0_ADDR; break;
4612 case 1: addr = QLT27_NVRAM_FUNC1_ADDR; break;
4613 case 2: addr = QLT27_NVRAM_FUNC2_ADDR; break;
4614 case 3: addr = QLT27_NVRAM_FUNC3_ADDR; break;
4615 }
4616 } else if (qlt->qlt_83xx_chip) {
4617 if (qlt->qlt_fcoe_enabled) {
4618 addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4619 QLT83FCOE_NVRAM_FUNC1_ADDR :
4620 QLT83FCOE_NVRAM_FUNC0_ADDR;
4621 } else {
4622 addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4623 QLT83FC_NVRAM_FUNC1_ADDR :
4624 QLT83FC_NVRAM_FUNC0_ADDR;
4625 }
4626 } else if (qlt->qlt_81xx_chip) {
4627 addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4628 QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
4629 } else if (qlt->qlt_25xx_chip) {
4630 addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4631 QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
4632 } else {
4633 addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4634 NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
4635 }
4636 mutex_enter(&qlt_global_lock);
4637
4638 /* Pause RISC. */
4639 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4640 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
4641
4642 /* Get NVRAM data and calculate checksum. */
4643 ptr = (uint32_t *)qlt->nvram;
4644 chksum = 0;
4645 for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
4646 ret = qlt_read_flash_word(qlt, addr++, &val);
4647 if (ret != QLT_SUCCESS) {
4664 nv = qlt->nvram;
4665 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
4666 nv->id[2] != 'P' || nv->id[3] != ' ' ||
4667 (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
4668 EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
4669 nv->id[0], nv->id[1], nv->id[2], nv->id[3],
4670 nv->nvram_version[1], nv->nvram_version[0]);
4671 return (QLT_BAD_NVRAM_DATA);
4672 }
4673
4674 /* If node name is zero, hand craft it from port name */
4675 if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
4676 bcopy(nv->port_name, nv->node_name, 8);
4677 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
4678 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
4679 }
4680
4681 return (QLT_SUCCESS);
4682 }
4683
/*
 * qlt_read_vpd
 *	Reads this PCI function's VPD (Vital Product Data) region from
 *	flash into qlt->vpd, byte-swapping each 32-bit word with LE_32.
 *	Serializes flash access with qlt_global_lock and pauses the RISC
 *	for the duration of the read.
 *	Returns QLT_SUCCESS, or the failing qlt_read_flash_word status.
 *
 * NOTE(review): chksum is accumulated but never validated here, unlike
 * qlt_read_nvram — confirm whether VPD checksum verification was
 * intentionally omitted.
 */
fct_status_t
qlt_read_vpd(qlt_state_t *qlt)
{
	uint32_t index, addr, chksum;
	uint32_t val, *ptr;
	fct_status_t ret;

	/* Select the flash address of this function's VPD copy. */
	if (qlt->qlt_27xx_chip) {
		int func;

		/* PCI function number lives in bits 15:12 of CTRL_STATUS */
		func = ((REG_RD32(qlt, REG_CTRL_STATUS) & 0x0000f000) >> 12);
		/* assumes func is 0-3 on 27xx hardware — addr is unset otherwise */
		switch (func) {
		case 0: addr = QLT27_VPD_FUNC0_ADDR; break;
		case 1: addr = QLT27_VPD_FUNC1_ADDR; break;
		case 2: addr = QLT27_VPD_FUNC2_ADDR; break;
		case 3: addr = QLT27_VPD_FUNC3_ADDR; break;
		}
	} else if (qlt->qlt_83xx_chip) {
		if (qlt->qlt_fcoe_enabled) {
			addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
			    QLT83FCOE_VPD_FUNC1_ADDR :
			    QLT83FCOE_VPD_FUNC0_ADDR;
		} else {
			addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
			    QLT83FC_VPD_FUNC1_ADDR :
			    QLT83FC_VPD_FUNC0_ADDR;
		}
	} else if (qlt->qlt_81xx_chip) {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
		    QLT81_VPD_FUNC1_ADDR : QLT81_VPD_FUNC0_ADDR;
	} else if (qlt->qlt_25xx_chip) {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    QLT25_VPD_FUNC1_ADDR : QLT25_VPD_FUNC0_ADDR;
	} else {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    QLT24_VPD_FUNC1_ADDR : QLT24_VPD_FUNC0_ADDR;
	}
	mutex_enter(&qlt_global_lock);

	/* Pause RISC. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */

	/* Get VPD data and calculate checksum. */
	ptr = (uint32_t *)qlt->vpd;
	chksum = 0;
	for (index = 0; index < QL_24XX_VPD_SIZE / 4; index++) {
		ret = qlt_read_flash_word(qlt, addr++, &val);
		if (ret != QLT_SUCCESS) {
			/* Unpause is skipped here; error path exits early */
			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
			mutex_exit(&qlt_global_lock);
			return (ret);
		}
		chksum += val;
		*ptr = LE_32(val);
		ptr++;
	}

	/* Release RISC Pause */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */

	mutex_exit(&qlt_global_lock);

	return (QLT_SUCCESS);
}
4750
4751 fct_status_t
4752 qlt_read_bfe(qlt_state_t *qlt, uint32_t in_addr, uint32_t *out_addr, uint32_t i)
4753 {
4754 uint32_t index;
4755 uint32_t chksum;
4756 fct_status_t ret = QLT_SUCCESS;
4757 uint32_t val;
4758 uint16_t dataoffset;
4759 uint32_t *ptr;
4760 uint32_t addr, addr0;
4761 uint16_t length;
4762
4763 val = chksum = 0;
4764 ptr = (uint32_t *)&qlt->rimage[i].header;
4765
4766 addr = in_addr;
4767 addr0 = addr;
4768
4769 /* read rom header first */
4770 for (index = 0; index < sizeof (qlt_rom_header_t)/4;
4771 index ++) {
4772 ret = qlt_read_flash_word(qlt, addr++, &val);
4773 if (ret != QLT_SUCCESS) {
4774 EL(qlt, "read flash, status=%llxh\n", ret);
4775 return (ret);
4776 }
4777 chksum += val;
4778 *ptr = LE_32(val);
4779 ptr++;
4780 }
4781
4782 /* check the signature */
4783 if (qlt->rimage[i].header.signature[0] != PCI_HEADER0) {
4784 EL(qlt, "hdr[%d] sig[1] [0] (%xh) (%xh) is wrong.\n",
4785 i, qlt->rimage[i].header.signature[1],
4786 qlt->rimage[i].header.signature[0]);
4787 return (QLT_SUCCESS);
4788 }
4789
4790 if ((qlt->rimage[i].header.signature[0] == PCI_HEADER0) &&
4791 (qlt->rimage[i].header.signature[1] == PCI_HEADER1)) {
4792 /* get dataoffset */
4793 dataoffset = (qlt->rimage[i].header.dataoffset[1] |
4794 qlt->rimage[i].header.dataoffset[0]);
4795 EL(qlt, "dataoffset[0] = %xh\n", dataoffset);
4796
4797 ptr = (uint32_t *)&qlt->rimage[i].data;
4798
4799 /* adjust addr */
4800 addr = addr0 + (dataoffset/4);
4801 for (index = 0; index < sizeof (qlt_rom_data_t)/4;
4802 index ++) {
4803 ret = qlt_read_flash_word(qlt, addr++, &val);
4804 if (ret != QLT_SUCCESS) {
4805 EL(qlt, "read flash, status=%llxh\n", ret);
4806 return (ret);
4807 }
4808 chksum += val;
4809 *ptr = LE_32(val);
4810 ptr++;
4811 }
4812
4813 /* check signature */
4814 if ((qlt->rimage[i].data.signature[0] != 0x50) &&
4815 (qlt->rimage[i].data.signature[1] != 0x43) &&
4816 (qlt->rimage[i].data.signature[2] != 0x49) &&
4817 (qlt->rimage[i].data.signature[3] != 0x52)) {
4818 EL(qlt,
4819 "data sig[3] [2] [1] [0] (%xh)(%xh)(%xh)(%xh)\n",
4820 qlt->rimage[i].data.signature[3],
4821 qlt->rimage[i].data.signature[2],
4822 qlt->rimage[i].data.signature[1],
4823 qlt->rimage[i].data.signature[0]);
4824 return (QLT_SUCCESS);
4825 }
4826
4827 EL(qlt, "codetype (%xh) revisionlevel[1][0] (%xh)(%xh)\n",
4828 qlt->rimage[i].data.codetype,
4829 qlt->rimage[i].data.revisionlevel[1],
4830 qlt->rimage[i].data.revisionlevel[0]);
4831
4832 /* check if this is the last image */
4833 if (qlt->rimage[i].data.indicator == PCI_IND_LAST_IMAGE) {
4834 EL(qlt, "last image (%xh)\n",
4835 qlt->rimage[i].data.indicator);
4836 return (QLT_SUCCESS);
4837
4838 }
4839
4840 /* Get the image length and adjust the addr according */
4841 length = (qlt->rimage[i].data.imagelength[1] |
4842 qlt->rimage[i].data.imagelength[0]);
4843
4844 EL(qlt, "image[%d] length[1][0] (%xh) (%xh) in sectors\n",
4845 i, length);
4846
4847 /* the starting addr of the next image */
4848 addr = addr0 + ((length * 512)/4);
4849 *out_addr = addr;
4850 }
4851
4852 return (QLT_SUCCESS);
4853 }
4854
4855 fct_status_t
4856 qlt_read_rom_image(qlt_state_t *qlt)
4857 {
4858 uint32_t addr;
4859 uint32_t out_addr = 0;
4860 uint32_t count = 0;
4861 boolean_t last_image = FALSE;
4862 fct_status_t ret;
4863
4864 if (qlt->qlt_27xx_chip) {
4865 addr = FLASH_2700_DATA_ADDR + FLASH_2700_BOOT_CODE_ADDR;
4866 } else if (qlt->qlt_83xx_chip) {
4867 addr = FLASH_8300_DATA_ADDR + FLASH_8300_BOOT_CODE_ADDR;
4868 } else if (qlt->qlt_81xx_chip) {
4869 addr = FLASH_8100_DATA_ADDR + FLASH_8100_BOOT_CODE_ADDR;
4870 } else if (qlt->qlt_25xx_chip) {
4871 addr = FLASH_2500_DATA_ADDR + FLASH_2500_BOOT_CODE_ADDR;
4872 } else {
4873 addr = FLASH_2400_DATA_ADDR + FLASH_2400_BOOT_CODE_ADDR;
4874 }
4875 mutex_enter(&qlt_global_lock);
4876
4877 /* Pause RISC. */
4878 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4879 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
4880
4881 do {
4882 ret = qlt_read_bfe(qlt, addr, &out_addr, count);
4883 if (ret != QLT_SUCCESS) {
4884 EL(qlt, "qlt_read_bfe, status=%llxh\n", ret);
4885 break;
4886 }
4887 if (qlt->rimage[count].data.indicator ==
4888 PCI_IND_LAST_IMAGE) {
4889 last_image = TRUE;
4890 } else {
4891 addr = out_addr;
4892 }
4893 count ++;
4894 } while ((last_image != TRUE) && (count < 6));
4895
4896 /* Release RISC Pause */
4897 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
4898 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
4899
4900 mutex_exit(&qlt_global_lock);
4901
4902 return (QLT_SUCCESS);
4903 }
4904
4905 uint32_t
4906 qlt_sync_atio_queue(qlt_state_t *qlt)
4907 {
4908 uint32_t total_ent;
4909
4910 if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
4911 total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
4912 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
4913 + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
4914 DDI_DMA_SYNC_FORCPU);
4915 } else {
4916 total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
4917 qlt->atio_ndx_from_fw;
4918 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
4919 + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
4920 qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
4921 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
4922 ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
4923 DDI_DMA_SYNC_FORCPU);
4924 }
4928 void
4929 qlt_handle_atio_queue_update(qlt_state_t *qlt)
4930 {
4931 uint32_t total_ent;
4932
4933 if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
4934 return;
4935
4936 total_ent = qlt_sync_atio_queue(qlt);
4937
4938 do {
4939 uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
4940 qlt->atio_ndx_to_fw << 6];
4941 uint32_t ent_cnt;
4942
4943 ent_cnt = (uint32_t)(atio[1]);
4944 if (ent_cnt > total_ent) {
4945 break;
4946 }
4947 switch ((uint8_t)(atio[0])) {
4948 case 0x06: /* ATIO, make performance case the 1st test */
4949 qlt_handle_atio(qlt, atio);
4950 break;
4951 case 0x0d: /* INOT */
4952 qlt_handle_inot(qlt, atio);
4953 break;
4954 default:
4955 EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
4956 cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
4957 "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
4958 break;
4959 }
4960 qlt->atio_ndx_to_fw = (uint16_t)(
4961 (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
4962 total_ent -= ent_cnt;
4963 } while (total_ent > 0);
4964
4965 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4966 MQBAR_WR32(qlt, MQBAR_ATIO_OUT, qlt->atio_ndx_to_fw);
4967 } else {
4968 REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
4969 }
4970 }
4971
/*
 * Sync response queue 'qi' DMA memory so the CPU sees the entries the
 * firmware has posted, and return the number of pending 64-byte entries
 * (from our consumer index mq_ndx_to_fw up to the firmware's producer
 * index mq_ndx_from_fw).
 *
 * Queue 0 lives inside the shared queue_mem area at RESPONSE_QUEUE_OFFSET;
 * the other multi-queue response queues each have their own DMA handle.
 * When the pending range wraps past the end of the ring, two syncs are
 * issued: consumer-index-to-end, then ring-start-to-producer-index.
 */
uint32_t
qlt_sync_resp_queue(qlt_state_t *qlt, uint16_t qi)
{
	uint32_t total_ent;

	if (qlt->mq_resp[qi].mq_ndx_from_fw > qlt->mq_resp[qi].mq_ndx_to_fw) {
		/* No wrap: one contiguous range is pending. */
		total_ent = qlt->mq_resp[qi].mq_ndx_from_fw -
		    qlt->mq_resp[qi].mq_ndx_to_fw;
		if (qi) {
			/* << 6 converts an entry index to a byte offset */
			(void) ddi_dma_sync(
			    qlt->mq_resp[qi].queue_mem_mq_dma_handle,
			    (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
			    total_ent << 6,
			    DDI_DMA_SYNC_FORCPU);
		} else {
			(void) ddi_dma_sync(
			    qlt->queue_mem_dma_handle,
			    RESPONSE_QUEUE_OFFSET +
			    (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
			    total_ent << 6,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		/* Pending range wraps around the end of the ring. */
		total_ent =
		    (qi ? RESPONSE_QUEUE_MQ_ENTRIES : RESPONSE_QUEUE_ENTRIES) -
		    qlt->mq_resp[qi].mq_ndx_to_fw +
		    qlt->mq_resp[qi].mq_ndx_from_fw;

		if (qi) {

			(void) ddi_dma_sync(
			    qlt->mq_resp[qi].queue_mem_mq_dma_handle,
			    qlt->mq_resp[qi].mq_ndx_to_fw << 6,
			    (RESPONSE_QUEUE_MQ_ENTRIES -
			    qlt->mq_resp[qi].mq_ndx_to_fw) << 6,
			    DDI_DMA_SYNC_FORCPU);
			(void) ddi_dma_sync(
			    qlt->mq_resp[qi].queue_mem_mq_dma_handle, 0,
			    qlt->mq_resp[qi].mq_ndx_from_fw << 6,
			    DDI_DMA_SYNC_FORCPU);
		} else {
			(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
			    RESPONSE_QUEUE_OFFSET +
			    (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
			    (RESPONSE_QUEUE_ENTRIES -
			    qlt->mq_resp[qi].mq_ndx_to_fw) << 6,
			    DDI_DMA_SYNC_FORCPU);
			(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
			    RESPONSE_QUEUE_OFFSET,
			    qlt->mq_resp[qi].mq_ndx_from_fw << 6,
			    DDI_DMA_SYNC_FORCPU);
		}
	}

	return (total_ent);
}
5028
/*
 * Drain response queue 'qi': sync the queue memory, dispatch each posted
 * IOCB to its completion handler based on the entry-type code in byte 0,
 * advance our consumer index, and finally write the new index back to
 * the chip so the firmware can reuse the entries.
 */
void
qlt_handle_resp_queue_update(qlt_state_t *qlt, uint16_t qi)
{
	uint32_t total_ent;
	uint8_t c;

	/* Nothing pending if our index already matches the firmware's. */
	if (qlt->mq_resp[qi].mq_ndx_to_fw == qlt->mq_resp[qi].mq_ndx_from_fw)
		return;

	total_ent = qlt_sync_resp_queue(qlt, qi);

	do {
		uint32_t qe = qlt->mq_resp[qi].mq_ndx_to_fw;
		caddr_t resp = &qlt->mq_resp[qi].mq_ptr[qe << 6];

		uint32_t ent_cnt;

		/* PUREX (0x51) IOCBs span resp[1] entries; others use 1. */
		ent_cnt = (uint32_t)(resp[0] == 0x51 ? resp[1] : 1);
		if (ent_cnt > total_ent) {
			/* Multi-entry IOCB not fully posted yet; stop. */
			break;
		}
		switch ((uint8_t)(resp[0])) {
		case 0x12:	/* CTIO completion */
			qlt_handle_ctio_completion(qlt, (uint8_t *)resp, qi);
			break;
		case 0x0e:	/* NACK */
			/* Do Nothing */
			break;
		case 0x1b:	/* Verify FW */
			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
			break;
		case 0x29:	/* CT PassThrough */
			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
			break;
		case 0x32:	/* Report ID */
			EL(qlt, "report Id received [type %xh]\n", resp[0]);
			break;
		case 0x33:	/* Abort IO IOCB completion */
			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
			break;
		case 0x51:	/* PUREX */
			qlt_handle_purex(qlt, (uint8_t *)resp);
			break;
		case 0x52:
			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
			break;
		case 0x53:	/* ELS passthrough */
			/* Top 3 bits of byte 0x1f select the sub-case. */
			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
			if (c == 0) {
				qlt_handle_sol_els_completion(qlt,
				    (uint8_t *)resp);
			} else if (c == 3) {
				qlt_handle_unsol_els_abort_completion(qlt,
				    (uint8_t *)resp);
			} else {
				qlt_handle_unsol_els_completion(qlt,
				    (uint8_t *)resp);
			}
			break;
		case 0x54:	/* ABTS received */
			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp, qi);
			break;
		case 0x55:	/* ABTS completion */
			qlt_handle_abts_completion(qlt, (uint8_t *)resp, qi);
			break;
		default:
			EL(qlt, "response entry=%xh\n", resp[0]);
			break;
		}
		/* Advance consumer index, wrapping at the ring size. */
		if (qi != 0) {
			qlt->mq_resp[qi].mq_ndx_to_fw =
			    (qlt->mq_resp[qi].mq_ndx_to_fw + ent_cnt) &
			    (RESPONSE_QUEUE_MQ_ENTRIES - 1);
		} else {
			qlt->mq_resp[qi].mq_ndx_to_fw =
			    (qlt->mq_resp[qi].mq_ndx_to_fw + ent_cnt) &
			    (RESPONSE_QUEUE_ENTRIES - 1);
		}
		total_ent -= ent_cnt;
	} while (total_ent > 0);
	/* Tell the chip how far we have consumed. */
	if (qlt->qlt_mq_enabled) {
		MQBAR_WR32(qlt, (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_OUT,
		    qlt->mq_resp[qi].mq_ndx_to_fw);
	} else {
		REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->mq_resp[qi].mq_ndx_to_fw);
	}
}
5116
5117 fct_status_t
5118 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
5119 uint16_t *ret_handle)
5120 {
5121 fct_status_t ret;
5122 mbox_cmd_t *mcp;
5123 uint16_t n;
5124 uint16_t h;
5125 uint32_t ent_id;
5126 uint8_t *p;
5127 int found = 0;
5128
5129 mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
5130 if (mcp == NULL) {
5131 return (STMF_ALLOC_FAILURE);
5132 }
5133 mcp->to_fw[0] = MBC_GET_ID_LIST;
5134 mcp->to_fw[8] = 2048 * 8;
5135 mcp->to_fw[9] = 0;
5136 mcp->to_fw_mask |= BIT_9 | BIT_8;
5137 mcp->from_fw_mask |= BIT_1 | BIT_2;
5138
5139 ret = qlt_mailbox_command(qlt, mcp);
5140 if (ret != QLT_SUCCESS) {
5141 EL(qlt, "qlt_mbox_command=7Ch status=%llxh\n", ret);
5142 cmn_err(CE_WARN, "qlt(%d) GET ID list failed, ret = %llx, "
5143 "mb0=%x, mb1=%x, mb2=%x", qlt->instance, (long long)ret,
5144 mcp->from_fw[0], mcp->from_fw[1], mcp->from_fw[2]);
5145 qlt_free_mailbox_command(qlt, mcp);
5146 return (ret);
5147 }
5148
5149 EL(qlt, "mbx cmd=7Ch, GET_ID_LIST id=%x fw[1]=%x\n",
5150 id, mcp->from_fw[1]);
5151
5152 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
5153 p = mcp->dbuf->db_sglist[0].seg_addr;
5154 for (n = 0; n < mcp->from_fw[1]; n++) {
5155 ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
5156 h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
5157 if (ent_id == id) {
5158 found = 1;
5159 *ret_handle = h;
5160 if ((cmd_handle != FCT_HANDLE_NONE) &&
5161 (cmd_handle != h)) {
5162 cmn_err(CE_WARN, "qlt(%d) login for portid %x "
5163 "came in with handle %x, while the portid "
5164 "was already using a different handle %x",
5165 qlt->instance, id, cmd_handle, h);
5166 qlt_free_mailbox_command(qlt, mcp);
5167 return (QLT_FAILURE);
5168 }
5169 break;
5170 }
5171 if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
5172 cmn_err(CE_WARN, "qlt(%d) login for portid %x came in "
5173 "with handle %x, while the handle was already in "
5174 "use for portid %x",
5175 qlt->instance, id, cmd_handle, ent_id);
5176 qlt_free_mailbox_command(qlt, mcp);
5177 return (QLT_FAILURE);
5178 }
5179 p += 8;
5180 }
5181 if (!found) {
5182 *ret_handle = cmd_handle;
5183 }
5184 qlt_free_mailbox_command(qlt, mcp);
5185 return (FCT_SUCCESS);
5186 }
5187
5188 /* ARGSUSED */
5189 fct_status_t
5190 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
5191 fct_cmd_t *login)
5192 {
5193 uint8_t *p;
5194
5195 p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
5236 default:
5237 ret = qlt_portid_to_handle(qlt, rp->rp_id,
5238 login->cmd_rp_handle, &h);
5239 if (ret != FCT_SUCCESS) {
5240 EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
5241 return (ret);
5242 }
5243 }
5244
5245 if (login->cmd_type == FCT_CMD_SOL_ELS) {
5246 ret = qlt_fill_plogi_req(port, rp, login);
5247 } else {
5248 ret = qlt_fill_plogi_resp(port, rp, login);
5249 }
5250
5251 if (ret != FCT_SUCCESS) {
5252 EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
5253 return (ret);
5254 }
5255
5256 EL(qlt, "rport id=%xh cmd_type=%xh handle=%xh(%xh)\n",
5257 rp->rp_id, login->cmd_type, h, rp->rp_handle);
5258
5259 if (h == FCT_HANDLE_NONE)
5260 return (FCT_SUCCESS);
5261
5262 if (rp->rp_handle == FCT_HANDLE_NONE) {
5263 rp->rp_handle = h;
5264 return (FCT_SUCCESS);
5265 }
5266
5267 if (rp->rp_handle == h)
5268 return (FCT_SUCCESS);
5269
5270 EL(qlt, "failed, rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
5271 return (FCT_FAILURE);
5272 }
5273
/*
 * Deregister a remote port from the firmware (implicit LOGO) and wait
 * for the 0x52 completion IOCB.  Invoked in single thread, so only one
 * dereg can be outstanding (see the rp_id_in_dereg assertion).
 *
 * Returns FCT_SUCCESS if the port is already (going) offline, FCT_BUSY
 * if no request-queue space is available or the completion times out,
 * otherwise the status posted by the completion handler.
 */
fct_status_t
qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
{
	uint8_t *req;
	qlt_state_t *qlt;
	clock_t dereg_req_timer;
	fct_status_t ret;

	qlt = (qlt_state_t *)port->port_fca_private;

	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
	    (qlt->qlt_state == FCT_STATE_OFFLINING))
		return (FCT_SUCCESS);
	ASSERT(qlt->rp_id_in_dereg == 0);

	/*
	 * 83xx/27xx chips use request queue 0; older chips use the
	 * priority request queue.  The matching lock is held across the
	 * cv_timedwait below.
	 */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_enter(&qlt->mq_req[0].mq_lock);
		req = (uint8_t *)qlt_get_req_entries(qlt, 1, 0);
		if (req == NULL) {
			EL(qlt, "req = NULL\n");
			mutex_exit(&qlt->mq_req[0].mq_lock);
			return (FCT_BUSY);
		}
	} else {
		mutex_enter(&qlt->preq_lock);
		req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
		if (req == NULL) {
			EL(qlt, "req = NULL\n");
			mutex_exit(&qlt->preq_lock);
			return (FCT_BUSY);
		}
	}
	/* Build the 0x52 (logout) IOCB. */
	bzero(req, IOCB_SIZE);
	req[0] = 0x52; req[1] = 1;
	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff); */
	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
	qlt->rp_id_in_dereg = rp->rp_id;
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		qlt_submit_req_entries(qlt, 1, 0);
	} else {
		qlt_submit_preq_entries(qlt, 1);
	}

	/*
	 * Wait for the completion handler to signal rp_dereg_cv; a
	 * timeout (or spurious return <= 0) is reported as FCT_BUSY.
	 */
	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
	if (cv_timedwait(&qlt->rp_dereg_cv,
	    (((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) ?
	    &qlt->mq_req[0].mq_lock : &qlt->preq_lock),
	    dereg_req_timer) > 0) {
		ret = qlt->rp_dereg_status;
	} else {
		ret = FCT_BUSY;
	}
	qlt->rp_dereg_status = 0;
	qlt->rp_id_in_dereg = 0;
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_exit(&qlt->mq_req[0].mq_lock);
	} else {
		mutex_exit(&qlt->preq_lock);
	}

	EL(qlt, "Dereg remote port(%Xh), ret=%llxh\n",
	    rp->rp_id, ret);

	return (ret);
}
5342
/*
 * Pass received ELS up to framework.
 *
 * Decodes a PUREX (0x51) IOCB from response queue 0, allocates an
 * FCT_STRUCT_CMD_RCVD_ELS command, copies the (byte-swapped) ELS
 * payload out of the possibly multi-entry IOCB, and posts the command
 * to the FCT framework.  On allocation failure or an oversized payload
 * the port is shut down.
 */
static void
qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
{
	fct_cmd_t *cmd;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	uint32_t payload_size;
	uint32_t remote_portid;
	uint8_t *pldptr, *bndrptr;
	int i, off;
	uint16_t iocb_flags;
	char info[160];

	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
	/* Two different length fields depending on IOCB flag BIT_15. */
	if (iocb_flags & BIT_15) {
		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
	} else {
		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
	}

	/* Payload must fit within the resp[1] entries the fw posted. */
	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
		EL(qlt, "payload is too large = %xh\n", payload_size);
		cmn_err(CE_WARN, "handle_purex: payload is too large");
		goto cmd_null;
	}

	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
	if (cmd == NULL) {
		EL(qlt, "fct_alloc cmd==NULL\n");
cmd_null:;
		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	cmd->cmd_port = qlt->qlt_port;
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
	if (cmd->cmd_rp_handle == 0xFFFF) {
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	}

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	els->els_req_size = (uint16_t)payload_size;
	/* Payload buffer was allocated immediately after the qlt_cmd_t. */
	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
	    GET_STRUCT_SIZE(qlt_cmd_t));
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
	pldptr = &resp[0x2C];
	/* bndrptr marks the end of response queue 0 for ring wraparound. */
	bndrptr = (uint8_t *)(qlt->mq_resp[0].mq_ptr
	    + (RESPONSE_QUEUE_ENTRIES << 6));
	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
		/* Take care of fw's swapping of payload */
		els->els_req_payload[i] = pldptr[3];
		els->els_req_payload[i+1] = pldptr[2];
		els->els_req_payload[i+2] = pldptr[1];
		els->els_req_payload[i+3] = pldptr[0];
		pldptr += 4;
		if (pldptr == bndrptr)
			pldptr = (uint8_t *)qlt->mq_resp[0].mq_ptr;
		off += 4;
		/*
		 * At each IOCB boundary skip the 4-byte header of the
		 * next continuation entry (off restarts at 4, not 0).
		 */
		if (off >= IOCB_SIZE) {
			off = 4;
			pldptr += 4;
		}
	}

	EL(qlt, "remote portid = %xh logi/o(%xh) to us revd rex1=%xh\n",
	    remote_portid, els->els_req_payload[0], qcmd->fw_xchg_addr);

	fct_post_rcvd_cmd(cmd, 0);
}
5428
5429 fct_status_t
5430 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
5431 {
5432 qlt_state_t *qlt;
5433 char info[160];
5434
5435 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
5436
5437 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
5438 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
5439 EL(qlt, "ioflags = %xh\n", ioflags);
5440 goto fatal_panic;
5441 } else {
5442 return (qlt_send_status(qlt, cmd));
5443 }
5444 }
5445
5446 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
5447 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
5448 goto fatal_panic;
5449 } else {
5450 return (qlt_send_els_response(qlt, cmd));
5451 }
5452 }
5453
5454 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
5455 cmd->cmd_handle = 0;
5456 }
5457
5458 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
5459 return (qlt_send_abts_response(qlt, cmd, 0));
5460 } else {
5461 EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
5462 ASSERT(0);
5463 return (FCT_FAILURE);
5464 }
5465
5466 fatal_panic:;
5467 (void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
5468 "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
5469 ioflags);
5470 info[159] = 0;
5471 (void) fct_port_shutdown(qlt->qlt_port,
5472 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
5473 return (FCT_FAILURE);
5474 }
5475
5476 /* ARGSUSED */
5477 fct_status_t
5478 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
5479 {
5480 qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
5481 qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
5482 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
5483 uint8_t *req, rcnt;
5484 uint16_t flags;
5485 uint16_t cookie_count;
5486 uint32_t ent_cnt;
5487 uint16_t qi;
5488
5489 qi = qcmd->qid;
5490
5491 if (dbuf->db_handle == 0)
5492 qcmd->dbuf = dbuf;
5493 flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
5494 if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
5495 flags = (uint16_t)(flags | 2);
5496 qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
5497 } else {
5498 flags = (uint16_t)(flags | 1);
5499 }
5500
5501 if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
5502 flags = (uint16_t)(flags | BIT_15);
5503
5504 if (dbuf->db_flags & DB_LU_DATA_BUF) {
5505 /*
5506 * Data bufs from LU are in scatter/gather list format.
5507 */
5508 cookie_count = qlt_get_cookie_count(dbuf);
5509 rcnt = qlt_get_iocb_count(cookie_count);
5510 } else {
5511 cookie_count = 1;
5512 rcnt = 1;
5513 }
5514 mutex_enter(&qlt->mq_req[qi].mq_lock);
5515 req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
5516 if (req == NULL) {
5517 mutex_exit(&qlt->mq_req[qi].mq_lock);
5518 return (FCT_BUSY);
5519 }
5520 bzero(req, IOCB_SIZE);
5521 req[0] = 0x12;
5522 req[1] = rcnt;
5523 req[2] = dbuf->db_handle;
5524 QMEM_WR32_REQ(qlt, qi, req+4, cmd->cmd_handle);
5525 QMEM_WR16_REQ(qlt, qi, req+8, cmd->cmd_rp->rp_handle);
5526 QMEM_WR16_REQ(qlt, qi, req+10, 60); /* 60 seconds timeout */
5527 QMEM_WR16_REQ(qlt, qi, req+12, cookie_count);
5528 QMEM_WR32_REQ(qlt, qi, req+0x10, cmd->cmd_rportid);
5529 QMEM_WR32_REQ(qlt, qi, req+0x14, qcmd->fw_xchg_addr);
5530 QMEM_WR16_REQ(qlt, qi, req+0x1A, flags);
5531 QMEM_WR16_REQ(qlt, qi, req+0x20, cmd->cmd_oxid);
5532 QMEM_WR32_REQ(qlt, qi, req+0x24, dbuf->db_relative_offset);
5533 QMEM_WR32_REQ(qlt, qi, req+0x2C, dbuf->db_data_size);
5534 if (dbuf->db_flags & DB_LU_DATA_BUF) {
5535 uint8_t *qptr; /* qlt continuation segs */
5536 uint16_t cookie_resid;
5537 uint16_t cont_segs;
5538 ddi_dma_cookie_t cookie, *ckp;
5539
5540 /*
5541 * See if the dma cookies are in simple array format.
5542 */
5543 ckp = qlt_get_cookie_array(dbuf);
5544
5545 /*
5546 * Program the first segment into main record.
5547 */
5548 if (ckp) {
5549 ASSERT(ckp->dmac_size);
5550 QMEM_WR64_REQ(qlt, qi, req+0x34, ckp->dmac_laddress);
5551 QMEM_WR32_REQ(qlt, qi, req+0x3c, ckp->dmac_size);
5552 } else {
5553 qlt_ddi_dma_nextcookie(dbuf, &cookie);
5554 ASSERT(cookie.dmac_size);
5555 QMEM_WR64_REQ(qlt, qi, req+0x34, cookie.dmac_laddress);
5556 QMEM_WR32_REQ(qlt, qi, req+0x3c, cookie.dmac_size);
5557 }
5558 cookie_resid = cookie_count-1;
5559
5560 ent_cnt = (qi == 0) ? REQUEST_QUEUE_ENTRIES :
5561 REQUEST_QUEUE_MQ_ENTRIES;
5562 /*
5563 * Program remaining segments into continuation records.
5564 */
5565 while (cookie_resid) {
5566 req += IOCB_SIZE;
5567 if (req >= (uint8_t *)(qlt->mq_req[qi].mq_ptr +
5568 (ent_cnt * IOCB_SIZE))) {
5569 req = (uint8_t *)(qlt->mq_req[qi].mq_ptr);
5570 }
5571
5572 req[0] = 0x0a;
5573 req[1] = 1;
5574 req[2] = req[3] = 0; /* tidy */
5575 qptr = &req[4];
5576 for (cont_segs = CONT_A64_DATA_SEGMENTS;
5577 cont_segs && cookie_resid; cont_segs--) {
5578
5579 if (ckp) {
5580 ++ckp; /* next cookie */
5581 ASSERT(ckp->dmac_size != 0);
5582 QMEM_WR64_REQ(qlt, qi, qptr,
5583 ckp->dmac_laddress);
5584 qptr += 8; /* skip over laddress */
5585 QMEM_WR32_REQ(qlt, qi, qptr,
5586 ckp->dmac_size);
5587 qptr += 4; /* skip over size */
5588 } else {
5589 qlt_ddi_dma_nextcookie(dbuf, &cookie);
5590 ASSERT(cookie.dmac_size != 0);
5591 QMEM_WR64_REQ(qlt, qi, qptr,
5592 cookie.dmac_laddress);
5593 qptr += 8; /* skip over laddress */
5594 QMEM_WR32_REQ(qlt, qi, qptr,
5595 cookie.dmac_size);
5596 qptr += 4; /* skip over size */
5597 }
5598 cookie_resid--;
5599 }
5600 /*
5601 * zero unused remainder of IOCB
5602 */
5603 if (cont_segs) {
5604 size_t resid;
5605 resid = (size_t)((uintptr_t)(req+IOCB_SIZE) -
5606 (uintptr_t)qptr);
5607 ASSERT(resid < IOCB_SIZE);
5608 bzero(qptr, resid);
5609 }
5610 }
5611 } else {
5612 /* Single, contiguous buffer */
5613 QMEM_WR64_REQ(qlt, qi, req+0x34, bctl->bctl_dev_addr);
5614 QMEM_WR32_REQ(qlt, qi, req+0x34+8, dbuf->db_data_size);
5615 }
5616
5617 qlt_submit_req_entries(qlt, rcnt, qi);
5618 mutex_exit(&qlt->mq_req[qi].mq_lock);
5619
5620 return (STMF_SUCCESS);
5621 }
5622
5623 /*
5624 * We must construct proper FCP_RSP_IU now. Here we only focus on
5625 * the handling of FCP_SNS_INFO. If there's protocol failures (FCP_RSP_INFO),
5626 * we could have caught them before we enter here.
5627 */
5628 fct_status_t
5629 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
5630 {
5631 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
5632 scsi_task_t *task = (scsi_task_t *)cmd->cmd_specific;
5633 qlt_dmem_bctl_t *bctl;
5634 uint32_t size;
5635 uint8_t *req, *fcp_rsp_iu;
5636 uint8_t *psd, sensbuf[24]; /* sense data */
5637 uint16_t flags;
5638 uint16_t scsi_status;
5639 int use_mode2;
5640 int ndx;
5641 uint16_t qi;
5642
5643 qi = qcmd->qid;
5644
5645 /*
5646 * Enter fast channel for non check condition
5647 */
5648 if (task->task_scsi_status != STATUS_CHECK) {
5649 /*
5650 * We will use mode1
5651 */
5652 flags = (uint16_t)(BIT_6 | BIT_15 |
5653 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
5654 scsi_status = (uint16_t)task->task_scsi_status;
5655 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
5656 scsi_status = (uint16_t)(scsi_status | FCP_RESID_OVER);
5657 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
5658 scsi_status = (uint16_t)(scsi_status | FCP_RESID_UNDER);
5659 }
5660 qcmd->dbuf_rsp_iu = NULL;
5661
5662 /*
5663 * Fillout CTIO type 7 IOCB
5664 */
5665 mutex_enter(&qlt->mq_req[qi].mq_lock);
5666 req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
5667 if (req == NULL) {
5668 mutex_exit(&qlt->mq_req[qi].mq_lock);
5669 return (FCT_BUSY);
5670 }
5671
5672 /*
5673 * Common fields
5674 */
5675 bzero(req, IOCB_SIZE);
5676 req[0x00] = 0x12;
5677 req[0x01] = 0x1;
5678 req[0x02] = BIT_7; /* indicate if it's a pure status req */
5679 QMEM_WR32_REQ(qlt, qi, req + 0x04, cmd->cmd_handle);
5680 QMEM_WR16_REQ(qlt, qi, req + 0x08, cmd->cmd_rp->rp_handle);
5681 QMEM_WR32_REQ(qlt, qi, req + 0x10, cmd->cmd_rportid);
5682 QMEM_WR32_REQ(qlt, qi, req + 0x14, qcmd->fw_xchg_addr);
5683
5684 /* handle TMF completion - !!! Important FIX */
5685 if (task->task_mgmt_function) {
5686 scsi_status =
5687 (uint16_t)(scsi_status | FCP_RESP_LEN_VALID);
5688
5689 /* no sense length, 4 bytes of resp info */
5690 QMEM_WR16_REQ(qlt, qi, req + 0x24, 4);
5691 }
5692
5693 /*
5694 * Mode-specific fields
5695 */
5696 QMEM_WR16_REQ(qlt, qi, req + 0x1A, flags);
5697 QMEM_WR32_REQ(qlt, qi, req + 0x1C, task->task_resid);
5698 QMEM_WR16_REQ(qlt, qi, req + 0x20, cmd->cmd_oxid);
5699 QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
5700
5701 /*
5702 * Trigger FW to send SCSI status out
5703 */
5704 qlt_submit_req_entries(qlt, 1, qi);
5705 mutex_exit(&qlt->mq_req[qi].mq_lock);
5706 return (STMF_SUCCESS);
5707 }
5708
5709 ASSERT(task->task_scsi_status == STATUS_CHECK);
5710 /*
5711 * Decide the SCSI status mode, that should be used
5712 */
5713 use_mode2 = (task->task_sense_length > 24);
5714
5715 /*
5716 * Prepare required information per the SCSI status mode
5717 */
5718 flags = (uint16_t)(BIT_15 |
5719 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
5720 if (use_mode2) {
5721 flags = (uint16_t)(flags | BIT_7);
5722
5723 size = task->task_sense_length;
5724 qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
5725 task->task_sense_length, &size, 0);
5782 qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
5783 } else {
5784 flags = (uint16_t)(flags | BIT_6);
5785
5786 scsi_status = (uint16_t)task->task_scsi_status;
5787 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
5788 scsi_status = (uint16_t)(scsi_status | BIT_10);
5789 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
5790 scsi_status = (uint16_t)(scsi_status | BIT_11);
5791 }
5792 if (task->task_sense_length) {
5793 scsi_status = (uint16_t)(scsi_status | BIT_9);
5794 }
5795 bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
5796 qcmd->dbuf_rsp_iu = NULL;
5797 }
5798
5799 /*
5800 * Fillout CTIO type 7 IOCB
5801 */
5802 mutex_enter(&qlt->mq_req[qi].mq_lock);
5803 req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
5804 if (req == NULL) {
5805 mutex_exit(&qlt->mq_req[qi].mq_lock);
5806 if (use_mode2) {
5807 qlt_dmem_free(cmd->cmd_port->port_fds,
5808 qcmd->dbuf_rsp_iu);
5809 qcmd->dbuf_rsp_iu = NULL;
5810 }
5811 return (FCT_BUSY);
5812 }
5813
5814 /*
5815 * Common fields
5816 */
5817 bzero(req, IOCB_SIZE);
5818 req[0x00] = 0x12;
5819 req[0x01] = 0x1;
5820 req[0x02] = BIT_7; /* to indicate if it's a pure status req */
5821 QMEM_WR32_REQ(qlt, qi, req + 0x04, cmd->cmd_handle);
5822 QMEM_WR16_REQ(qlt, qi, req + 0x08, cmd->cmd_rp->rp_handle);
5823 QMEM_WR16_REQ(qlt, qi, req + 0x0A, 0); /* not timed by FW */
5824 if (use_mode2) {
5825 /* FCP RSP IU data field */
5826 QMEM_WR16_REQ(qlt, qi, req+0x0C, 1);
5827 }
5828 QMEM_WR32_REQ(qlt, qi, req + 0x10, cmd->cmd_rportid);
5829 QMEM_WR32_REQ(qlt, qi, req + 0x14, qcmd->fw_xchg_addr);
5830
5831 /*
5832 * Mode-specific fields
5833 */
5834 if (!use_mode2) {
5835 QMEM_WR16_REQ(qlt, qi, req + 0x18, task->task_sense_length);
5836 }
5837 QMEM_WR16_REQ(qlt, qi, req + 0x1A, flags);
5838 QMEM_WR32_REQ(qlt, qi, req + 0x1C, task->task_resid);
5839 QMEM_WR16_REQ(qlt, qi, req + 0x20, cmd->cmd_oxid);
5840 if (use_mode2) {
5841 bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
5842 QMEM_WR32_REQ(qlt, qi, req + 0x2C,
5843 24 + task->task_sense_length);
5844 QMEM_WR64_REQ(qlt, qi, req + 0x34, bctl->bctl_dev_addr);
5845 QMEM_WR32_REQ(qlt, qi, req + 0x3C,
5846 24 + task->task_sense_length);
5847 } else {
5848 QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
5849 psd = req+0x28;
5850
5851 /*
5852 * Data in sense buf is always big-endian, data in IOCB
5853 * should always be little-endian, so we must do swapping.
5854 */
5855 size = ((task->task_sense_length + 3) & (~3));
5856 for (ndx = 0; ndx < size; ndx += 4) {
5857 psd[ndx + 0] = sensbuf[ndx + 3];
5858 psd[ndx + 1] = sensbuf[ndx + 2];
5859 psd[ndx + 2] = sensbuf[ndx + 1];
5860 psd[ndx + 3] = sensbuf[ndx + 0];
5861 }
5862 }
5863
5864 /*
5865 * Trigger FW to send SCSI status out
5866 */
5867 qlt_submit_req_entries(qlt, 1, qi);
5868 mutex_exit(&qlt->mq_req[qi].mq_lock);
5869
5870 return (STMF_SUCCESS);
5871 }
5872
/*
 * Send the response for a received ELS via an ELS passthrough (0x53)
 * IOCB on request queue 0.  The response payload is copied into a DMA
 * buffer that is freed later by the passthrough completion handler.
 *
 * Returns FCT_SUCCESS once queued, FCT_BUSY if the DMA buffer or a
 * request-queue entry cannot be obtained.
 */
fct_status_t
qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t *qcmd;
	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
	uint8_t *req, *addr;
	qlt_dmem_bctl_t *bctl;
	uint32_t minsize;
	uint8_t elsop, req1f;
	uint16_t qi = 0;

	addr = els->els_resp_payload;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;

	minsize = els->els_resp_size;
	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
	if (qcmd->dbuf == NULL)
		return (FCT_BUSY);

	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;

	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);

	/* Control flags: BIT_5 for an ACC response, BIT_6 otherwise. */
	if (addr[0] == 0x02) {	/* ACC */
		req1f = BIT_5;
	} else {
		req1f = BIT_6;
	}
	elsop = els->els_req_payload[0];
	/* These ELS ops additionally get BIT_4 set in the flags byte. */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
		return (FCT_BUSY);
	}
	/* Build the ELS passthrough IOCB. */
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	/* In point-to-point topology the local port ID must be supplied. */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);

	EL(qlt, "elsop=%xh req1f=%xh IOCB_TYPE_ELSPASS: rex1=%xh\n",
	    elsop, req1f, qcmd->fw_xchg_addr);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
5943
/*
 * Send a BA_ACC/BA_RJT for a received ABTS by transforming the saved
 * ABTS-received IOCB (qcmd->buf) into an ABTS response (0x55) IOCB.
 * 'terminate' is passed through in byte 2 of the IOCB (and sets the
 * flags word at 0xC) to also terminate the exchange.
 *
 * Returns FCT_SUCCESS once queued, FCT_BUSY if no request-queue entry
 * is available.
 */
fct_status_t
qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
{
	qlt_abts_cmd_t *qcmd;
	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
	uint8_t *req;
	uint32_t lportid;
	uint32_t fctl;
	int i;
	uint16_t qi;
	uint32_t rex1, rex2;
	uint8_t temp[64];

	qi = 0;

	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		/*
		 * No queue space: build the response into a scratch copy
		 * just to extract the exchange addresses for logging.
		 */
		bcopy(qcmd->buf, &temp, IOCB_SIZE);
		for (i = 0; i < 12; i += 4) {
			/* Take care of firmware's LE requirement */
			temp[0x2C+i] = abts->abts_resp_payload[i+3];
			temp[0x2C+i+1] = abts->abts_resp_payload[i+2];
			temp[0x2C+i+2] = abts->abts_resp_payload[i+1];
			temp[0x2C+i+3] = abts->abts_resp_payload[i];
		}
		rex1 = QMEM_RD32(qlt, &temp[0x10]);
		rex2 = QMEM_RD32(qlt, &temp[0x3C]);

		EL(qlt, "req = NULL, %xh %xh %p %xh %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, rex1, rex2);

		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}
	/* Start from the saved ABTS-received IOCB and patch it in place. */
	bcopy(qcmd->buf, req, IOCB_SIZE);
	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
	fctl = QMEM_RD32(qlt, req+0x1C);
	/* Swap originator/responder sense of F_CTL for the response. */
	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp)
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	else
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
	if (terminate) {
		QMEM_WR16(qlt, (&req[0xC]), 1);
	}
	/* Swap source/destination port IDs for the reply direction. */
	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
	req[0x17] = abts->abts_resp_rctl;
	QMEM_WR32(qlt, req+0x18, lportid);
	QMEM_WR32(qlt, req+0x1C, fctl);
	req[0x23]++;
	for (i = 0; i < 12; i += 4) {
		/* Take care of firmware's LE requirement */
		req[0x2C+i] = abts->abts_resp_payload[i+3];
		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
		req[0x2C+i+3] = abts->abts_resp_payload[i];
	}

	rex1 = QMEM_RD32(qlt, &req[0x10]);
	rex2 = QMEM_RD32(qlt, &req[0x3C]);

	EL(qlt, "%xh %xh %d %p %xh %xh\n",
	    QMEM_RD16(qlt, req+0x26), QMEM_RD16(qlt, req+0x24),
	    terminate, cmd, rex1, rex2);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
6019
/*
 * Acknowledge an immediate notify (INOT): copy the 64-byte INOT IOCB
 * onto request queue 0, rewrite its entry type to NOTIFY ACK (0x0e),
 * and submit it.  The source entry's signature dword (offset 0x3c) is
 * overwritten with 0xdeadbeef so a stale re-read can be detected
 * (see qlt_verify_atio_entry).  Dropped silently if no queue space.
 */
static void
qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
{
	int i;
	uint32_t d;
	caddr_t req;
	uint16_t qi;
	uint8_t *entry = inot;

	qi = 0;

	/* Just put it on the request queue */
	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		stmf_trace(qlt->qlt_port_alias,
		    "qlt_handle_inot: can't get a ReqQ entry");
		EL(qlt, "req = NULL\n");
		return;
	}
	/* Copy all 16 dwords through the endian-aware queue accessors. */
	for (i = 0; i < 16; i++) {
		d = QMEM_RD32(qlt, inot);
		inot += 4;
		QMEM_WR32(qlt, req, d);
		req += 4;
	}
	req -= 64;
	req[0] = 0x0e;	/* NOTIFY ACK entry type */

	QMEM_WR32(qlt, entry+0x3c, 0xdeadbeef);
	EL(qlt, "Issue inot ack\n");

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);
}
6056
6057 static uint16_t
6058 qlt_get_queue_id(qlt_state_t *qlt, int id)
6059 {
6060 uint16_t qid;
6061
6062 if ((!qlt->qlt_mq_enabled) || (qlt->qlt_queue_cnt == 1)) {
6063 return (0);
6064 }
6065
6066 mutex_enter(&qlt->qlock);
6067 if ((id == 0) && (qlt->last_qi == 0)) {
6068 qlt->last_qi++;
6069 }
6070 qid = qlt->last_qi;
6071 qlt->last_qi++;
6072
6073 if (qlt->last_qi >= qlt->qlt_queue_cnt) {
6074 qlt->last_qi -= qlt->qlt_queue_cnt;
6075 }
6076 mutex_exit(&qlt->qlock);
6077
6078 return (qid);
6079 }
6080
6081 static fct_status_t
6082 qlt_verify_atio_entry(qlt_state_t *qlt, uint8_t *atio)
6083 {
6084 uint32_t sig;
6085 int i;
6086 char info[160];
6087
6088
6089 sig = QMEM_RD32(qlt, atio+0x3c);
6090 for (i = 0; ((sig == 0xdeadbeef) &&
6091 (i < qlt_reprocess_attempt_cnt)); i++) {
6092 (void) ddi_dma_sync(
6093 qlt->queue_mem_dma_handle,
6094 ATIO_QUEUE_OFFSET + (qlt->atio_ndx_to_fw << 6),
6095 IOCB_SIZE, DDI_DMA_SYNC_FORCPU);
6096
6097 qlt->qlt_atio_reproc_cnt++;
6098 drv_usecwait(qlt_reprocess_delay);
6099 sig = QMEM_RD32(qlt, atio+0x3c);
6100 }
6101
6102 if (i) {
6103 if (i >= qlt_reprocess_attempt_cnt) {
6104 EL(qlt, "atio entry reprocess failed, %x\n",
6105 qlt->qlt_atio_reproc_cnt);
6106 cmn_err(CE_WARN, "qlt%d: atio entry reprocess"
6107 " failed %x\n",
6108 qlt->instance, qlt->qlt_atio_reproc_cnt);
6109 (void) snprintf(info, 160,
6110 "qlt_handle_ctio_completion: atio entry reprocess"
6111 " failed, %x rsp-%p",
6112 qlt->qlt_atio_reproc_cnt, (void *)atio);
6113 info[159] = 0;
6114 (void) fct_port_shutdown(qlt->qlt_port,
6115 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6116 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6117 return (QLT_FAILURE);
6118 } else {
6119 EL(qlt, "atio entry reprocess succeeded, %x %x\n",
6120 i, qlt->qlt_atio_reproc_cnt);
6121 }
6122 }
6123
6124 return (QLT_SUCCESS);
6125 }
6126
/*
 * Lookup table used when building a scsi_task from an ATIO; presumably
 * indexed by the FCP task-attribute code to yield the corresponding
 * task flag value — TODO(review): confirm against the FCP spec and the
 * (not fully visible) qlt_handle_atio() usage.
 */
uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
/*
 * Process one Accept Target I/O (ATIO) entry from the firmware's ATIO queue:
 * extract the addressing information (remote port id, firmware exchange
 * address, OX_ID), allocate an FCT scsi task for the new FCP command and
 * post it to the FCT layer.  If no task can be allocated, the exchange is
 * terminated (or answered with SCSI QFULL status) via a request-queue IOCB.
 *
 * NOTE(review): portions of this function's body appear to be missing from
 * this copy of the file (comment below is unterminated, brace structure in
 * the task-management tail is inconsistent); flagged inline.
 */
static void
qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
{
	fct_cmd_t	*cmd;
	scsi_task_t	*task;
	qlt_cmd_t	*qcmd;
	uint32_t	rportid, fw_xchg_addr;
	uint8_t		*p, *q, *req, tm;
	uint16_t	cdb_size, flags, oxid;
	char		info[160];
	uint16_t	qi;

	/* Make sure the firmware finished writing this 64-byte entry. */
	if (qlt_verify_atio_entry(qlt, atio) != QLT_SUCCESS)
		return;

	/*
	 * If either a bidirectional transfer is requested or there is an
	 * extended CDB, atio[0x20 + 11] will be greater than or equal to 3.
	 */
	cdb_size = 16;
	if (atio[0x20 + 11] >= 3) {
		uint8_t b = atio[0x20 + 11];
		uint16_t b1;
		if ((b & 3) == 3) {
			EL(qlt, "bidirectional I/O not supported\n");
			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
			    "received, dropping the cmd as bidirectional "
			    " transfers are not yet supported", qlt->instance);
			/* XXX abort the I/O */
			return;
		}
		/* top 6 bits of b = number of additional CDB bytes */
		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
		/*
		 * Verify that we have enough entries. Without additional CDB
		 * Everything will fit nicely within the same 64 bytes. So the
		 * NOTE(review): the closing of this comment and the
		 * extended-CDB bounds check are missing from this copy of
		 * the source; the bare "return" below belonged to that
		 * elided check.
			return;
		}
	}

	/* 24-bit remote (initiator) port id from the FC header copy */
	rportid = (((uint32_t)atio[8 + 5]) << 16) |
	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);

	if (fw_xchg_addr == 0xFFFFFFFF) {
		/* Firmware has no exchange context for this command. */
		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
		cmd = NULL;
	} else {
		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
		if (cmd == NULL) {
			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
		}
	}
	if (cmd == NULL) {
		qi = 0; /* just use request queue 0 */

		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
		/* Abort this IO */
		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));

		mutex_enter(&qlt->mq_req[qi].mq_lock);
		req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
		if (req == NULL) {
			/*
			 * NOTE(review): exits mq_req[0] while mq_req[qi] was
			 * entered; identical lock since qi == 0 here, but
			 * should use qi for consistency.
			 */
			mutex_exit(&qlt->mq_req[0].mq_lock);

			(void) snprintf(info, 160,
			    "qlt_handle_atio: qlt-%p, can't "
			    "allocate space for scsi_task", (void *)qlt);
			info[159] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
			return;
		}
		/* Build a CTIO with "terminate exchange" control (BIT_14). */
		bzero(req, IOCB_SIZE);
		req[0] = 0x12; req[1] = 0x1;
		QMEM_WR32(qlt, req+4, 0);
		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
		    rportid));
		QMEM_WR16(qlt, req+10, 60);	/* timeout, seconds */
		QMEM_WR32(qlt, req+0x10, rportid);
		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
		QMEM_WR16(qlt, req+0x1A, flags);
		QMEM_WR16(qlt, req+0x20, oxid);
		qlt_submit_req_entries(qlt, 1, qi);
		mutex_exit(&qlt->mq_req[qi].mq_lock);

		return;
	}
	/*
	 * NOTE(review): this second "cmd == NULL" block is unreachable --
	 * the block above already returned in every cmd == NULL case.
	 * It looks like an alternate (QFULL/terminate) handling variant
	 * left in by a merge; candidate for removal once confirmed.
	 */
	if (cmd == NULL) {
		uint32_t res;
		uint16_t scsi_status = 0;
		uint16_t rphdl = 0;

		qi = 0; /* always use request queue 0 */

		rphdl = fct_get_rp_handle(qlt->qlt_port, rportid);
		if ((rphdl != 0xFFFF) &&
		    (rphdl >= qlt->qlt_port->port_max_logins)) {
			rphdl = 0xFFFF;	/* out of range: treat as no login */
		}

		mutex_enter(&qlt->mq_req[qi].mq_lock);
		req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
		if (req == NULL) {
			EL(qlt, "cannot get reqq\n");
			mutex_exit(&qlt->mq_req[qi].mq_lock);
			(void) snprintf(info, 160,
			    "qlt_handle_atio: qlt-%p, can't "
			    "allocate space for termi-excg", (void *)qlt);
			info[159] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
			return;
		}

		if (rphdl != 0xFFFF) {
			/* Driver send scsi qfull status now */
			flags = (uint16_t)(BIT_15 |
			    ((uint16_t)(atio[0x3] & 0xF0) << 5));
			/* always use SCSI status mode 1 */
			flags = (uint16_t)(flags | BIT_6);

			/* 0x28 = TASK SET FULL SCSI status */
			scsi_status |= (uint16_t)(0x28);

			/* Build SCSI Status Mode 1, FCP_RSP IU 24-48 byte */
			bzero(req, IOCB_SIZE);
			req[0] = 0x12;
			req[1] = 0x1;

			/* allocate a special IOCB handle? or donot care */
			QMEM_WR32(qlt, req+4, 0);
			QMEM_WR16(qlt, req+8, rphdl);
			QMEM_WR16(qlt, req+10, 60);
			QMEM_WR32(qlt, req+0x10, rportid);
			QMEM_WR32(qlt, req+0x14, fw_xchg_addr);

			/* sense_length set to 0 */
			QMEM_WR16(qlt, req+0x18, 0);

			QMEM_WR16(qlt, req+0x1A, flags);

			/* Residual transfer length */
			res = QMEM_RD32(qlt, atio+0x3C);
			BIG_ENDIAN_32(&res);
			if (res != 0) {
				scsi_status |= FCP_RESID_UNDER;
			}
			QMEM_WR32_REQ(qlt, qi, req + 0x1C, res);

			QMEM_WR16(qlt, req+0x20, oxid);
			QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);

			EL(qlt, "Send qfull (%Xh) (%Xh)(%Xh)(%Xh) from port "
			    "(%Xh:%Xh)\n", scsi_status, fw_xchg_addr, flags,
			    oxid, rportid, rphdl);
		} else {
			/* Terminate exchange because no remote port context */
			flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));

			bzero(req, IOCB_SIZE);
			req[0] = 0x12;
			req[1] = 0x1;

			QMEM_WR32(qlt, req+4, 0);
			QMEM_WR16(qlt, req+8, rphdl);
			QMEM_WR16(qlt, req+10, 60);
			QMEM_WR32(qlt, req+0x10, rportid);
			QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
			QMEM_WR16(qlt, req+0x1A, flags);
			QMEM_WR16(qlt, req+0x20, oxid);

			EL(qlt, "Termi excg (%Xh)(%Xh)(%Xh) from port (%Xh)\n",
			    fw_xchg_addr, flags, oxid, rportid);

			EL(qlt, "Termi rp_handle (%Xh)\n", rphdl);
		}

		qlt_submit_req_entries(qlt, 1, qi);
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return;
	}

	/* A task was allocated: fill in the fct_cmd/scsi_task fields. */
	qi = qlt_get_queue_id(qlt, 0);
	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->fw_xchg_addr = fw_xchg_addr;
	qcmd->param.atio_byte3 = atio[3];
	qcmd->qid = qi;
	cmd->cmd_oxid = oxid;
	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
	    atio[8+19]);
	cmd->cmd_rportid = rportid;
	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	/* Dont do a 64 byte read as this is IOMMU */
	q = atio+0x28;
	/* XXX Handle fcp_cntl */
	task->task_cmd_seq_no = (uint32_t)(*q++);
	task->task_csn_size = 8;
	task->task_flags = qlt_task_flags[(*q++) & 7];
	tm = *q++;
	if (tm) {
		/* Task-management flags byte is non-zero: decode it. */
		if (tm & BIT_1)
			task->task_mgmt_function = TM_ABORT_TASK_SET;
		else if (tm & BIT_2)
			task->task_mgmt_function = TM_CLEAR_TASK_SET;
		else if (tm & BIT_4)
		/*
		 * NOTE(review): source appears truncated here -- the rest of
		 * the TM decode and the CDB/ATIO-queue wrap handling are
		 * missing, leaving the brace structure below inconsistent
		 * (cb and i are declared in the elided region).
		 */
				q = (uint8_t *)qlt->queue_mem_ptr +
				    ATIO_QUEUE_OFFSET;
			}
		}
		/* Gather 4 bytes, wrapping at the end of the ATIO ring. */
		for (i = 0; i < 4; i++) {
			cb[i] = *q++;
			if (q == ((uint8_t *)qlt->queue_mem_ptr +
			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
				q = (uint8_t *)qlt->queue_mem_ptr +
				    ATIO_QUEUE_OFFSET;
			}
		}
		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
		    (((uint32_t)cb[1]) << 16) |
		    (((uint32_t)cb[2]) << 8) | cb[3];
	} else {
		/* Transfer length (big-endian) directly follows the CDB. */
		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
		    (((uint32_t)q[1]) << 16) |
		    (((uint32_t)q[2]) << 8) | q[3];
	}

	/* Mark the ATIO entry consumed (checked by qlt_verify_atio_entry). */
	QMEM_WR32(qlt, atio+0x3c, 0xdeadbeef);
	fct_post_rcvd_cmd(cmd, 0);
}
6399
6400 static void
6401 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
6402 {
6403 uint16_t status;
6404 uint32_t portid;
6405 uint32_t subcode1, subcode2;
6406
6407 status = QMEM_RD16(qlt, rsp+8);
6408 portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
6409 subcode1 = QMEM_RD32(qlt, rsp+0x14);
6410 subcode2 = QMEM_RD32(qlt, rsp+0x18);
6411
6412 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
6413 mutex_enter(&qlt->mq_req[0].mq_lock);
6414 } else {
6415 mutex_enter(&qlt->preq_lock);
6416 }
6417 if (portid != qlt->rp_id_in_dereg) {
6418 int instance = ddi_get_instance(qlt->dip);
6419
6420 EL(qlt, "implicit logout reveived portid = %xh\n", portid);
6421 cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
6422 " received when driver wasn't waiting for it",
6423 instance, portid);
6424 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
6425 mutex_exit(&qlt->mq_req[0].mq_lock);
6426 } else {
6427 mutex_exit(&qlt->preq_lock);
6428 }
6429 return;
6430 }
6431
6432 if (status != 0) {
6433 EL(qlt, "implicit logout completed for %xh with status %xh, "
6434 "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
6435 subcode2);
6436 if (status == 0x31 && subcode1 == 0x0a) {
6437 qlt->rp_dereg_status = FCT_SUCCESS;
6438 } else {
6439 EL(qlt, "implicit logout portid=%xh, status=%xh, "
6440 "subcode1=%xh, subcode2=%xh\n", portid, status,
6441 subcode1, subcode2);
6442 qlt->rp_dereg_status =
6443 QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
6444 }
6445 } else {
6446 qlt->rp_dereg_status = FCT_SUCCESS;
6447 }
6448 cv_signal(&qlt->rp_dereg_cv);
6449 if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
6450 mutex_exit(&qlt->mq_req[0].mq_lock);
6451 } else {
6452 mutex_exit(&qlt->preq_lock);
6453 }
6454 }
6455
6456 /*
6457 * Note that when an ELS is aborted, the regular or aborted completion
6458 * (if any) gets posted before the abort IOCB comes back on response queue.
6459 */
6460 static void
6461 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
6462 {
6463 char info[160];
6464 fct_cmd_t *cmd;
6465 qlt_cmd_t *qcmd;
6466 uint32_t hndl;
6467 uint32_t subcode1, subcode2;
6468 uint16_t status;
6469 uint8_t elsop;
6470
6471 hndl = QMEM_RD32(qlt, rsp+4);
6472 status = QMEM_RD16(qlt, rsp+8);
6473 subcode1 = QMEM_RD32(qlt, rsp+0x24);
6474 subcode2 = QMEM_RD32(qlt, rsp+0x28);
6475 elsop = rsp[0x16];
6476
6477 if (!CMD_HANDLE_VALID(hndl)) {
6478 EL(qlt, "handle = %xh\n", hndl);
6479 /*
6480 * This cannot happen for unsol els completion. This can
6481 * only happen when abort for an unsol els completes.
6482 * This condition indicates a firmware bug.
6483 */
6484 (void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
6485 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
6486 hndl, status, subcode1, subcode2, (void *)rsp);
6487 info[159] = 0;
6488 (void) fct_port_shutdown(qlt->qlt_port,
6489 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6490 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6491 return;
6492 }
6493
6494 if (status == 5) {
6495 /*
6496 * When an unsolicited els is aborted, the abort is done
6497 * by a ELSPT iocb with abort control. This is the aborted IOCB
6498 * and not the abortee. We will do the cleanup when the
6499 * IOCB which caused the abort, returns.
6500 */
6501 EL(qlt, "status = %xh\n", status);
6502 stmf_trace(0, "--UNSOL ELS returned with status 5 --");
6503 return;
6504 }
6505
6506 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
6507 if (cmd == NULL) {
6508 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
6509 /*
6510 * Now why would this happen ???
6511 */
6512 (void) snprintf(info, 160,
6513 "qlt_handle_unsol_els_completion: can not "
6514 "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
6515 (void *)rsp);
6516 info[159] = 0;
6517 (void) fct_port_shutdown(qlt->qlt_port,
6518 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6519
6520 return;
6521 }
6522
6523 ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
6524 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
6525 if (qcmd->flags & QLT_CMD_ABORTING) {
6526 /*
6527 * This is the same case as "if (status == 5)" above. The
6528 * only difference is that in this case the firmware actually
6529 * finished sending the response. So the abort attempt will
6530 * come back with status ?. We will handle it there.
6531 */
6532 stmf_trace(0, "--UNSOL ELS finished while we are trying to "
6533 "abort it");
6534 return;
6535 }
6536
6537 if (qcmd->dbuf != NULL) {
6538 qlt_dmem_free(NULL, qcmd->dbuf);
6539 qcmd->dbuf = NULL;
6540 }
6541
6542 if (status == 0) {
6543 fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
6544
6545 if ((elsop == ELS_OP_LOGO) &&
6546 (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT)) {
6547 EL(qlt, "reset link since this is LOGO and N2N\n");
6548 (void) snprintf(info, 80,
6549 "qlt_handle_unsol_els_completion: qlt-%p, "
6550 "trigger RFLAG_RESET to recover",
6551 (void *)qlt);
6552
6553 info[79] = 0;
6554 (void) fct_port_shutdown(qlt->qlt_port,
6555 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
6556 info);
6557 }
6558 } else {
6559 EL(qlt, "status (0xh) sucode1=%xh subconde2=%xh\n",
6560 status, subcode1, subcode2);
6561 fct_send_response_done(cmd,
6562 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
6563 }
6564 }
6565
6566 static void
6567 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
6568 {
6569 char info[160];
6570 fct_cmd_t *cmd;
6571 qlt_cmd_t *qcmd;
6572 uint32_t hndl;
6573 uint32_t subcode1, subcode2;
6574 uint16_t status;
6575
6576 hndl = QMEM_RD32(qlt, rsp+4);
6577 status = QMEM_RD16(qlt, rsp+8);
6578 subcode1 = QMEM_RD32(qlt, rsp+0x24);
6579 subcode2 = QMEM_RD32(qlt, rsp+0x28);
6580
6581 if (!CMD_HANDLE_VALID(hndl)) {
6582 EL(qlt, "handle = %xh\n", hndl);
6583 ASSERT(hndl == 0);
6584 /*
6585 * Someone has requested to abort it, but no one is waiting for
6586 * this completion.
6587 */
6588 if ((status != 0) && (status != 8)) {
6589 EL(qlt, "status = %xh\n", status);
6590 /*
6591 * There could be exchange resource leakage, so
6592 * throw HBA fatal error event now
6593 */
6594 (void) snprintf(info, 160,
6595 "qlt_handle_unsol_els_abort_completion: "
6596 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
6597 hndl, status, subcode1, subcode2, (void *)rsp);
6598 info[159] = 0;
6599 (void) fct_port_shutdown(qlt->qlt_port,
6600 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6601 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6602 return;
6603 }
6604
6605 return;
6606 }
6607
6608 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
6609 if (cmd == NULL) {
6610 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
6611 /*
6612 * Why would this happen ??
6613 */
6614 (void) snprintf(info, 160,
6615 "qlt_handle_unsol_els_abort_completion: can not get "
6616 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
6617 (void *)rsp);
6618 info[159] = 0;
6619 (void) fct_port_shutdown(qlt->qlt_port,
6620 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6621
6622 return;
6623 }
6624
6625 ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
6626 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
6627 ASSERT(qcmd->flags & QLT_CMD_ABORTING);
6628
6629 if (qcmd->dbuf != NULL) {
6630 qlt_dmem_free(NULL, qcmd->dbuf);
6631 qcmd->dbuf = NULL;
6632 }
6633
6634 if (status == 0) {
6635 fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
6636 } else if (status == 8) {
6637 fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
6638 } else {
6639 fct_cmd_fca_aborted(cmd,
6640 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
6641 }
6642 }
6643
/*
 * Completion handler for a solicited ELS that this driver transmitted.
 * Records the firmware exchange address (needed if the ELS is later
 * aborted), then completes the command back to the FCT layer.
 *
 * NOTE(review): part of this function's body appears to be missing from
 * this copy of the file; flagged inline.
 */
static void
qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char		info[160];
	fct_cmd_t	*cmd;
	fct_els_t	*els;
	qlt_cmd_t	*qcmd;
	uint32_t	hndl;
	uint32_t	subcode1, subcode2;
	uint16_t	status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * This cannot happen for sol els completion.
		 */
		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_sol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* Save the fw exchange address in case this ELS gets aborted later. */
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when the ABORT IO IOCB returns.
		 */
		return;
	}

	if (qcmd->dbuf != NULL) {
		if (status == 0) {
		/*
		 * NOTE(review): source appears truncated here -- the
		 * success-path handling of the response payload (and the
		 * use of 'els' declared above) is missing, which also
		 * leaves the brace structure below inconsistent.
		 */
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
6719
6720 static void
6721 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
6722 {
6723 fct_cmd_t *cmd;
6724 fct_sol_ct_t *ct;
6725 qlt_cmd_t *qcmd;
6726 uint32_t hndl;
6727 uint16_t status;
6728 char info[160];
6729
6730 hndl = QMEM_RD32(qlt, rsp+4);
6731 status = QMEM_RD16(qlt, rsp+8);
6732
6733 if (!CMD_HANDLE_VALID(hndl)) {
6734 EL(qlt, "handle = %xh\n", hndl);
6735 /*
6736 * Solicited commands will always have a valid handle.
6737 */
6738 (void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
6739 "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
6740 info[159] = 0;
6741 (void) fct_port_shutdown(qlt->qlt_port,
6742 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6743 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6744 return;
6745 }
6746
6747 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
6748 EL(qlt, "cmd=%ph hndl=%xh status=%xh\n", cmd, hndl, status);
6749 if (cmd == NULL) {
6750 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
6751 (void) snprintf(info, 160,
6752 "qlt_handle_ct_completion: cannot find "
6753 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
6754 (void *)rsp);
6755 info[159] = 0;
6756 (void) fct_port_shutdown(qlt->qlt_port,
6757 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6758
6759 return;
6760 }
6761
6762 ct = (fct_sol_ct_t *)cmd->cmd_specific;
6763 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
6764 ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
6765
6766 if (qcmd->flags & QLT_CMD_ABORTING) {
6767 /*
6768 * We will handle it when ABORT IO IOCB returns;
6769 */
6770 return;
6771 }
6772
6773 ASSERT(qcmd->dbuf);
6774 if ((status == 0) || (status == 0x15)) {
6775 qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
6776 bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
6777 qcmd->param.resp_offset,
6778 ct->ct_resp_payload, ct->ct_resp_size);
6779 }
6780 qlt_dmem_free(NULL, qcmd->dbuf);
6781 qcmd->dbuf = NULL;
6782
6783 if ((status == 0) || (status == 0x15)) {
6784 fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
6785 } else {
6786 fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
6787 }
6788 }
6789
6790 static fct_status_t
6791 qlt_verify_resp_entry(qlt_state_t *qlt, uint8_t *rsp, uint16_t qi)
6792 {
6793 uint32_t sig;
6794 int i;
6795 char info[160];
6796
6797 sig = QMEM_RD32_RSPQ(qlt, qi, rsp+0x3c);
6798 for (i = 0; ((sig == 0xdeadbeef) &&
6799 (i < qlt_reprocess_attempt_cnt)); i++) {
6800 (void) ddi_dma_sync(
6801 qlt->mq_resp[qi].queue_mem_mq_dma_handle,
6802 (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
6803 IOCB_SIZE, DDI_DMA_SYNC_FORCPU);
6804
6805 qlt->qlt_resp_reproc_cnt++;
6806 drv_usecwait(qlt_reprocess_delay);
6807 sig = QMEM_RD32_RSPQ(qlt, qi, rsp+0x3c);
6808 }
6809
6810 if (i) {
6811 if (i >= qlt_reprocess_attempt_cnt) {
6812 EL(qlt, "resp entry reprocess failed, %x\n",
6813 qlt->qlt_resp_reproc_cnt);
6814 cmn_err(CE_WARN, "qlt%d: resp entry reprocess"
6815 " failed %x\n",
6816 qlt->instance, qlt->qlt_resp_reproc_cnt);
6817 (void) snprintf(info, 160,
6818 "qlt_handle_ctio_completion: resp entry reprocess"
6819 " failed, %x rsp-%p",
6820 qlt->qlt_resp_reproc_cnt, (void *)rsp);
6821 info[159] = 0;
6822 (void) fct_port_shutdown(qlt->qlt_port,
6823 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
6824 info);
6825 return (QLT_FAILURE);
6826 } else {
6827 EL(qlt, "resp entry reprocess succeeded, %x %x\n",
6828 i, qlt->qlt_resp_reproc_cnt);
6829 }
6830 }
6831
6832 return (QLT_SUCCESS);
6833 }
6834
/*
 * Completion handler for a CTIO (target data/status transfer) IOCB on
 * response queue 'qi'.  Classifies the completion (normal, abort, or
 * firmware-reported failure with no associated command) and routes it to
 * the appropriate FCT completion callback.
 *
 * NOTE(review): part of this function's body appears to be missing from
 * this copy of the file; flagged inline.
 */
static void
qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp, uint16_t qi)
{
	fct_cmd_t	*cmd;
	scsi_task_t	*task;
	qlt_cmd_t	*qcmd;
	stmf_data_buf_t	*dbuf;
	fct_status_t	fc_st;
	uint32_t	iof = 0;
	uint32_t	hndl;
	uint32_t	rex1;
	uint16_t	oxid;
	uint16_t	status;
	uint16_t	flags;
	uint8_t		abort_req;
	uint8_t		n;
	char		info[160];

	/* Bail out if the firmware has not finished writing this entry. */
	if (qlt_verify_resp_entry(qlt, rsp, qi) != QLT_SUCCESS)
		return;

	/* write a deadbeef in the last 4 bytes of the IOCB */
	QMEM_WR32_RSPQ(qlt, qi, rsp+0x3c, 0xdeadbeef);

	/* XXX: Check validity of the IOCB by checking 4th byte. */
	hndl = QMEM_RD32_RSPQ(qlt, qi, rsp+4);
	status = QMEM_RD16_RSPQ(qlt, qi, rsp+8);
	flags = QMEM_RD16_RSPQ(qlt, qi, rsp+0x1a);
	oxid = QMEM_RD16_RSPQ(qlt, qi, rsp+0x20);
	rex1 = QMEM_RD32_RSPQ(qlt, qi, rsp+0x14);
	n = rsp[2];

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
		    (void *)rsp);
		if ((status != 1) && (status != 2)) {
			EL(qlt, "status = %xh\n", status);
			if (status == 0x29) {
				uint8_t *req;

				/*
				 * The qlt port received an ATIO request from
				 * remote port before it issued a plogi.
				 * The qlt fw returned the CTIO completion
				 * status 0x29 to inform driver to do cleanup
				 * (terminate the IO exchange). The subsequent
				 * ABTS from the initiator can be handled
				 * cleanly.
				 */
				qi = 0;
				mutex_enter(&qlt->mq_req[qi].mq_lock);
				req = (uint8_t *)
				    qlt_get_req_entries(qlt, 1, qi);

				if (req == NULL) {
					EL(qlt, "No reqq entry available to "
					    "termi exchg\n");
					mutex_exit(&qlt->mq_req[qi].mq_lock);

					(void) snprintf(info, 160,
					    "qlt_handle_ctio_completion: no "
					    "reqq entry available, status-%x,"
					    "rsp-%p", status, (void *)rsp);

					info[159] = 0;

					(void) fct_port_shutdown(qlt->qlt_port,
					    STMF_RFLAG_FATAL_ERROR |
					    STMF_RFLAG_RESET,
					    info);

					return;
				}

				/* Keep queue bits, set terminate (BIT_14). */
				flags &= 0x1E00;
				flags |= BIT_14;

				bzero(req, IOCB_SIZE);
				req[0] = 0x12;
				req[1] = 0x1;

				QMEM_WR32(qlt, req+4, 0);
				QMEM_WR16(qlt, req+8, 0xFFFF);
				QMEM_WR16(qlt, req+10, 60);
				QMEM_WR32(qlt, req+0x14, rex1);
				QMEM_WR16(qlt, req+0x1A, flags);
				QMEM_WR16(qlt, req+0x20, oxid);

				EL(qlt, "Termi exchg (%Xh)(%Xh)(%Xh) "
				    "rphdl=0xFFFF\n", rex1, flags, oxid);

				qlt_submit_req_entries(qlt, 1, qi);
				mutex_exit(&qlt->mq_req[qi].mq_lock);
			} else {
				/*
				 * There could be exchange resource leakage,
				 * so throw HBA fatal error event now
				 */
				(void) snprintf(info, 160,
				    "qlt_handle_ctio_completion: hndl-%x, "
				    "status-%x, rsp-%p", hndl, status,
				    (void *)rsp);

				info[159] = 0;

				(void) fct_port_shutdown(qlt->qlt_port,
				    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
				    info);
			}
		}

		return;
	}

	/* BIT_14 in the completion flags marks an abort-type CTIO. */
	if (flags & BIT_14) {
		abort_req = 1;
		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
		    (void *)rsp);
	} else {
		abort_req = 0;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_ctio_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    /* STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info); */
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);

		return;
	}

	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	if (qcmd->dbuf_rsp_iu) {
		/* A separately DMA'd FCP_RSP IU was used; release it. */
		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
		qcmd->dbuf_rsp_iu = NULL;
	}

	/* NOTE(review): statuses 1 and 2 are the success codes here. */
	if ((status == 1) || (status == 2)) {
		if (abort_req) {
			fc_st = FCT_ABORT_SUCCESS;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = FCT_SUCCESS;
			if (flags & BIT_15) {
				iof = FCT_IOF_FCA_DONE;
	/*
	 * NOTE(review): source appears truncated here -- the rest of the
	 * status classification and the dbuf lookup (dbuf, task, n are used
	 * below/declared above but their setup is missing) is absent, and
	 * the brace structure is inconsistent as a result.
	 */
		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
		if (flags & BIT_15) {
			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
			    DB_STATUS_GOOD_SENT);
		}

		dbuf->db_xfer_status = fc_st;
		fct_scsi_data_xfer_done(cmd, dbuf, iof);
		return;
	}
	if (!abort_req) {
		/*
		 * This was just a pure status xfer.
		 */
		fct_send_response_done(cmd, fc_st, iof);
		return;
	}

	fct_cmd_fca_aborted(cmd, fc_st, iof);

	EL(qlt, "(%d) (%p)(%xh,%xh),%x %x %x\n",
	    qi, cmd, cmd->cmd_oxid, cmd->cmd_rxid,
	    cmd->cmd_handle, qcmd->fw_xchg_addr,
	    fc_st);
}
7042
7043 static void
7044 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
7045 {
7046 char info[80];
7047 fct_cmd_t *cmd;
7048 qlt_cmd_t *qcmd;
7049 uint32_t h;
7050 uint16_t status;
7051
7052 h = QMEM_RD32(qlt, rsp+4);
7053 status = QMEM_RD16(qlt, rsp+8);
7054
7055 if (!CMD_HANDLE_VALID(h)) {
7056 EL(qlt, "handle = %xh\n", h);
7057 /*
7058 * Solicited commands always have a valid handle.
7059 */
7060 (void) snprintf(info, 80,
7061 "qlt_handle_sol_abort_completion: hndl-"
7062 "%x, status-%x, rsp-%p", h, status, (void *)rsp);
7063 info[79] = 0;
7064 (void) fct_port_shutdown(qlt->qlt_port,
7065 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
7066 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
7067 return;
7068 }
7069 cmd = fct_handle_to_cmd(qlt->qlt_port, h);
7070 if (cmd == NULL) {
7071 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
7072 /*
7073 * What happened to the cmd ??
7074 */
7075 (void) snprintf(info, 80,
7076 "qlt_handle_sol_abort_completion: cannot "
7077 "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
7078 (void *)rsp);
7079 info[79] = 0;
7080 (void) fct_port_shutdown(qlt->qlt_port,
7081 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
7082
7083 return;
7084 }
7085
7086 ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
7087 (cmd->cmd_type == FCT_CMD_SOL_CT));
7088 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
7089 if (qcmd->dbuf != NULL) {
7090 qlt_dmem_free(NULL, qcmd->dbuf);
7091 qcmd->dbuf = NULL;
7092 }
7093 ASSERT(qcmd->flags & QLT_CMD_ABORTING);
7094 EL(qlt, "status=%xh\n", status);
7095 if (status == 0) {
7096 fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
7097 } else if (status == 0x31) {
7098 fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
7099 } else {
7100 fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
7101 }
7102 }
7103
/*
 * An ABTS (abort sequence) arrived from the wire.  Wrap the raw IOCB in a
 * newly allocated FCT_STRUCT_CMD_RCVD_ABTS command, fill in the addressing
 * fields, and post it to the FCT layer for processing.
 */
static void
qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp, uint16_t qi)
{
	qlt_abts_cmd_t	*qcmd;
	fct_cmd_t	*cmd;
	uint32_t	remote_portid;
	uint32_t	rex1;
	uint32_t	rex2;
	char		info[160];

	/* 24-bit S_ID of the aborting remote port, assembled from the IOCB */
	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
	    sizeof (qlt_abts_cmd_t), 0);
	if (cmd == NULL) {
		/* Can't represent the ABTS: fatal-reset rather than lose it. */
		EL(qlt, "fct_alloc cmd==NULL\n");
		(void) snprintf(info, 160,
		    "qlt_handle_rcvd_abts: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	/*
	 * Clear bytes 0xC-0xE of the entry before it is copied into qcmd->buf.
	 * NOTE(review): presumably so the saved IOCB image can be reused
	 * when building the ABTS response -- confirm against the IOCB layout.
	 */
	resp[0xC] = resp[0xD] = resp[0xE] = 0;
	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
	qcmd->qid = qi;		/* remember which response queue this came on */
	bcopy(resp, qcmd->buf, IOCB_SIZE);
	cmd->cmd_port = qlt->qlt_port;
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
	if (cmd->cmd_rp_handle == 0xFFFF)
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;

	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));

	/* Exchange addresses read only for the debug log below. */
	rex1 = QMEM_RD32(qlt, resp+0x10);
	rex2 = QMEM_RD32(qlt, resp+0x3C);

	EL(qlt, "(%d)(%xh %xh) (%xh)(%p) (%xh %xh) (%x)\n",
	    qi, cmd->cmd_oxid, cmd->cmd_rxid, remote_portid,
	    cmd, rex1, rex2, cmd->cmd_handle);

	fct_post_rcvd_cmd(cmd, 0);
}
7153
7154 static void
7155 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp, uint16_t qi)
7156 {
7157 uint16_t status;
7158 char info[80];
7159
7160 status = QMEM_RD16(qlt, resp+8);
7161
7162 if ((status == 0) || (status == 5)) {
7163 EL(qlt, "qi(%d) status =%xh,(%xh %xh)\n",
7164 qi, status, QMEM_RD16(qlt, resp+0x26),
7165 QMEM_RD16(qlt, resp+0x24));
7166 return;
7167 }
7168
7169 EL(qlt, "ABTS status=%x/%x/%x resp_off %x",
7170 status, QMEM_RD32(qlt, resp+0x34),
7171 QMEM_RD32(qlt, resp+0x38),
7172 ((uint32_t)(qlt->mq_resp[0].mq_ndx_to_fw)) << 6);
7173
7174 (void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
7175 status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
7176 ((uint32_t)(qlt->mq_resp[0].mq_ndx_to_fw)) << 6);
7177 info[79] = 0;
7178 (void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
7179 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
7180 }
7181
#ifdef DEBUG
/*
 * Debug-only fault injection: while nonzero, qlt_abort_cmd() decrements
 * this counter and, when the decrement reaches 1, returns FCT_SUCCESS
 * without actually sending the abort -- used to exercise abort-retry paths.
 */
uint32_t qlt_drop_abort_counter = 0;
#endif
7185
7186 fct_status_t
7187 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
7188 {
7189 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
7190
7191 if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
7192 (qlt->qlt_state == FCT_STATE_OFFLINING)) {
7193 return (FCT_NOT_FOUND);
7194 }
7195
7196 #ifdef DEBUG
7197 if (qlt_drop_abort_counter > 0) {
7198 if (atomic_dec_32_nv(&qlt_drop_abort_counter) == 1)
7199 return (FCT_SUCCESS);
7200 }
7201 #endif
7202
7203 EL(qlt, "cmd_type = %x\n", cmd->cmd_type);
7204 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
7205 return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
7206 }
7207
7208 if (flags & FCT_IOF_FORCE_FCA_DONE) {
7209 cmd->cmd_handle = 0;
7210 }
7211
7212 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
7213 /* this is retried ABTS, terminate it now */
7214 return (qlt_send_abts_response(qlt, cmd, 1));
7215 }
7216
7217 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
7218 return (qlt_abort_purex(qlt, cmd));
7219 }
7220
7221 if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
7222 (cmd->cmd_type == FCT_CMD_SOL_CT)) {
7223 return (qlt_abort_sol_cmd(qlt, cmd));
7224 }
7225 EL(qlt, "cmd->cmd_type = %x\n", cmd->cmd_type);
7226
7227 ASSERT(0);
7228 return (FCT_FAILURE);
7229 }
7230
7231 fct_status_t
7232 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
7233 {
7234 uint8_t *req;
7235 qlt_cmd_t *qcmd;
7236 uint16_t qi;
7237
7238 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
7239 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
7240 qi = qcmd->qid;
7241
7242 EL(qlt, "fctcmd-%p, cmd_handle-%xh rportid=%xh\n",
7243 cmd, cmd->cmd_handle, cmd->cmd_rportid);
7244
7245 mutex_enter(&qlt->mq_req[qi].mq_lock);
7246 req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
7247 if (req == NULL) {
7248 EL(qlt, "req == NULL\n");
7249 mutex_exit(&qlt->mq_req[qi].mq_lock);
7250
7251 return (FCT_BUSY);
7252 }
7253 bzero(req, IOCB_SIZE);
7254 req[0] = 0x33; req[1] = 1;
7255 QMEM_WR32(qlt, req+4, cmd->cmd_handle);
7256 if (cmd->cmd_rp) {
7257 QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
7258 } else {
7259 QMEM_WR16(qlt, req+8, 0xFFFF);
7260 }
7261
7262 QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
7263 QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
7264 qlt_submit_req_entries(qlt, 1, qi);
7265 mutex_exit(&qlt->mq_req[qi].mq_lock);
7266
7267 return (FCT_SUCCESS);
7268 }
7269
/*
 * qlt_abort_purex
 *	Terminates the firmware exchange behind an unsolicited ELS
 *	(PUREX) by building an ELS passthru IOCB (entry type 0x53) with
 *	the "terminate exchange" control flags and submitting it on the
 *	request queue the command arrived on.
 *
 * Returns:
 *	FCT_SUCCESS when the IOCB has been submitted; FCT_BUSY when no
 *	request queue entry is currently available.
 */
fct_status_t
qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	qlt_cmd_t *qcmd;
	fct_els_t *els;
	uint8_t elsop, req1f;
	uint16_t qi;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qi = qcmd->qid;		/* queue the original exchange used */
	elsop = els->els_req_payload[0];	/* ELS opcode byte */
	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd,
	    cmd->cmd_handle, elsop);
	req1f = 0x60; /* Terminate xchg */
	/*
	 * Session-affecting ELS ops (PRLI/PRLO/TPRLO/LOGO) additionally
	 * get BIT_4 set in the control byte -- presumably so the
	 * firmware also updates login/session state; TODO confirm
	 * against the ISP24xx firmware interface specification.
	 */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		/* Request queue full; caller must retry later. */
		EL(qlt, "req == NULL\n");
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}

	/* Mark the command so completion processing knows it is aborting. */
	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
	bzero(req, IOCB_SIZE);
	/* Entry type 0x53, entry count 1, control flags byte 0x10 */
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp) {
		/* Logged-in remote port: use its firmware login handle. */
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
	} else {
		/* No login state; fall back to the handle on the cmd. */
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
	}

	/* Firmware exchange address and remote port id of the exchange. */
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
7319
/*
 * qlt_abort_unsol_scsi_cmd
 *	Aborts an unsolicited SCSI command by submitting an abort IOCB
 *	(entry type 0x12) for its firmware exchange on the queue the
 *	command arrived on.
 *
 * Returns:
 *	FCT_SUCCESS when the IOCB has been submitted; FCT_BUSY when no
 *	request queue entry is available.
 */
fct_status_t
qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	uint8_t *req;
	uint16_t flags;
	uint16_t qi;

	qi = qcmd->qid;		/* queue the original exchange used */

	/*
	 * BIT_14 plus the task-attribute bits saved from byte 3 of the
	 * original ATIO, shifted into position for the IOCB flags field
	 * -- presumably so the abort carries the same attributes; TODO
	 * confirm against the firmware interface specification.
	 */
	flags = (uint16_t)(BIT_14 |
	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));

	EL(qlt, "(%d) (%x) (%p) (%x)\n", qi, cmd->cmd_oxid,
	    cmd, qcmd->fw_xchg_addr);

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		/* Request queue full; caller must retry later. */
		EL(qlt, "req == NULL\n");
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}

	/* Mark the command so completion processing knows it is aborting. */
	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
	bzero(req, IOCB_SIZE);
	/* Entry type 0x12, entry count 1 */
	req[0] = 0x12; req[1] = 0x1;
	QMEM_WR32_REQ(qlt, qi, req+4, cmd->cmd_handle);
	QMEM_WR16_REQ(qlt, qi, req+8, cmd->cmd_rp->rp_handle);
	QMEM_WR16_REQ(qlt, qi, req+10, 60);	/* 60 seconds timeout */
	QMEM_WR32_REQ(qlt, qi, req+0x10, cmd->cmd_rportid);
	QMEM_WR32_REQ(qlt, qi, req+0x14, qcmd->fw_xchg_addr);
	QMEM_WR16_REQ(qlt, qi, req+0x1A, flags);
	QMEM_WR16_REQ(qlt, qi, req+0x20, cmd->cmd_oxid);
	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
7359
7360 fct_status_t
7361 qlt_send_cmd(fct_cmd_t *cmd)
7362 {
7363 qlt_state_t *qlt;
7364
7365 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
7366 EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
7367 if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
7368 return (qlt_send_els(qlt, cmd));
7369 } else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
7370 return (qlt_send_ct(qlt, cmd));
7371 }
7372 EL(qlt, "Unknown cmd->cmd_type = %xh\n", cmd->cmd_type);
7373
7374 ASSERT(0);
7375 return (FCT_FAILURE);
7376 }
7377
/*
 * qlt_send_els
 *	Sends a solicited ELS request via an ELS passthru IOCB (entry
 *	type 0x53).  A single DMA buffer holds both the outbound request
 *	payload and the inbound response area; the response starts at
 *	the 8-byte-aligned offset just past the request payload.
 *
 * Returns:
 *	FCT_SUCCESS on submission; FCT_BUSY when either the DMA buffer
 *	or a request queue entry cannot be obtained.
 */
fct_status_t
qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;
	uint16_t qi;

	/* Solicited ELS traffic always goes out on queue 0. */
	qi = 0;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area starts 8-byte aligned after the request payload. */
	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	/* Stage the request payload and flush it out to the device. */
	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
	    els->els_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
		/* Release the staging buffer; nothing was submitted. */
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	/* Entry type 0x53 (ELS passthru), entry count 1 */
	req[0] = 0x53; req[1] = 1;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	/* One command DSD (0xC); 0xE/0x14 -- presumably control flags
	 * and response DSD count; TODO confirm against the firmware spec. */
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
	QMEM_WR16(qlt, (&req[0x14]), 1);
	req[0x16] = els->els_req_payload[0];	/* ELS opcode */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		/* Point-to-point: supply our local N_Port ID explicitly. */
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
	/* Command DSD: address + length of the request payload. */
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
	/* Response DSD: address + length of the response area. */
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset));
	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);

	EL(qlt, "ELS opcode %xh to %xh\n",
	    req[0x16], cmd->cmd_rp->rp_id);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
7446
/*
 * qlt_send_ct
 *	Sends a solicited CT (common transport / name server) request
 *	via a CT passthru IOCB (entry type 0x29).  Like qlt_send_els(),
 *	one DMA buffer carries both the request payload and the response
 *	area, the latter at an 8-byte-aligned offset past the request.
 *
 * Returns:
 *	FCT_SUCCESS on submission; FCT_BUSY when either the DMA buffer
 *	or a request queue entry cannot be obtained.
 */
fct_status_t
qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_sol_ct_t *ct;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;
	uint16_t qi;

	/* Solicited CT traffic always goes out on queue 0. */
	qi = 0;

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area starts 8-byte aligned after the request payload. */
	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	/* Stage the request payload and flush it out to the device. */
	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
	    ct->ct_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
		/* Release the staging buffer; nothing was submitted. */
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	/* Entry type 0x29 (CT passthru), entry count 1 */
	req[0] = 0x29; req[1] = 1;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);		/* command DSD count */
	QMEM_WR16(qlt, (&req[0x10]), 0x20); /* > (2 * RA_TOV) */
	QMEM_WR16(qlt, (&req[0x14]), 1);	/* response DSD count */

	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);

	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset)); /* RESPONSE DSD */
	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);

	EL(qlt, "%p cmd_hdl=%xh %xh %xh\n",
	    cmd, cmd->cmd_handle, ct->ct_req_size, ct->ct_resp_size);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
7510
7511 /*ARGSUSED*/
7512 caddr_t
7513 qlt_str_ptr(qlt_state_t *qlt, caddr_t bp, uint32_t *len)
7514 {
7515 caddr_t sp;
7516 uint32_t i = 0;
7517
7518 sp = bp;
7519 while (*sp++ != 0) i++;
7520 if (i > *len || !(*len -= i)) {
7521 EL(qlt, "full buffer\n");
7522 return (NULL);
7523 }
7524 return (bp += i);
7525 }
7526
7527 static fct_status_t
7528 qlt_27xx_fw_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
7529 {
7530 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
7531 qlt_dmp_template_t *template_buff;
7532 uint32_t tsize, dsize, len;
7533 uint32_t cnt, *dp, *bp;
7534 uint8_t *fw;
7535 caddr_t sp;
7536
7537 EL(qlt, "enter...\n");
7538
7539 mutex_enter(&qlt->qlt_ioctl_lock);
7540 /*
7541 * To make sure that there's no outstanding dumping task
7542 */
7543 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
7544 mutex_exit(&qlt->qlt_ioctl_lock);
7545 EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
7546 qlt->qlt_ioctl_flags);
7547 return (FCT_FAILURE);
7548 }
7549
7550 /*
7551 * To make sure not to overwrite existing dump
7552 */
7553 if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
7554 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
7555 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
7556 /*
7557 * If we have already one dump, but it's not triggered by user
7558 * and the user hasn't fetched it, we shouldn't dump again.
7559 * But if qlt force a fw dump, then we need to overwrite the
7560 * previous one anyway.
7561 */
7562 mutex_exit(&qlt->qlt_ioctl_lock);
7563 EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
7564 qlt->qlt_ioctl_flags);
7565 cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
7566 "is one already outstanding.", qlt->instance);
7567 return (FCT_FAILURE);
7568 }
7569
7570 if (qlt->dmp_template_addr == NULL) {
7571 mutex_exit(&qlt->qlt_ioctl_lock);
7572 EL(qlt, "dmp_template_addr is NULL, can't "
7573 "perform firmware dump\n");
7574 cmn_err(CE_WARN, "!qlt(%d) dmp_template_addr is NULL, can't "
7575 "perform firmware dump", qlt->instance);
7576 return (FCT_FAILURE);
7577 }
7578
7579 qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
7580 if (ssci != NULL && (ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
7581 qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
7582 } else {
7583 qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
7584 }
7585 mutex_exit(&qlt->qlt_ioctl_lock);
7586
7587 template_buff = (qlt_dmp_template_t *)qlt->dmp_template_addr;
7588 tsize = template_buff->hdr.size_of_template;
7589
7590 if (qlt->fw_bin_dump_size == 0) {
7591 qlt->fw_bin_dump_buf = kmem_zalloc(tsize, KM_NOSLEEP);
7592 if (qlt->fw_bin_dump_buf == NULL) {
7593 cmn_err(CE_WARN, "!qlt(%d) cannot alloc bin dump buf",
7594 qlt->instance);
7595 return (FCT_FAILURE);
7596 }
7597 cnt = (uint32_t)(tsize / sizeof (uint32_t));
7598 dp = (uint32_t *)qlt->fw_bin_dump_buf;
7599 bp = (uint32_t *)&template_buff->hdr;
7600 while (cnt--) {
7601 *dp++ = ddi_get32(qlt->dmp_template_acc_handle, bp++);
7602 }
7603 qlt->fw_bin_dump_size = qlt_27xx_dmp_parse_template(qlt,
7604 (qlt_dt_hdr_t *)qlt->fw_bin_dump_buf, NULL, 0);
7605 kmem_free(qlt->fw_bin_dump_buf, tsize);
7606 qlt->fw_bin_dump_buf = NULL;
7607
7608 if (qlt->fw_bin_dump_size == 0) {
7609 return (FCT_FAILURE);
7610 }
7611
7612 /*
7613 * Determine ascii dump file size
7614 * 2 ascii bytes per binary byte + a space and
7615 * a newline every 16 binary bytes
7616 */
7617 qlt->fw_ascii_dump_size = qlt->fw_bin_dump_size << 1;
7618 qlt->fw_ascii_dump_size += qlt->fw_bin_dump_size;
7619 qlt->fw_ascii_dump_size += qlt->fw_bin_dump_size / 16 + 1;
7620
7621 EL(qlt, "fw_bin_dump_size=%xh, "
7622 "fw_acsii_dump_size=%xh\n", qlt->fw_bin_dump_size,
7623 qlt->fw_ascii_dump_size);
7624 }
7625
7626 if (qlt->fw_bin_dump_buf != NULL) {
7627 /* overwrite the previous fw dump by qlt forced fw dump */
7628 bzero((void *) qlt->fw_bin_dump_buf, qlt->fw_bin_dump_size);
7629 } else {
7630 qlt->fw_bin_dump_buf = kmem_zalloc(qlt->fw_bin_dump_size,
7631 KM_NOSLEEP);
7632 if (qlt->fw_bin_dump_buf == NULL) {
7633 qlt->fw_bin_dump_size = 0;
7634 EL(qlt, "done, failed alloc bin dump buf\n");
7635 return (FCT_FAILURE);
7636 }
7637 }
7638
7639 if ((qlt->fw_dump_size != 0) &&
7640 (qlt->fw_dump_size != qlt->fw_ascii_dump_size)) {
7641 if (qlt->qlt_fwdump_buf != NULL) {
7642 /* Release previously allocated buffer */
7643 kmem_free(qlt->qlt_fwdump_buf, qlt->fw_dump_size);
7644 qlt->qlt_fwdump_buf = NULL;
7645 }
7646 }
7647
7648 if (qlt->qlt_fwdump_buf == NULL) {
7649 qlt->qlt_fwdump_buf = kmem_zalloc(qlt->fw_ascii_dump_size,
7650 KM_NOSLEEP);
7651 if (qlt->qlt_fwdump_buf == NULL) {
7652 EL(qlt, "done, failed alloc ascii fw dump buf\n");
7653 return (FCT_FAILURE);
7654 }
7655 qlt->fw_dump_size = qlt->fw_ascii_dump_size;
7656 }
7657
7658 /* Disable ISP interrupts. */
7659 REG_WR32(qlt, 0xc, 0);
7660
7661 cnt = (uint32_t)(tsize / sizeof (uint32_t));
7662 dp = (uint32_t *)qlt->fw_bin_dump_buf;
7663 bp = (uint32_t *)&template_buff->hdr;
7664 while (cnt--) {
7665 *dp++ = ddi_get32(qlt->dmp_template_acc_handle, bp++);
7666 }
7667
7668 (void) qlt_27xx_dmp_parse_template(qlt,
7669 (qlt_dt_hdr_t *)qlt->fw_bin_dump_buf,
7670 (uint8_t *)dp, qlt->fw_bin_dump_size);
7671
7672 #ifdef _BIG_ENDIAN
7673 cnt = (uint32_t)(tsize / sizeof (uint32_t));
7674 dp = (uint32_t *)qlt->fw_bin_dump_buf;
7675 while (cnt--) {
7676 qlt_chg_endian((uint8_t *)dp, 4);
7677 dp++;
7678 }
7679 #endif
7680
7681 /*
7682 * Build ascii dump
7683 */
7684 len = qlt->fw_ascii_dump_size;
7685 dsize = qlt->fw_bin_dump_size;
7686 fw = (uint8_t *)qlt->fw_bin_dump_buf;
7687 sp = qlt->qlt_fwdump_buf;
7688
7689 EL(qlt, "fw_dump_buffer=%ph, fw=%ph, fw_ascii_dump_size=%xh, "
7690 "dsize=%xh\n", (void *)qlt->qlt_fwdump_buf, (void *)fw,
7691 len, dsize);
7692
7693 /*
7694 * 2 ascii bytes per binary byte + a space and
7695 * a newline every 16 binary bytes
7696 */
7697 cnt = 0;
7698 while (cnt < dsize) {
7699 (void) snprintf(sp, len, "%02x ", *fw++);
7700 if ((sp = qlt_str_ptr(qlt, sp, &len)) == NULL) {
7701 break;
7702 }
7703 if (++cnt % 16 == 0) {
7704 (void) snprintf(sp, len, "\n");
7705 if ((sp = qlt_str_ptr(qlt, sp, &len)) == NULL) {
7706 break;
7707 }
7708 }
7709 }
7710 if (cnt % 16 != 0) {
7711 (void) snprintf(sp, len, "\n");
7712 sp = qlt_str_ptr(qlt, sp, &len);
7713 }
7714
7715 mutex_enter(&qlt->qlt_ioctl_lock);
7716 qlt->qlt_ioctl_flags &=
7717 ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
7718 qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
7719 mutex_exit(&qlt->qlt_ioctl_lock);
7720
7721 EL(qlt, "done...\n");
7722 return (FCT_SUCCESS);
7723 }
7724
7725 /*
7726 * All QLT_FIRMWARE_* will mainly be handled in this function
7727 * It can not be called in interrupt context
7728 *
7729 * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
7730 * and qlt_ioctl_lock
7731 */
7732 static fct_status_t
7733 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
7734 {
7735 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
7736 int i;
7737 int retries, n;
7738 uint_t size_left;
7739 char c = ' ';
7740 uint32_t addr, endaddr, words_to_read;
7741 caddr_t buf;
7742 fct_status_t ret;
7743
7744 if (qlt->qlt_27xx_chip) {
7745 return (qlt_27xx_fw_dump(port, ssci));
7746 }
7747 mutex_enter(&qlt->qlt_ioctl_lock);
7748 /*
7749 * To make sure that there's no outstanding dumping task
7750 */
7751 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
7752 mutex_exit(&qlt->qlt_ioctl_lock);
7753 EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
7754 qlt->qlt_ioctl_flags);
7755 return (FCT_FAILURE);
7756 }
7757
7758 /*
7759 * To make sure not to overwrite existing dump
7760 */
7761 if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
7762 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
7763 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
7764 /*
7765 * If we have already one dump, but it's not triggered by user
7766 * and the user hasn't fetched it, we shouldn't dump again.
7767 */
7768 mutex_exit(&qlt->qlt_ioctl_lock);
7769 EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
7770 qlt->qlt_ioctl_flags);
7771 cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
7772 "is one already outstanding.", qlt->instance);
7773 return (FCT_FAILURE);
7774 }
7775 qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
7776 if ((ssci != NULL) && (ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
7777 qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
7778 } else {
7779 qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
7780 }
7781 mutex_exit(&qlt->qlt_ioctl_lock);
7782
7783 size_left = QLT_FWDUMP_BUFSIZE;
7784 if (qlt->qlt_mq_enabled && qlt->qlt_queue_cnt >= 8) {
7785 size_left += 512 * 1024;
7786 }
7787 qlt->fw_dump_size = size_left;
7788 if (!qlt->qlt_fwdump_buf) {
7789 ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
7790 /*
7791 * It's the only place that we allocate buf for dumping. After
7792 * it's allocated, we will use it until the port is detached.
7793 */
7794 qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_NOSLEEP);
7795 if (qlt->qlt_fwdump_buf == NULL) {
7796 EL(qlt, "cannot alloc fwdump buffer\n");
7797 cmn_err(CE_WARN, "!qlt(%d): cannot alloc fwdump buf",
7798 qlt->instance);
7799 return (FCT_FAILURE);
7800 }
7801 }
7802
7803 EL(qlt, "starting firmware dump...\n");
7804 cmn_err(CE_WARN, "!qlt(%d) starting firmware dump...",
7805 qlt->instance);
7806
7807 /*
7808 * Start to dump firmware
7809 */
7810 buf = (caddr_t)qlt->qlt_fwdump_buf;
7811
7812 /*
7813 * Print the ISP firmware revision number and attributes information
7814 * Read the RISC to Host Status register
7815 */
7816 n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
7817 "Attributes %04x\n\nR2H Status register\n%08x",
7818 qlt->fw_major, qlt->fw_minor,
7819 qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
7820 buf += n; size_left -= n;
7821
7822 /*
7823 * Before pausing the RISC, make sure no mailbox can execute
7824 */
7825 mutex_enter(&qlt->mbox_lock);
7826 if ((qlt->mbox_io_state != MBOX_STATE_UNKNOWN) &&
7827 (qlt->qlt_intr_enabled)) {
7828 /*
7829 * Wait to grab the mailboxes
7830 */
7831 for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
7832 (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
7833 (void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
7834 ddi_get_lbolt() + drv_usectohz(1000000));
7835 if (retries > 5) {
7836 mutex_exit(&qlt->mbox_lock);
7837 EL(qlt, "can't drain out mailbox commands\n");
7838 goto dump_fail;
7839 }
7840 }
7841 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
7842 cv_broadcast(&qlt->mbox_cv);
7843 }
7844 mutex_exit(&qlt->mbox_lock);
7845
7846 /*
7847 * Pause the RISC processor
7848 */
7849 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
7850
7851 /*
7852 * Wait for the RISC processor to pause
7853 */
7854 for (i = 0; i < 200; i++) {
7855 if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
7856 break;
7857 }
7858 drv_usecwait(1000);
7859 }
7860 if (i == 200) {
7861 EL(qlt, "can't pause\n");
7862 return (FCT_FAILURE);
7863 }
7864
7865 if (qlt->qlt_83xx_chip) {
7866 /* Disable ECC checks in FB registers */
7867 REG_WR32(qlt, 0x54, 0x6000);
7868 REG_WR32(qlt, 0xC0, 0); /* 6000h */
7869 REG_WR32(qlt, 0xCC, 0); /* 6003h */
7870 REG_WR32(qlt, 0x54, 0x6010);
7871 REG_WR32(qlt, 0xD4, 0); /* 6015h */
7872
7873 /* disable ECC detection in PCR whilst dumping */
7874 REG_WR32(qlt, 0x54, 0xF70);
7875 REG_WR32(qlt, 0xF0, 0x60000000);
7876 }
7877
7878 if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
7879 (!qlt->qlt_83xx_chip) && (!qlt->qlt_27xx_chip)) {
7880 goto over_25xx_specific_dump;
7881 }
7882 n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
7883 buf += n; size_left -= n;
7884 REG_WR32(qlt, 0x54, 0x7000);
7885 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7886 buf += n; size_left -= n;
7887 REG_WR32(qlt, 0x54, 0x7010);
7888 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7889 buf += n; size_left -= n;
7890 if (qlt->qlt_83xx_chip) {
7891 REG_WR32(qlt, 0x54, 0x7040);
7892 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7893 buf += n; size_left -= n;
7894 }
7895 REG_WR32(qlt, 0x54, 0x7C00);
7896
7897 n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
7898 buf += n; size_left -= n;
7899 REG_WR32(qlt, 0xC0, 0x1);
7900 n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
7901 buf += n; size_left -= n;
7902 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
7903 buf += n; size_left -= n;
7904 REG_WR32(qlt, 0xC0, 0x0);
7905
7906 /* don't need to do this for 83xx */
7907 if ((!qlt->qlt_83xx_chip) && (qlt->qlt_mq_enabled)) {
7908 uint16_t qi;
7909
7910 for (qi = 0; qi < qlt->qlt_queue_cnt; qi++) {
7911
7912 n = (int)snprintf(buf, size_left,
7913 "\n\nQueue Pointers #%x\n", qi);
7914 buf += n; size_left -= n;
7915
7916 n = (int)snprintf(buf, size_left, "%08x ",
7917 MQBAR_RD32(qlt,
7918 (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN));
7919 buf += n; size_left -= n;
7920 n = (int)snprintf(buf, size_left, "%08x ",
7921 MQBAR_RD32(qlt,
7922 (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT));
7923 buf += n; size_left -= n;
7924 n = (int)snprintf(buf, size_left, "%08x ",
7925 MQBAR_RD32(qlt,
7926 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN));
7927 buf += n; size_left -= n;
7928 n = (int)snprintf(buf, size_left, "%08x",
7929 MQBAR_RD32(qlt,
7930 (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_OUT));
7931 buf += n; size_left -= n;
7932 }
7933 }
7934
7935 over_25xx_specific_dump:;
7936 n = (int)snprintf(buf, size_left, "\n\nHost Interface registers\n");
7937 buf += n; size_left -= n;
7938 /*
7939 * Capture data from 32 registers
7940 */
7941 n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
7942 buf += n; size_left -= n;
7943
7944 /*
7945 * Disable interrupts
7946 */
7947 REG_WR32(qlt, 0xc, 0);
7948 EL(qlt, "Disable interrupt\n");
7949
7950 /*
7951 * Shadow registers
7952 */
7953 n = (int)snprintf(buf, size_left, "\nShadow registers\n");
7954 buf += n; size_left -= n;
7955
7956 REG_WR32(qlt, 0x54, 0xF70);
7957 addr = 0xb0000000;
7958 for (i = 0; i < 0xb; i++) {
7959 if ((!qlt->qlt_25xx_chip) &&
7960 (!qlt->qlt_81xx_chip) &&
7961 (!qlt->qlt_83xx_chip) &&
7962 (i >= 7)) {
7963 break;
7964 }
7965 if (i && ((i & 7) == 0)) {
7966 n = (int)snprintf(buf, size_left, "\n");
7967 buf += n; size_left -= n;
7968 }
7969 REG_WR32(qlt, 0xF0, addr);
7970 n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
7971 buf += n; size_left -= n;
7972 addr += 0x100000;
7973 }
7974
7975 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
7976 (qlt->qlt_83xx_chip)) {
7977 REG_WR32(qlt, 0x54, 0x10);
7978 n = (int)snprintf(buf, size_left,
7979 "\n\nRISC IO register\n%08x", REG_RD32(qlt, 0xC0));
7980 buf += n; size_left -= n;
7981 }
7982
7983 /*
7984 * Mailbox registers
7985 */
7986 n = (int)snprintf(buf, size_left, "\n\nMailbox registers\n");
7987 buf += n; size_left -= n;
7988 for (i = 0; i < 32; i += 2) {
7989 if ((i + 2) & 15) {
7990 c = ' ';
7991 } else {
7992 c = '\n';
7993 }
7994 n = (int)snprintf(buf, size_left, "%04x %04x%c",
7995 REG_RD16(qlt, 0x80 + (i << 1)),
7996 REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
7997 buf += n; size_left -= n;
7998 }
7999
8000 /*
8001 * Transfer sequence registers
8002 */
8003 n = (int)snprintf(buf, size_left, "\nXSEQ GP registers\n");
8004 buf += n; size_left -= n;
8005
8006 if (qlt->qlt_83xx_chip) {
8007 REG_WR32(qlt, 0x54, 0xBE00);
8008 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8009 buf += n; size_left -= n;
8010 REG_WR32(qlt, 0x54, 0xBE10);
8011 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8012 buf += n; size_left -= n;
8013 REG_WR32(qlt, 0x54, 0xBE20);
8014 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8015 buf += n; size_left -= n;
8016 REG_WR32(qlt, 0x54, 0xBE30);
8017 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8018 buf += n; size_left -= n;
8019 REG_WR32(qlt, 0x54, 0xBE40);
8020 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8021 buf += n; size_left -= n;
8022 REG_WR32(qlt, 0x54, 0xBE50);
8023 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8024 buf += n; size_left -= n;
8025 REG_WR32(qlt, 0x54, 0xBE60);
8026 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8027 buf += n; size_left -= n;
8028 REG_WR32(qlt, 0x54, 0xBE70);
8029 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8030 buf += n; size_left -= n;
8031 }
8032 REG_WR32(qlt, 0x54, 0xBF00);
8033 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8034 buf += n; size_left -= n;
8035 REG_WR32(qlt, 0x54, 0xBF10);
8036 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8037 buf += n; size_left -= n;
8038 REG_WR32(qlt, 0x54, 0xBF20);
8039 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8040 buf += n; size_left -= n;
8041 REG_WR32(qlt, 0x54, 0xBF30);
8042 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8043 buf += n; size_left -= n;
8044 REG_WR32(qlt, 0x54, 0xBF40);
8045 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8046 buf += n; size_left -= n;
8047 REG_WR32(qlt, 0x54, 0xBF50);
8048 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8049 buf += n; size_left -= n;
8050 REG_WR32(qlt, 0x54, 0xBF60);
8051 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8052 buf += n; size_left -= n;
8053 REG_WR32(qlt, 0x54, 0xBF70);
8054 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8055 buf += n; size_left -= n;
8056 n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
8057 buf += n; size_left -= n;
8058 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8059 (qlt->qlt_83xx_chip)) {
8060 REG_WR32(qlt, 0x54, 0xBFC0);
8061 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8062 buf += n; size_left -= n;
8063 REG_WR32(qlt, 0x54, 0xBFD0);
8064 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8065 buf += n; size_left -= n;
8066 }
8067 REG_WR32(qlt, 0x54, 0xBFE0);
8068 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8069 buf += n; size_left -= n;
8070 n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
8071 buf += n; size_left -= n;
8072 REG_WR32(qlt, 0x54, 0xBFF0);
8073 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8074 buf += n; size_left -= n;
8075
8076 if (qlt->qlt_83xx_chip) {
8077 n = (int)snprintf(buf, size_left, "\nXSEQ-2 registers\n");
8078 buf += n; size_left -= n;
8079 REG_WR32(qlt, 0x54, 0xBEF0);
8080 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8081 buf += n; size_left -= n;
8082 }
8083
8084 /*
8085 * Receive sequence registers
8086 */
8087 n = (int)snprintf(buf, size_left, "\nRSEQ GP registers\n");
8088 buf += n; size_left -= n;
8089 if (qlt->qlt_83xx_chip) {
8090 REG_WR32(qlt, 0x54, 0xFE00);
8091 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8092 buf += n; size_left -= n;
8093 REG_WR32(qlt, 0x54, 0xFE10);
8094 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8095 buf += n; size_left -= n;
8096 REG_WR32(qlt, 0x54, 0xFE20);
8097 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8098 buf += n; size_left -= n;
8099 REG_WR32(qlt, 0x54, 0xFE30);
8100 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8101 buf += n; size_left -= n;
8102 REG_WR32(qlt, 0x54, 0xFE40);
8103 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8104 buf += n; size_left -= n;
8105 REG_WR32(qlt, 0x54, 0xFE50);
8106 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8107 buf += n; size_left -= n;
8108 REG_WR32(qlt, 0x54, 0xFE60);
8109 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8110 buf += n; size_left -= n;
8111 REG_WR32(qlt, 0x54, 0xFE70);
8112 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8113 buf += n; size_left -= n;
8114 }
8115 REG_WR32(qlt, 0x54, 0xFF00);
8116 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8117 buf += n; size_left -= n;
8118 REG_WR32(qlt, 0x54, 0xFF10);
8119 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8120 buf += n; size_left -= n;
8121 REG_WR32(qlt, 0x54, 0xFF20);
8122 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8123 buf += n; size_left -= n;
8124 REG_WR32(qlt, 0x54, 0xFF30);
8125 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8126 buf += n; size_left -= n;
8127 REG_WR32(qlt, 0x54, 0xFF40);
8128 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8129 buf += n; size_left -= n;
8130 REG_WR32(qlt, 0x54, 0xFF50);
8131 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8132 buf += n; size_left -= n;
8133 REG_WR32(qlt, 0x54, 0xFF60);
8134 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8135 buf += n; size_left -= n;
8136 REG_WR32(qlt, 0x54, 0xFF70);
8137 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8138 buf += n; size_left -= n;
8139 n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
8140 buf += n; size_left -= n;
8141 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8142 (qlt->qlt_83xx_chip)) {
8143 REG_WR32(qlt, 0x54, 0xFFC0);
8144 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8145 buf += n; size_left -= n;
8146 }
8147 REG_WR32(qlt, 0x54, 0xFFD0);
8148 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8149 buf += n; size_left -= n;
8150 n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
8151 buf += n; size_left -= n;
8152 REG_WR32(qlt, 0x54, 0xFFE0);
8153 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8154 buf += n; size_left -= n;
8155 n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
8156 buf += n; size_left -= n;
8157 REG_WR32(qlt, 0x54, 0xFFF0);
8158 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8159 buf += n; size_left -= n;
8160 if (qlt->qlt_83xx_chip) {
8161 n = (int)snprintf(buf, size_left, "\nRSEQ-3 registers\n");
8162 buf += n; size_left -= n;
8163 REG_WR32(qlt, 0x54, 0xFEF0);
8164 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8165 buf += n; size_left -= n;
8166 }
8167
8168 if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
8169 (!qlt->qlt_83xx_chip))
8170 goto over_aseq_regs;
8171
8172 /*
8173 * Auxiliary sequencer registers
8174 */
8175 n = (int)snprintf(buf, size_left, "\nASEQ GP registers\n");
8176 buf += n; size_left -= n;
8177 REG_WR32(qlt, 0x54, 0xB000);
8178 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8179 buf += n; size_left -= n;
8180 REG_WR32(qlt, 0x54, 0xB010);
8181 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8182 buf += n; size_left -= n;
8183 REG_WR32(qlt, 0x54, 0xB020);
8184 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8185 buf += n; size_left -= n;
8186 REG_WR32(qlt, 0x54, 0xB030);
8187 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8188 buf += n; size_left -= n;
8189 REG_WR32(qlt, 0x54, 0xB040);
8190 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8191 buf += n; size_left -= n;
8192 REG_WR32(qlt, 0x54, 0xB050);
8193 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8194 buf += n; size_left -= n;
8195 REG_WR32(qlt, 0x54, 0xB060);
8196 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8197 buf += n; size_left -= n;
8198 REG_WR32(qlt, 0x54, 0xB070);
8199 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8200 buf += n; size_left -= n;
8201 if (qlt->qlt_83xx_chip) {
8202 REG_WR32(qlt, 0x54, 0xB100);
8203 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8204 buf += n; size_left -= n;
8205 REG_WR32(qlt, 0x54, 0xB110);
8206 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8207 buf += n; size_left -= n;
8208 REG_WR32(qlt, 0x54, 0xB120);
8209 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8210 buf += n; size_left -= n;
8211 REG_WR32(qlt, 0x54, 0xB130);
8212 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8213 buf += n; size_left -= n;
8214 REG_WR32(qlt, 0x54, 0xB140);
8215 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8216 buf += n; size_left -= n;
8217 REG_WR32(qlt, 0x54, 0xB150);
8218 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8219 buf += n; size_left -= n;
8220 REG_WR32(qlt, 0x54, 0xB160);
8221 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8222 buf += n; size_left -= n;
8223 REG_WR32(qlt, 0x54, 0xB170);
8224 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8225 buf += n; size_left -= n;
8226 }
8227 n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
8228 buf += n; size_left -= n;
8229 REG_WR32(qlt, 0x54, 0xB0C0);
8230 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8231 buf += n; size_left -= n;
8232 REG_WR32(qlt, 0x54, 0xB0D0);
8233 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8234 buf += n; size_left -= n;
8235 n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
8236 buf += n; size_left -= n;
8237 REG_WR32(qlt, 0x54, 0xB0E0);
8238 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8239 buf += n; size_left -= n;
8240 n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
8241 buf += n; size_left -= n;
8242 REG_WR32(qlt, 0x54, 0xB0F0);
8243 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8244 buf += n; size_left -= n;
8245 if (qlt->qlt_83xx_chip) {
8246 n = (int)snprintf(buf, size_left, "\nASEQ-3 registers\n");
8247 buf += n; size_left -= n;
8248 REG_WR32(qlt, 0x54, 0xB1F0);
8249 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8250 buf += n; size_left -= n;
8251 }
8252
8253 over_aseq_regs:;
8254
8255 /*
8256 * Command DMA registers
8257 */
8258 n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
8259 buf += n; size_left -= n;
8260 REG_WR32(qlt, 0x54, 0x7100);
8261 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8262 buf += n; size_left -= n;
8263 if (qlt->qlt_83xx_chip) {
8264 REG_WR32(qlt, 0x54, 0x7120);
8265 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8266 buf += n; size_left -= n;
8267 REG_WR32(qlt, 0x54, 0x7130);
8268 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8269 buf += n; size_left -= n;
8270 REG_WR32(qlt, 0x54, 0x71F0);
8271 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8272 buf += n; size_left -= n;
8273 }
8274
8275 /*
8276 * Queues
8277 */
8278 n = (int)snprintf(buf, size_left,
8279 "\nRequest0 Queue DMA Channel registers\n");
8280 buf += n; size_left -= n;
8281 REG_WR32(qlt, 0x54, 0x7200);
8282 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
8283 buf += n; size_left -= n;
8284 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
8285 buf += n; size_left -= n;
8286
8287 n = (int)snprintf(buf, size_left,
8288 "\n\nResponse0 Queue DMA Channel registers\n");
8289 buf += n; size_left -= n;
8290 REG_WR32(qlt, 0x54, 0x7300);
8291 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
8292 buf += n; size_left -= n;
8293 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
8413 buf += n; size_left -= n;
8414 REG_WR32(qlt, 0x54, 0x3010);
8415 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8416 buf += n; size_left -= n;
8417 REG_WR32(qlt, 0x54, 0x3020);
8418 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8419 buf += n; size_left -= n;
8420 REG_WR32(qlt, 0x54, 0x3030);
8421 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8422 buf += n; size_left -= n;
8423 REG_WR32(qlt, 0x54, 0x3040);
8424 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8425 buf += n; size_left -= n;
8426 REG_WR32(qlt, 0x54, 0x3050);
8427 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8428 buf += n; size_left -= n;
8429 REG_WR32(qlt, 0x54, 0x3060);
8430 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8431 buf += n; size_left -= n;
8432
8433 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8434 (qlt->qlt_83xx_chip)) {
8435 REG_WR32(qlt, 0x54, 0x3070);
8436 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8437 buf += n; size_left -= n;
8438 }
8439
8440 /*
8441 * Fibre protocol module registers
8442 */
8443 n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
8444 buf += n; size_left -= n;
8445 REG_WR32(qlt, 0x54, 0x4000);
8446 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8447 buf += n; size_left -= n;
8448 REG_WR32(qlt, 0x54, 0x4010);
8449 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8450 buf += n; size_left -= n;
8451 REG_WR32(qlt, 0x54, 0x4020);
8452 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8453 buf += n; size_left -= n;
8454 REG_WR32(qlt, 0x54, 0x4030);
8455 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8456 buf += n; size_left -= n;
8457 REG_WR32(qlt, 0x54, 0x4040);
8458 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8459 buf += n; size_left -= n;
8460 REG_WR32(qlt, 0x54, 0x4050);
8461 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8462 buf += n; size_left -= n;
8463 REG_WR32(qlt, 0x54, 0x4060);
8464 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8465 buf += n; size_left -= n;
8466 REG_WR32(qlt, 0x54, 0x4070);
8467 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8468 buf += n; size_left -= n;
8469 REG_WR32(qlt, 0x54, 0x4080);
8470 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8471 buf += n; size_left -= n;
8472 REG_WR32(qlt, 0x54, 0x4090);
8473 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8474 buf += n; size_left -= n;
8475 REG_WR32(qlt, 0x54, 0x40A0);
8476 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8477 buf += n; size_left -= n;
8478 REG_WR32(qlt, 0x54, 0x40B0);
8479 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8480 buf += n; size_left -= n;
8481 if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) {
8482 REG_WR32(qlt, 0x54, 0x40C0);
8483 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8484 buf += n; size_left -= n;
8485 REG_WR32(qlt, 0x54, 0x40D0);
8486 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8487 buf += n; size_left -= n;
8488 }
8489 if (qlt->qlt_83xx_chip) {
8490 REG_WR32(qlt, 0x54, 0x40E0);
8491 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8492 buf += n; size_left -= n;
8493 REG_WR32(qlt, 0x54, 0x40F0);
8494 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8495 buf += n; size_left -= n;
8496
8497 n = (int)snprintf(buf, size_left, "\nRQ0 Array registers\n");
8498 buf += n; size_left -= n;
8499 REG_WR32(qlt, 0x54, 0x5C00);
8500 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8501 buf += n; size_left -= n;
8502 REG_WR32(qlt, 0x54, 0x5C10);
8503 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8504 buf += n; size_left -= n;
8505 REG_WR32(qlt, 0x54, 0x5C20);
8506 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8507 buf += n; size_left -= n;
8508 REG_WR32(qlt, 0x54, 0x5C30);
8509 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8510 buf += n; size_left -= n;
8511 REG_WR32(qlt, 0x54, 0x5C40);
8512 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8513 buf += n; size_left -= n;
8514 REG_WR32(qlt, 0x54, 0x5C50);
8515 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8516 buf += n; size_left -= n;
8517 REG_WR32(qlt, 0x54, 0x5C60);
8518 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8519 buf += n; size_left -= n;
8520 REG_WR32(qlt, 0x54, 0x5C70);
8521 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8522 buf += n; size_left -= n;
8523 REG_WR32(qlt, 0x54, 0x5C80);
8524 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8525 buf += n; size_left -= n;
8526 REG_WR32(qlt, 0x54, 0x5C90);
8527 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8528 buf += n; size_left -= n;
8529 REG_WR32(qlt, 0x54, 0x5CA0);
8530 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8531 buf += n; size_left -= n;
8532 REG_WR32(qlt, 0x54, 0x5CB0);
8533 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8534 buf += n; size_left -= n;
8535 REG_WR32(qlt, 0x54, 0x5CC0);
8536 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8537 buf += n; size_left -= n;
8538 REG_WR32(qlt, 0x54, 0x5CD0);
8539 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8540 buf += n; size_left -= n;
8541 REG_WR32(qlt, 0x54, 0x5CE0);
8542 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8543 buf += n; size_left -= n;
8544 REG_WR32(qlt, 0x54, 0x5CF0);
8545 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8546 buf += n; size_left -= n;
8547
8548 n = (int)snprintf(buf, size_left, "\nRQ1 Array registers\n");
8549 buf += n; size_left -= n;
8550 REG_WR32(qlt, 0x54, 0x5D00);
8551 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8552 buf += n; size_left -= n;
8553 REG_WR32(qlt, 0x54, 0x5D10);
8554 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8555 buf += n; size_left -= n;
8556 REG_WR32(qlt, 0x54, 0x5D20);
8557 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8558 buf += n; size_left -= n;
8559 REG_WR32(qlt, 0x54, 0x5D30);
8560 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8561 buf += n; size_left -= n;
8562 REG_WR32(qlt, 0x54, 0x5D40);
8563 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8564 buf += n; size_left -= n;
8565 REG_WR32(qlt, 0x54, 0x5D50);
8566 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8567 buf += n; size_left -= n;
8568 REG_WR32(qlt, 0x54, 0x5D60);
8569 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8570 buf += n; size_left -= n;
8571 REG_WR32(qlt, 0x54, 0x5D70);
8572 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8573 buf += n; size_left -= n;
8574 REG_WR32(qlt, 0x54, 0x5D80);
8575 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8576 buf += n; size_left -= n;
8577 REG_WR32(qlt, 0x54, 0x5D90);
8578 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8579 buf += n; size_left -= n;
8580 REG_WR32(qlt, 0x54, 0x5DA0);
8581 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8582 buf += n; size_left -= n;
8583 REG_WR32(qlt, 0x54, 0x5DB0);
8584 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8585 buf += n; size_left -= n;
8586 REG_WR32(qlt, 0x54, 0x5DC0);
8587 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8588 buf += n; size_left -= n;
8589 REG_WR32(qlt, 0x54, 0x5DD0);
8590 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8591 buf += n; size_left -= n;
8592 REG_WR32(qlt, 0x54, 0x5DE0);
8593 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8594 buf += n; size_left -= n;
8595 REG_WR32(qlt, 0x54, 0x5DF0);
8596 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8597 buf += n; size_left -= n;
8598
8599 n = (int)snprintf(buf, size_left, "\nRP0 Array registers\n");
8600 buf += n; size_left -= n;
8601 REG_WR32(qlt, 0x54, 0x5E00);
8602 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8603 buf += n; size_left -= n;
8604 REG_WR32(qlt, 0x54, 0x5E10);
8605 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8606 buf += n; size_left -= n;
8607 REG_WR32(qlt, 0x54, 0x5E20);
8608 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8609 buf += n; size_left -= n;
8610 REG_WR32(qlt, 0x54, 0x5E30);
8611 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8612 buf += n; size_left -= n;
8613 REG_WR32(qlt, 0x54, 0x5E40);
8614 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8615 buf += n; size_left -= n;
8616 REG_WR32(qlt, 0x54, 0x5E50);
8617 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8618 buf += n; size_left -= n;
8619 REG_WR32(qlt, 0x54, 0x5E60);
8620 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8621 buf += n; size_left -= n;
8622 REG_WR32(qlt, 0x54, 0x5E70);
8623 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8624 buf += n; size_left -= n;
8625 REG_WR32(qlt, 0x54, 0x5E80);
8626 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8627 buf += n; size_left -= n;
8628 REG_WR32(qlt, 0x54, 0x5E90);
8629 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8630 buf += n; size_left -= n;
8631 REG_WR32(qlt, 0x54, 0x5EA0);
8632 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8633 buf += n; size_left -= n;
8634 REG_WR32(qlt, 0x54, 0x5EB0);
8635 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8636 buf += n; size_left -= n;
8637 REG_WR32(qlt, 0x54, 0x5EC0);
8638 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8639 buf += n; size_left -= n;
8640 REG_WR32(qlt, 0x54, 0x5ED0);
8641 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8642 buf += n; size_left -= n;
8643 REG_WR32(qlt, 0x54, 0x5EE0);
8644 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8645 buf += n; size_left -= n;
8646 REG_WR32(qlt, 0x54, 0x5EF0);
8647 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8648 buf += n; size_left -= n;
8649
8650 n = (int)snprintf(buf, size_left, "\nRP1 Array registers\n");
8651 buf += n; size_left -= n;
8652 REG_WR32(qlt, 0x54, 0x5F00);
8653 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8654 buf += n; size_left -= n;
8655 REG_WR32(qlt, 0x54, 0x5F10);
8656 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8657 buf += n; size_left -= n;
8658 REG_WR32(qlt, 0x54, 0x5F20);
8659 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8660 buf += n; size_left -= n;
8661 REG_WR32(qlt, 0x54, 0x5F30);
8662 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8663 buf += n; size_left -= n;
8664 REG_WR32(qlt, 0x54, 0x5F40);
8665 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8666 buf += n; size_left -= n;
8667 REG_WR32(qlt, 0x54, 0x5F50);
8668 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8669 buf += n; size_left -= n;
8670 REG_WR32(qlt, 0x54, 0x5F60);
8671 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8672 buf += n; size_left -= n;
8673 REG_WR32(qlt, 0x54, 0x5F70);
8674 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8675 buf += n; size_left -= n;
8676 REG_WR32(qlt, 0x54, 0x5F80);
8677 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8678 buf += n; size_left -= n;
8679 REG_WR32(qlt, 0x54, 0x5F90);
8680 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8681 buf += n; size_left -= n;
8682 REG_WR32(qlt, 0x54, 0x5FA0);
8683 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8684 buf += n; size_left -= n;
8685 REG_WR32(qlt, 0x54, 0x5FB0);
8686 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8687 buf += n; size_left -= n;
8688 REG_WR32(qlt, 0x54, 0x5FC0);
8689 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8690 buf += n; size_left -= n;
8691 REG_WR32(qlt, 0x54, 0x5FD0);
8692 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8693 buf += n; size_left -= n;
8694 REG_WR32(qlt, 0x54, 0x5FE0);
8695 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8696 buf += n; size_left -= n;
8697 REG_WR32(qlt, 0x54, 0x5FF0);
8698 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8699 buf += n; size_left -= n;
8700
8701 n = (int)snprintf(buf,
8702 size_left, "\nQueue Control Registers\n");
8703 buf += n; size_left -= n;
8704 REG_WR32(qlt, 0x54, 0x7800);
8705 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8706 buf += n; size_left -= n;
8707 }
8708
8709 /*
8710 * Fibre buffer registers
8711 */
8712 n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
8713 buf += n; size_left -= n;
8714 REG_WR32(qlt, 0x54, 0x6000);
8715 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8716 buf += n; size_left -= n;
8717 REG_WR32(qlt, 0x54, 0x6010);
8718 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8719 buf += n; size_left -= n;
8720 REG_WR32(qlt, 0x54, 0x6020);
8721 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8722 buf += n; size_left -= n;
8723 REG_WR32(qlt, 0x54, 0x6030);
8724 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8725 buf += n; size_left -= n;
8726 REG_WR32(qlt, 0x54, 0x6040);
8727 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8728 buf += n; size_left -= n;
8729 if (qlt->qlt_83xx_chip) {
8730 REG_WR32(qlt, 0x54, 0x6060);
8731 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8732 buf += n; size_left -= n;
8733 REG_WR32(qlt, 0x54, 0x6070);
8734 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8735 buf += n; size_left -= n;
8736 }
8737 REG_WR32(qlt, 0x54, 0x6100);
8738 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8739 buf += n; size_left -= n;
8740 REG_WR32(qlt, 0x54, 0x6130);
8741 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8742 buf += n; size_left -= n;
8743 REG_WR32(qlt, 0x54, 0x6150);
8744 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8745 buf += n; size_left -= n;
8746 REG_WR32(qlt, 0x54, 0x6170);
8747 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8748 buf += n; size_left -= n;
8749 REG_WR32(qlt, 0x54, 0x6190);
8750 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8751 buf += n; size_left -= n;
8752 REG_WR32(qlt, 0x54, 0x61B0);
8753 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8754 buf += n; size_left -= n;
8755 if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) {
8756 REG_WR32(qlt, 0x54, 0x61C0);
8757 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8758 buf += n; size_left -= n;
8759 }
8760 if (qlt->qlt_83xx_chip) {
8761 REG_WR32(qlt, 0x54, 0x6530);
8762 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8763 buf += n; size_left -= n;
8764 REG_WR32(qlt, 0x54, 0x6540);
8765 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8766 buf += n; size_left -= n;
8767 REG_WR32(qlt, 0x54, 0x6550);
8768 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8769 buf += n; size_left -= n;
8770 REG_WR32(qlt, 0x54, 0x6560);
8771 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8772 buf += n; size_left -= n;
8773 REG_WR32(qlt, 0x54, 0x6570);
8774 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8775 buf += n; size_left -= n;
8776 REG_WR32(qlt, 0x54, 0x6580);
8777 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8778 buf += n; size_left -= n;
8779 REG_WR32(qlt, 0x54, 0x6590);
8780 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8781 buf += n; size_left -= n;
8782 REG_WR32(qlt, 0x54, 0x65A0);
8783 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8784 buf += n; size_left -= n;
8785 REG_WR32(qlt, 0x54, 0x65B0);
8786 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8787 buf += n; size_left -= n;
8788 REG_WR32(qlt, 0x54, 0x65C0);
8789 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8790 buf += n; size_left -= n;
8791 REG_WR32(qlt, 0x54, 0x65D0);
8792 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8793 buf += n; size_left -= n;
8794 REG_WR32(qlt, 0x54, 0x65E0);
8795 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8796 buf += n; size_left -= n;
8797 }
8798 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8799 (qlt->qlt_83xx_chip)) {
8800 REG_WR32(qlt, 0x54, 0x6F00);
8801 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8802 buf += n; size_left -= n;
8803 }
8804
8805 if (qlt->qlt_83xx_chip) {
8806 n = (int)snprintf(buf, size_left, "\nAT0 Array registers\n");
8807 buf += n; size_left -= n;
8808 REG_WR32(qlt, 0x54, 0x7080);
8809 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8810 buf += n; size_left -= n;
8811 REG_WR32(qlt, 0x54, 0x7090);
8812 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8813 buf += n; size_left -= n;
8814 REG_WR32(qlt, 0x54, 0x70A0);
8815 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8816 buf += n; size_left -= n;
8817 REG_WR32(qlt, 0x54, 0x70B0);
8818 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8819 buf += n; size_left -= n;
8820 REG_WR32(qlt, 0x54, 0x70C0);
8821 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8822 buf += n; size_left -= n;
8823 REG_WR32(qlt, 0x54, 0x70D0);
8824 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8825 buf += n; size_left -= n;
8826 REG_WR32(qlt, 0x54, 0x70E0);
8827 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8828 buf += n; size_left -= n;
8829 REG_WR32(qlt, 0x54, 0x70F0);
8830 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8831 buf += n; size_left -= n;
8832 }
8833
8834 EL(qlt, "reset chip\n");
8835 qlt->intr_sneak_counter = 10;
8836 mutex_enter(&qlt->intr_lock);
8837 if (qlt->qlt_mq_enabled) {
8838 for (i = 1; i < qlt->qlt_queue_cnt; i++) {
8839 mutex_enter(&qlt->mq_resp[i].mq_lock);
8840 }
8841 }
8842 (void) qlt_reset_chip(qlt);
8843 drv_usecwait(20);
8844 qlt->intr_sneak_counter = 0;
8845 if (qlt->qlt_mq_enabled) {
8846 for (i = 1; i < qlt->qlt_queue_cnt; i++) {
8847 mutex_exit(&qlt->mq_resp[i].mq_lock);
8848 }
8849 }
8850 mutex_exit(&qlt->intr_lock);
8851 EL(qlt, "reset chip, done\n");
8852
8853 /*
8854 * Memory
8855 */
8856 n = (int)snprintf(buf, size_left, "\nCode RAM\n");
8857 buf += n; size_left -= n;
8858
8859 addr = 0x20000;
8860 endaddr = (qlt->qlt_83xx_chip) ? 0x22400 : 0x22000;
8861 words_to_read = 0;
8862 while (addr < endaddr) {
8863 words_to_read = MBOX_DMA_MEM_SIZE >> 2;
8864 if ((words_to_read + addr) > endaddr) {
8865 words_to_read = endaddr - addr;
8866 }
8867 if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
8868 QLT_SUCCESS) {
8869 EL(qlt, "Error reading risc ram - CODE RAM status="
8870 "%llxh\n", ret);
8871 goto dump_fail;
8872 }
8873
8874 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
8875 buf += n; size_left -= n;
8876
8877 if (size_left < 100000) {
8878 EL(qlt, "run out of space - CODE RAM size_left=%d\n",
8879 size_left);
8880 goto dump_ok;
8907 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
8908 buf += n; size_left -= n;
8909 if (size_left < 100000) {
8910 EL(qlt, "run out of space - EXT RAM\n");
8911 goto dump_ok;
8912 }
8913 addr += words_to_read;
8914 }
8915
8916 /*
8917 * Label the end tag
8918 */
8919 n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
8920 buf += n; size_left -= n;
8921
8922 /*
8923 * Queue dumping
8924 */
8925 n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
8926 buf += n; size_left -= n;
8927
8928 if (qlt->qlt_mq_enabled) {
8929 for (i = 0; i < qlt->qlt_queue_cnt; i++) {
8930 if (qlt->mq_req[i].queue_mem_mq_base_addr) {
8931 n = (int)snprintf(buf, size_left,
8932 "\nQueue %d:\n", i);
8933 buf += n; size_left -= n;
8934 n = qlt_dump_queue(qlt,
8935 qlt->mq_req[i].queue_mem_mq_base_addr,
8936 REQUEST_QUEUE_MQ_ENTRIES,
8937 buf, size_left);
8938 buf += n; size_left -= n;
8939 }
8940 }
8941 } else {
8942 n = (int)snprintf(buf, size_left, "\nQueue 0:\n");
8943 buf += n; size_left -= n;
8944 n = qlt_dump_queue(qlt,
8945 qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
8946 REQUEST_QUEUE_ENTRIES, buf, size_left);
8947 buf += n; size_left -= n;
8948 }
8949
8950 if (!qlt->qlt_83xx_chip) {
8951 n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
8952 buf += n; size_left -= n;
8953 n = qlt_dump_queue(qlt,
8954 qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
8955 PRIORITY_QUEUE_ENTRIES, buf, size_left);
8956 buf += n; size_left -= n;
8957 }
8958
8959 n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
8960 buf += n; size_left -= n;
8961
8962 if (qlt->qlt_mq_enabled) {
8963 for (i = 0; i < qlt->qlt_queue_cnt; i++) {
8964 if (qlt->mq_resp[i].queue_mem_mq_base_addr) {
8965 n = (int)snprintf(buf, size_left,
8966 "\nQueue %d:\n", i);
8967 buf += n; size_left -= n;
8968 n = qlt_dump_queue(qlt,
8969 qlt->mq_resp[i].queue_mem_mq_base_addr,
8970 RESPONSE_QUEUE_MQ_ENTRIES,
8971 buf, size_left);
8972 buf += n; size_left -= n;
8973 }
8974 }
8975 } else {
8976 n = (int)snprintf(buf, size_left, "\nQueue 0:\n");
8977 buf += n; size_left -= n;
8978 n = qlt_dump_queue(qlt,
8979 qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
8980 RESPONSE_QUEUE_ENTRIES, buf, size_left);
8981 buf += n; size_left -= n;
8982 }
8983
8984 n = (int)snprintf(buf, size_left, "\nATIO Queue\nQueue 0:\n");
8985 buf += n; size_left -= n;
8986 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
8987 ATIO_QUEUE_ENTRIES, buf, size_left);
8988 buf += n; size_left -= n;
8989
8990 /*
8991 * Label dump reason
8992 */
8993 if (ssci != NULL) {
8994 n = (int)snprintf(buf, size_left,
8995 "\nFirmware dump reason: %s-%s\n",
8996 qlt->qlt_port_alias, ssci->st_additional_info);
8997 } else {
8998 n = (int)snprintf(buf, size_left,
8999 "\nFirmware dump reason: %s-%s\n",
9000 qlt->qlt_port_alias, "no additional infor");
9001 }
9002 buf += n; size_left -= n;
9003
9004 dump_ok:
9005 EL(qlt, "left-%d\n", size_left);
9006 mutex_enter(&qlt->qlt_ioctl_lock);
9007 qlt->qlt_ioctl_flags &=
9008 ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
9009 qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
9010 mutex_exit(&qlt->qlt_ioctl_lock);
9011 return (FCT_SUCCESS);
9012
9013 dump_fail:
9014 EL(qlt, "dump not done\n");
9015 mutex_enter(&qlt->qlt_ioctl_lock);
9016 qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
9017 mutex_exit(&qlt->qlt_ioctl_lock);
9018 return (FCT_FAILURE);
9019 }
9020
/*
 * NOTE(review): this copy of qlt_fwdump_dump_regs appears truncated -- the
 * loop header and the register reads between embedded lines 9025 and 9055
 * are missing from this listing.  The visible tail formats 32-bit values
 * eight per line into buf and returns the character count, but do not edit
 * this fragment; restore the full function from the upstream qlt.c first.
 */
9021 static int
9022 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
9023 uint_t size_left)
9024 {
9025 int i;
9055 }
9056 if ((i + 1) & 7) {
9057 c = ' ';
9058 } else {
9059 c = '\n';
9060 }
9061 n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
9062 "%08x%c", ptr[i], c));
9063 }
9064 return (n);
9065 }
9066
9067 static int
9068 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
9069 uint_t size_left)
9070 {
9071 int i;
9072 int n;
9073 char c = ' ';
9074 int words;
9075 uint32_t *ptr;
9076 uint32_t w;
9077
9078 words = entries * 16;
9079 ptr = (uint32_t *)qadr;
9080 for (i = 0, n = 0; i < words; i++) {
9081 if ((i & 7) == 0) {
9082 n = (int)(n + (int)snprintf(&buf[n],
9083 (uint_t)(size_left - n), "%05x: ", i));
9084 }
9085 if ((i + 1) & 7) {
9086 c = ' ';
9087 } else {
9088 c = '\n';
9089 }
9090 w = QMEM_RD32(qlt, &ptr[i]);
9091 n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%08x%c",
9092 w, c));
9093 }
9094 return (n);
9095 }
9096
9097 /*
9098  * Only called by the debug dump. Interrupts are disabled and the
9099  * mailboxes along with the mailbox RAM are available.
9100  * Copy data from RISC RAM to system memory.
9101  */
9102 static fct_status_t
9103 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
9104 {
9105 uint64_t da;
9106 fct_status_t ret;
9107
9108 REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
9109 da = qlt->queue_mem_cookie.dmac_laddress;
9110 da += MBOX_DMA_MEM_OFFSET;
9111
9112 /* System destination address */
9113 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
9114 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
9115 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
9116 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
9117
9118 /* Length */
9119 REG_WR16(qlt, REG_MBOX(5), LSW(words));
9120 REG_WR16(qlt, REG_MBOX(4), MSW(words));
9121
9122 /* RISC source address */
9123 REG_WR16(qlt, REG_MBOX(1), LSW(addr));
9124 REG_WR16(qlt, REG_MBOX(8), MSW(addr));
9125
9126 ret = qlt_raw_mailbox_command(qlt);
9127 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
9128 if (ret == QLT_SUCCESS) {
9129 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
9130 MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
9131 } else {
9132 EL(qlt, "qlt_raw_mailbox_command=0x0ch status=%llxh\n", ret);
9133 }
9134 return (ret);
9135 }
9136
9137 static fct_status_t
9138 qlt_mbx_mpi_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
9139 uint16_t direction)
9140 {
9141 uint64_t da;
9142 fct_status_t ret;
9143
9144 REG_WR16(qlt, REG_MBOX(0), MBC_MPI_RAM);
9145 da = qlt->queue_mem_cookie.dmac_laddress;
9146 da += MBOX_DMA_MEM_OFFSET;
9147
9148 /* System destination address */
9149 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
9150 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
9151 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
9152 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
9153
9154 /* Length */
9155 REG_WR16(qlt, REG_MBOX(5), LSW(words));
9156 REG_WR16(qlt, REG_MBOX(4), MSW(words));
9157
9158 /* RISC source address */
9159 REG_WR16(qlt, REG_MBOX(1), LSW(addr));
9160 REG_WR16(qlt, REG_MBOX(8), MSW(addr));
9161
9162 REG_WR16(qlt, REG_MBOX(9), direction);
9163 ret = qlt_raw_mailbox_command(qlt);
9164 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
9165 if (ret == QLT_SUCCESS) {
9166 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
9167 MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
9168 } else {
9169 EL(qlt, "qlt_raw_mailbox_command=0x05h status=%llxh\n", ret);
9170 }
9171 return (ret);
9172 }
9173
9174 static void
9175 qlt_verify_fw(qlt_state_t *qlt)
9176 {
9177 caddr_t req;
9178 uint16_t qi = 0;
9179
9180 /* Just put it on the request queue */
9181 mutex_enter(&qlt->mq_req[qi].mq_lock);
9182 req = qlt_get_req_entries(qlt, 1, qi);
9183 if (req == NULL) {
9184 EL(qlt, "req = NULL\n");
9185 mutex_exit(&qlt->mq_req[qi].mq_lock);
9186 return;
9187 }
9188
9189 bzero(req, IOCB_SIZE);
9190
9191 req[0] = 0x1b;
9192 req[1] = 1;
9193
9194 QMEM_WR32(qlt, (&req[4]), 0xffffffff);
9195 QMEM_WR16(qlt, (&req[0x8]), 1); /* options - don't update */
9196 QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
9197
9198 qlt_submit_req_entries(qlt, 1, qi);
9199 mutex_exit(&qlt->mq_req[qi].mq_lock);
9200 }
9201
9202 static fct_status_t
9203 qlt_mq_destroy(qlt_state_t *qlt)
9204 {
9205 int idx;
9206
9207 for (idx = 1; idx < qlt->qlt_queue_cnt; idx++) {
9208 (void) ddi_dma_unbind_handle(
9209 qlt->mq_req[idx].queue_mem_mq_dma_handle);
9210 ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9211 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9212 (void) ddi_dma_unbind_handle(
9213 qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9214 ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9215 ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9216 }
9217 return (QLT_SUCCESS);
9218 }
9219
9220 static fct_status_t
9221 qlt_mq_create(qlt_state_t *qlt, int idx)
9222 {
9223 ddi_device_acc_attr_t dev_acc_attr;
9224 size_t discard;
9225 uint_t ncookies;
9226
9227 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
9228 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
9229 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
9230
9231 /*
9232 * MQ Request queue
9233 */
9234 if (ddi_dma_alloc_handle(qlt->dip, &qlt_queue_dma_attr_mq_req1,
9235 DDI_DMA_SLEEP, 0,
9236 &qlt->mq_req[idx].queue_mem_mq_dma_handle) != DDI_SUCCESS) {
9237 return (QLT_FAILURE);
9238 }
9239 if (ddi_dma_mem_alloc(qlt->mq_req[idx].queue_mem_mq_dma_handle,
9240 REQUEST_QUEUE_MQ_SIZE,
9241 &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9242 &qlt->mq_req[idx].queue_mem_mq_base_addr, &discard,
9243 &qlt->mq_req[idx].queue_mem_mq_acc_handle) != DDI_SUCCESS) {
9244 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9245 return (QLT_FAILURE);
9246 }
9247 if (ddi_dma_addr_bind_handle(
9248 qlt->mq_req[idx].queue_mem_mq_dma_handle,
9249 NULL, qlt->mq_req[idx].queue_mem_mq_base_addr,
9250 REQUEST_QUEUE_MQ_SIZE,
9251 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9252 &qlt->mq_req[idx].queue_mem_mq_cookie,
9253 &ncookies) != DDI_SUCCESS) {
9254 ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9255 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9256 return (QLT_FAILURE);
9257 }
9258 if (ncookies != 1) {
9259 (void) ddi_dma_unbind_handle(
9260 qlt->mq_req[idx].queue_mem_mq_dma_handle);
9261 ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9262 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9263 return (QLT_FAILURE);
9264 }
9265
9266 /*
9267 * MQ Response queue
9268 */
9269 if (ddi_dma_alloc_handle(qlt->dip, &qlt_queue_dma_attr_mq_rsp1,
9270 DDI_DMA_SLEEP, 0,
9271 &qlt->mq_resp[idx].queue_mem_mq_dma_handle) != DDI_SUCCESS) {
9272 (void) ddi_dma_unbind_handle(
9273 qlt->mq_req[idx].queue_mem_mq_dma_handle);
9274 ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9275 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9276 return (QLT_FAILURE);
9277 }
9278 if (ddi_dma_mem_alloc(qlt->mq_resp[idx].queue_mem_mq_dma_handle,
9279 RESPONSE_QUEUE_MQ_SIZE,
9280 &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9281 &qlt->mq_resp[idx].queue_mem_mq_base_addr, &discard,
9282 &qlt->mq_resp[idx].queue_mem_mq_acc_handle) != DDI_SUCCESS) {
9283 (void) ddi_dma_unbind_handle(
9284 qlt->mq_req[idx].queue_mem_mq_dma_handle);
9285 ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9286 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9287 ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9288 return (QLT_FAILURE);
9289 }
9290 if (ddi_dma_addr_bind_handle(
9291 qlt->mq_resp[idx].queue_mem_mq_dma_handle,
9292 NULL, qlt->mq_resp[idx].queue_mem_mq_base_addr,
9293 RESPONSE_QUEUE_MQ_SIZE,
9294 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9295 &qlt->mq_resp[idx].queue_mem_mq_cookie,
9296 &ncookies) != DDI_SUCCESS) {
9297 (void) ddi_dma_unbind_handle(
9298 qlt->mq_req[idx].queue_mem_mq_dma_handle);
9299 ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9300 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9301 ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9302 ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9303 return (QLT_FAILURE);
9304 }
9305 if (ncookies != 1) {
9306 (void) ddi_dma_unbind_handle(
9307 qlt->mq_req[idx].queue_mem_mq_dma_handle);
9308 ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9309 ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9310 (void) ddi_dma_unbind_handle(
9311 qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9312 ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9313 ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9314 return (QLT_FAILURE);
9315 }
9316
9317 qlt->mq_req[idx].mq_ptr = qlt->mq_req[idx].queue_mem_mq_base_addr;
9318 qlt->mq_req[idx].mq_ndx_to_fw = qlt->mq_req[idx].mq_ndx_from_fw = 0;
9319 qlt->mq_req[idx].mq_available = REQUEST_QUEUE_MQ_ENTRIES - 1;
9320 bzero(qlt->mq_req[idx].mq_ptr, REQUEST_QUEUE_MQ_SIZE);
9321
9322 qlt->mq_resp[idx].mq_ptr = qlt->mq_resp[idx].queue_mem_mq_base_addr;
9323 qlt->mq_resp[idx].mq_ndx_to_fw = qlt->mq_resp[idx].mq_ndx_from_fw = 0;
9324 bzero(qlt->mq_resp[idx].mq_ptr, RESPONSE_QUEUE_MQ_SIZE);
9325
9326 return (QLT_SUCCESS);
9327 }
9328
9329 static void
9330 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
9331 {
9332 uint16_t status;
9333 char info[80];
9334
9335 status = QMEM_RD16(qlt, rsp+8);
9336 if (status != 0) {
9337 (void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
9338 "status:%x, rsp:%p", status, (void *)rsp);
9339 if (status == 3) {
9340 uint16_t error_code;
9341
9342 error_code = QMEM_RD16(qlt, rsp+0xA);
9343 (void) snprintf(info, 80, "qlt_handle_verify_fw_"
9344 "completion: error code:%x", error_code);
9345 }
9346 }
9347 }
9348
9349 /*
9350 * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
9351 *
9352 * Input: Pointer to the adapter state structure.
9353 * Returns: Success or Failure.
9354 * Context: Kernel context.
9355 */
9356 static int
9357 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
9358 {
9359 qlt_trace_entry_t *entry;
9360 size_t maxsize;
9361
9362 qlt->qlt_trace_desc =
9363 (qlt_trace_desc_t *)kmem_zalloc(
9364 sizeof (qlt_trace_desc_t), KM_SLEEP);
9365
9366 qlt->qlt_log_entries = QL_LOG_ENTRIES;
9367 maxsize = qlt->qlt_log_entries * sizeof (qlt_trace_entry_t);
9368 entry = kmem_zalloc(maxsize, KM_SLEEP);
9369
9370 mutex_init(&qlt->qlt_trace_desc->mutex, NULL,
9371 MUTEX_DRIVER, NULL);
9372
9373 qlt->qlt_trace_desc->trace_buffer = entry;
9374 qlt->qlt_trace_desc->trace_buffer_size = maxsize;
9375 qlt->qlt_trace_desc->nindex = 0;
9376
9377 qlt->qlt_trace_desc->nentries = qlt->qlt_log_entries;
9378 qlt->qlt_trace_desc->start = qlt->qlt_trace_desc->end = 0;
9379 qlt->qlt_trace_desc->csize = 0;
9380 qlt->qlt_trace_desc->count = 0;
9381
9382 return (DDI_SUCCESS);
9383 }
9384
9385 /*
9386 * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
9387 *
9388 * Input: Pointer to the adapter state structure.
9389 * Returns: Success or Failure.
9390 * Context: Kernel context.
9391 */
9392 static int
9393 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
9394 {
9395 int rval = DDI_SUCCESS;
9396
9397 if (qlt->qlt_trace_desc != NULL) {
9398 if (qlt->qlt_trace_desc->trace_buffer != NULL) {
9399 kmem_free(qlt->qlt_trace_desc->trace_buffer,
9400 qlt->qlt_trace_desc->trace_buffer_size);
9401 }
9402 mutex_destroy(&qlt->qlt_trace_desc->mutex);
9403 kmem_free(qlt->qlt_trace_desc, sizeof (qlt_trace_desc_t));
9404 }
9405
9406 return (rval);
9407 }
9408
9409 /*
9410 * qlt_el_msg
9411 * Extended logging message
9412 *
9413 * Input:
9414 * qlt: adapter state pointer.
9415 * fn: function name.
9416 * ce: level
9417 * ...: Variable argument list.
9418 *
9419 * Context:
9420 * Kernel/Interrupt context.
9421 */
9422 void
9423 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
9424 {
9425 char *s, *fmt = 0, *fmt1 = 0;
9426
9427 /*
9428 * EL_BUFFER_RESERVE 256 is the max # of bytes
9429 * that driver's log could be collected.
9430 * add 3 more buytes for safely maniplulation.
9431 */
9432 char buf[EL_BUFFER_RESERVE + 3];
9433 char buf1[QL_LOG_LENGTH];
9434 size_t tmp;
9435 size_t rval, rval1;
9436 va_list vl;
9437 qlt_trace_desc_t *desc = qlt->qlt_trace_desc;
9438 qlt_trace_entry_t *entry;
9439 uint32_t cindex;
9440 timespec_t time;
9441 uint32_t count;
9442 size_t left;
9443
9444 (void) bzero((void *)&buf[0], EL_BUFFER_RESERVE + 3);
9445 fmt1 = &buf[0];
9446
9447 TRACE_BUFFER_LOCK(qlt);
9448
9449 /* locate the entry to be filled out */
9450 cindex = desc->nindex;
9451 entry = &desc->trace_buffer[cindex];
9452
9453 count = desc->count;
9454
9455 desc->end = desc->nindex;
9456 desc->nindex++;
9457 if (desc->nindex == desc->nentries) {
9458 desc->nindex = 0;
9459 }
9460
9461 if (desc->csize < desc->nentries) {
9462 desc->csize ++;
9463 } else {
9464 /*
9465 * once wrapped, csize is fixed.
9466 * so we have to adjust start point
9467 */
9468 desc->start = desc->nindex;
9469 }
9470
9471 gethrestime(&time);
9472
9473 rval = snprintf(fmt1, (size_t)EL_BUFFER_RESERVE,
9474 QL_BANG "%d=>QEL %s(%d,%d):: %s, ", count, QL_NAME,
9475 qlt->instance, 0, fn);
9476
9477 rval1 = rval;
9478
9479 va_start(vl, ce);
9480 s = va_arg(vl, char *);
9481 fmt = fmt1 + rval;
9482 tmp = vsnprintf(fmt,
9483 (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
9484 va_end(vl);
9485
9486 rval += tmp;
9487 if (rval > QL_LOG_LENGTH - 1) {
9488 left = rval - (QL_LOG_LENGTH - 1);
9489
9490 /* store the remaining string */
9491 (void) strncpy(buf1, fmt1 + (QL_LOG_LENGTH - 1), left);
9492 (void) strncpy(entry->buf, fmt1, (QL_LOG_LENGTH - 1));
9493 entry->buf[QL_LOG_LENGTH - 1] = '\n';
9494
9495 bcopy((void *)&time, (void *)&entry->hs_time,
9496 sizeof (timespec_t));
9497
9498 /*
9499 * remaining msg will be stored in the nex entry
9500 * with same timestamp and same sequence number
9501 */
9502 cindex = desc->nindex;
9503 entry = &desc->trace_buffer[cindex];
9504
9505 desc->end = desc->nindex;
9506 desc->nindex++;
9507 if (desc->nindex == desc->nentries) {
9508 desc->nindex = 0;
9509 }
9510
9511 if (desc->csize < desc->nentries) {
9512 desc->csize ++;
9513 } else {
9514 desc->start = desc->nindex;
9515 }
9516
9517 (void) strncpy(&entry->buf[0], fmt1, rval1);
9518 (void) strncpy(&entry->buf[rval1], &buf1[0], left);
9519 entry->buf[rval1 + left] = 0;
9520
9521 bcopy((void *)&time, (void *)&entry->hs_time,
9522 sizeof (timespec_t));
9523
9524 if (qlt->qlt_eel_level == 1) {
9525 cmn_err(ce, fmt1);
9526 }
9527
9528 desc->count++;
9529
9530 TRACE_BUFFER_UNLOCK(qlt);
9531 return;
9532 }
9533
9534 desc->count ++;
9535 bcopy((void *)&time, (void *)&entry->hs_time,
9536 sizeof (timespec_t));
9537
9538 (void) strcpy(entry->buf, fmt1);
9539 entry->buf[rval] = 0;
9540
9541 TRACE_BUFFER_UNLOCK(qlt);
9542
9543 if (qlt->qlt_eel_level == 1) {
9544 cmn_err(ce, fmt1);
9545 }
9546 }
9547
9548 static int
9549 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
9550 {
9551 return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
9552 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
9553 }
9554
9555 static int
9556 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
9557 {
9558 return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
9559 DDI_PROP_DONTPASS, prop, prop_val));
9560 }
9561
9562 static int
9563 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
9564 {
9565 char inst_prop[256];
9566 int val;
9567
9689 }
9690
/*
 * qlt_mps_reset
 *	Reset MPS for FCoE functions.
 *
 * Input:
 *	qlt = adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
static void
qlt_mps_reset(qlt_state_t *qlt)
{
	uint32_t data, dctl = 1000;

	/*
	 * Acquire the firmware semaphore at RISC RAM word 0x7c00:
	 * write 1, then read it back until BIT_0 indicates the grant.
	 * Give up after 1000 attempts or on any RAM access failure.
	 */
	do {
		if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
		    QLT_SUCCESS) {
			EL(qlt, "qlt_mps_reset: semaphore request fail,"
			    " cnt=%d\n", dctl);
			return;
		}
		if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
		    QLT_SUCCESS) {
			/* Release the semaphore before bailing out. */
			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
			EL(qlt, "qlt_mps_reset: semaphore read fail,"
			    " cnt=%d\n", dctl);
			return;
		}
	} while (!(data & BIT_0));

	/*
	 * Mirror bits 7:5 of PCI config register 0x54 into bits 7:5 of
	 * RISC RAM word 0x7A15, only writing back when they differ.
	 */
	if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
		dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
		if ((data & 0xe0) != (dctl & 0xe0)) {
			data &= 0xff1f;
			data |= dctl & 0xe0;
			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
		}
	} else {
		EL(qlt, "qlt_mps_reset: read 0x7a15 failed.\n");
	}
	/* Release the semaphore. */
	(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
}
9734
9735 /*
9736 * qlt_raw_wrt_risc_ram_word
9737 * Write RISC RAM word.
9738 *
9739 * Input: qlt: adapter state pointer.
9740 * risc_address: risc ram word address.
9741 * data: data.
9742 *
9743 * Returns: qlt local function return status code.
9744 *
9745 * Context: Kernel context.
9746 */
9747 static fct_status_t
9748 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
9749 uint32_t data)
9750 {
9789 *data = REG_RD16(qlt, REG_MBOX(2));
9790 *data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
9791 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
9792 if (ret != QLT_SUCCESS) {
9793 EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
9794 "=%llxh\n", ret);
9795 }
9796 return (ret);
9797 }
9798
9799 static void
9800 qlt_properties(qlt_state_t *qlt)
9801 {
9802 int32_t cnt = 0;
9803 int32_t defval = 0xffff;
9804
9805 if (qlt_wwn_overload_prop(qlt) == TRUE) {
9806 EL(qlt, "wwnn overloaded.\n");
9807 }
9808
9809 /* configure extended logging from conf file */
9810 if ((cnt = qlt_read_int_instance_prop(qlt, "extended-logging",
9811 defval)) != defval) {
9812 qlt->qlt_eel_level = (uint8_t)(cnt & 0xff);
9813 EL(qlt, "extended error logging=%d\n", cnt);
9814 }
9815
9816 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
9817 defval) {
9818 qlt->qlt_bucketcnt[0] = cnt;
9819 EL(qlt, "2k bucket o/l=%d\n", cnt);
9820 }
9821
9822 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
9823 defval) {
9824 qlt->qlt_bucketcnt[1] = cnt;
9825 EL(qlt, "8k bucket o/l=%d\n", cnt);
9826 }
9827
9828 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
9829 defval) {
9830 qlt->qlt_bucketcnt[2] = cnt;
9831 EL(qlt, "64k bucket o/l=%d\n", cnt);
9832 }
9833
9834 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
9835 defval) {
9836 qlt->qlt_bucketcnt[3] = cnt;
9837 EL(qlt, "128k bucket o/l=%d\n", cnt);
9838 }
9839
9840 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
9841 defval) {
9842 qlt->qlt_bucketcnt[4] = cnt;
9843 EL(qlt, "256k bucket o/l=%d\n", cnt);
9844 }
9845 }
9846
9847 /* ******************************************************************* */
9848 /* ****************** 27xx Dump Template Functions ******************* */
9849 /* ******************************************************************* */
9850
9851 /*
9852 * qlt_get_dmp_template
9853 * Get dump template from firmware module
9854 *
9855 * Input:
9856 * qlt: qlt_state_t pointer.
9857 *
9858 * Returns:
9859 * qlt local function return status code.
9860 *
9861 * Context:
9862 * Kernel context.
9863 */
9864 static fct_status_t
9865 qlt_27xx_get_dmp_template(qlt_state_t *qlt)
9866 {
9867 ddi_device_acc_attr_t dev_acc_attr;
9868 dev_info_t *dip = qlt->dip;
9869 uint_t ncookies;
9870 size_t discard;
9871 uint32_t word_count, cnt, *bp, *dp;
9872
9873 if (qlt->dmp_template_dma_handle != NULL) {
9874 (void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
9875 if (qlt->dmp_template_acc_handle != NULL) {
9876 ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9877 }
9878 ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9879 }
9880
9881 if ((word_count = tmplt2700_length01) == 0) {
9882 EL(qlt, "No dump template, length=0\n");
9883 return (QLT_FAILURE);
9884 }
9885
9886 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
9887 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
9888 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
9889
9890 if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr,
9891 DDI_DMA_SLEEP, 0, &qlt->dmp_template_dma_handle) !=
9892 DDI_SUCCESS) {
9893 EL(qlt, "Unable to allocate template handle");
9894 return (QLT_FAILURE);
9895 }
9896
9897 if (ddi_dma_mem_alloc(qlt->dmp_template_dma_handle,
9898 (word_count << 2), &dev_acc_attr, DDI_DMA_CONSISTENT,
9899 DDI_DMA_SLEEP, 0, &qlt->dmp_template_addr, &discard,
9900 &qlt->dmp_template_acc_handle) != DDI_SUCCESS) {
9901 ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9902 EL(qlt, "Unable to allocate template buffer");
9903 return (QLT_FAILURE);
9904 }
9905
9906 if (ddi_dma_addr_bind_handle(qlt->dmp_template_dma_handle, NULL,
9907 qlt->dmp_template_addr, (word_count << 2),
9908 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9909 &qlt->dmp_template_cookie, &ncookies) != DDI_SUCCESS) {
9910 ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9911 ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9912 EL(qlt, "Unable to bind template handle");
9913 return (QLT_FAILURE);
9914 }
9915
9916 if (ncookies != 1) {
9917 (void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
9918 ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9919 ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9920 EL(qlt, "cookies (%d) > 1.\n", ncookies);
9921 return (QLT_FAILURE);
9922 }
9923
9924 /* Get big endian template. */
9925 bp = (uint32_t *)qlt->dmp_template_addr;
9926 dp = (uint32_t *)tmplt2700_code01;
9927 for (cnt = 0; cnt < word_count; cnt++) {
9928 ddi_put32(qlt->dmp_template_acc_handle, bp, *dp++);
9929 if (cnt > 6) {
9930 qlt_chg_endian((uint8_t *)bp, 4);
9931 }
9932 bp++;
9933 }
9934
9935 return (QLT_SUCCESS);
9936 }
9937
9938 static int
9939 qlt_27xx_dt_riob1(qlt_state_t *qlt, qlt_dt_riob1_t *entry,
9940 uint8_t *dbuff, uint8_t *dbuff_end)
9941 {
9942 int esize;
9943 uint32_t i, cnt;
9944 uint8_t *bp = dbuff;
9945 uint32_t addr = entry->addr;
9946 uint32_t reg = entry->pci_offset;
9947
9948 cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
9949 esize = cnt * 4; /* addr */
9950 esize += cnt * entry->reg_size; /* data */
9951
9952 if (dbuff == NULL) {
9953 return (esize);
9954 }
9955 if (esize + dbuff >= dbuff_end) {
9956 EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
9957 entry->h.driver_flags = (uint8_t)
9958 (entry->h.driver_flags | SKIPPED_FLAG);
9959 return (0);
9960 }
9961
9962 REG_WR32(qlt, REG_IOBUS_BASE_ADDR, addr);
9963 while (cnt--) {
9964 *bp++ = LSB(LSW(addr));
9965 *bp++ = MSB(LSW(addr));
9966 *bp++ = LSB(MSW(addr));
9967 *bp++ = MSB(MSW(addr));
9968 for (i = 0; i < entry->reg_size; i++) {
9969 *bp++ = REG_RD8(qlt, reg++);
9970 }
9971 addr++;
9972 }
9973
9974 return (esize);
9975 }
9976
9977 static void
9978 qlt_27xx_dt_wiob1(qlt_state_t *qlt, qlt_dt_wiob1_t *entry,
9979 uint8_t *dbuff, uint8_t *dbuff_end)
9980 {
9981 uint32_t reg = entry->pci_offset;
9982
9983 if (dbuff == NULL) {
9984 return;
9985 }
9986 if (dbuff >= dbuff_end) {
9987 EL(qlt, "skipped, no buffer space, needed=0\n");
9988 entry->h.driver_flags = (uint8_t)
9989 (entry->h.driver_flags | SKIPPED_FLAG);
9990 return;
9991 }
9992
9993 REG_WR32(qlt, REG_IOBUS_BASE_ADDR, entry->addr);
9994 REG_WR32(qlt, reg, entry->data);
9995 }
9996
9997 static int
9998 qlt_27xx_dt_riob2(qlt_state_t *qlt, qlt_dt_riob2_t *entry,
9999 uint8_t *dbuff, uint8_t *dbuff_end)
10000 {
10001 int esize;
10002 uint32_t i, cnt;
10003 uint8_t *bp = dbuff;
10004 uint32_t reg = entry->pci_offset;
10005 uint32_t addr = entry->addr;
10006
10007 cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
10008 esize = cnt * 4; /* addr */
10009 esize += cnt * entry->reg_size; /* data */
10010
10011 if (dbuff == NULL) {
10012 return (esize);
10013 }
10014 if (esize + dbuff >= dbuff_end) {
10015 EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10016 entry->h.driver_flags = (uint8_t)
10017 (entry->h.driver_flags | SKIPPED_FLAG);
10018 return (0);
10019 }
10020
10021 REG_WR32(qlt, REG_IOBUS_BASE_ADDR, addr);
10022 REG_WR32(qlt, entry->bank_sel_offset, entry->reg_bank);
10023 while (cnt--) {
10024 *bp++ = LSB(LSW(addr));
10025 *bp++ = MSB(LSW(addr));
10026 *bp++ = LSB(MSW(addr));
10027 *bp++ = MSB(MSW(addr));
10028 for (i = 0; i < entry->reg_size; i++) {
10029 *bp++ = REG_RD8(qlt, reg++);
10030 }
10031 addr++;
10032 }
10033
10034 return (esize);
10035 }
10036
10037 static void
10038 qlt_27xx_dt_wiob2(qlt_state_t *qlt, qlt_dt_wiob2_t *entry,
10039 uint8_t *dbuff, uint8_t *dbuff_end)
10040 {
10041 uint16_t data;
10042 uint32_t reg = entry->pci_offset;
10043
10044 if (dbuff == NULL) {
10045 return;
10046 }
10047 if (dbuff >= dbuff_end) {
10048 EL(qlt, "skipped, no buffer space, needed=0\n");
10049 entry->h.driver_flags = (uint8_t)
10050 (entry->h.driver_flags | SKIPPED_FLAG);
10051 return;
10052 }
10053
10054 data = CHAR_TO_SHORT(entry->data_l, entry->data_h);
10055
10056 REG_WR32(qlt, REG_IOBUS_BASE_ADDR, entry->addr);
10057 REG_WR32(qlt, entry->bank_sel_offset, entry->reg_bank);
10058 REG_WR16(qlt, reg, data);
10059 }
10060
10061 static int
10062 qlt_27xx_dt_rpci(qlt_state_t *qlt, qlt_dt_rpci_t *entry, uint8_t *dbuff,
10063 uint8_t *dbuff_end)
10064 {
10065 int esize;
10066 uint32_t i;
10067 uint8_t *bp = dbuff;
10068 uint32_t reg = entry->addr;
10069
10070 esize = 4; /* addr */
10071 esize += 4; /* data */
10072
10073 if (dbuff == NULL) {
10074 return (esize);
10075 }
10076 if (esize + dbuff >= dbuff_end) {
10077 EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10078 entry->h.driver_flags = (uint8_t)
10079 (entry->h.driver_flags | SKIPPED_FLAG);
10080 return (0);
10081 }
10082
10083 *bp++ = LSB(LSW(entry->addr));
10084 *bp++ = MSB(LSW(entry->addr));
10085 *bp++ = LSB(MSW(entry->addr));
10086 *bp++ = MSB(MSW(entry->addr));
10087 for (i = 0; i < 4; i++) {
10088 *bp++ = REG_RD8(qlt, reg++);
10089 }
10090
10091 return (esize);
10092 }
10093
10094 static void
10095 qlt_27xx_dt_wpci(qlt_state_t *qlt, qlt_dt_wpci_t *entry,
10096 uint8_t *dbuff, uint8_t *dbuff_end)
10097 {
10098 uint32_t reg = entry->addr;
10099
10100 if (dbuff == NULL) {
10101 return;
10102 }
10103 if (dbuff >= dbuff_end) {
10104 EL(qlt, "skipped, no buffer space, needed=0\n");
10105 entry->h.driver_flags = (uint8_t)
10106 (entry->h.driver_flags | SKIPPED_FLAG);
10107 return;
10108 }
10109
10110 REG_WR32(qlt, reg, entry->data);
10111 }
10112
10113 static int
10114 qlt_27xx_dt_rram(qlt_state_t *qlt, qlt_dt_rram_t *entry,
10115 uint8_t *dbuff, uint8_t *dbuff_end)
10116 {
10117 int esize, rval;
10118 uint32_t start = entry->start_addr;
10119 uint32_t end = entry->end_addr;
10120
10121 if (entry->ram_area == 2) {
10122 end = qlt->fw_ext_memory_end;
10123 } else if (entry->ram_area == 3) {
10124 start = qlt->fw_shared_ram_start;
10125 end = qlt->fw_shared_ram_end;
10126 } else if (entry->ram_area == 4) {
10127 start = qlt->fw_ddr_ram_start;
10128 end = qlt->fw_ddr_ram_end;
10129 } else if (entry->ram_area != 1) {
10130 EL(qlt, "skipped, unknown RAM_AREA %d\n", entry->ram_area);
10131 start = 0;
10132 end = 0;
10133 }
10134 esize = end > start ? end - start : 0;
10135 if (esize) {
10136 esize = (esize + 1) * 4;
10137 }
10138
10139 if (dbuff == NULL) {
10140 return (esize);
10141 }
10142 if (esize == 0 || esize + dbuff >= dbuff_end) {
10143 if (esize != 0) {
10144 EL(qlt, "skipped, no buffer space, needed=%xh\n",
10145 esize);
10146 } else {
10147 EL(qlt, "skipped, no ram_area=%xh, start=%xh "
10148 "end=%xh\n", entry->ram_area, start, end);
10149 }
10150 entry->h.driver_flags = (uint8_t)
10151 (entry->h.driver_flags | SKIPPED_FLAG);
10152 return (0);
10153 }
10154 entry->end_addr = end;
10155 entry->start_addr = start;
10156
10157 if ((rval = qlt_27xx_dump_ram(qlt, MBC_DUMP_RAM_EXTENDED,
10158 start, esize / 4, dbuff)) != QLT_SUCCESS) {
10159 EL(qlt, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
10160 "esize=0\n", rval, start, esize / 4);
10161 return (0);
10162 }
10163
10164 return (esize);
10165 }
10166
10167 static int
10168 qlt_27xx_dt_gque(qlt_state_t *qlt, qlt_dt_gque_t *entry,
10169 uint8_t *dbuff, uint8_t *dbuff_end)
10170 {
10171 int esize;
10172 uint32_t cnt, q_cnt, e_cnt, i;
10173 uint8_t *bp = dbuff, *dp;
10174
10175 if (entry->queue_type == 1) {
10176 e_cnt = qlt->qlt_queue_cnt;
10177 esize = e_cnt * 2; /* queue number */
10178 esize += e_cnt * 2; /* queue entries */
10179
10180 /* queue size */
10181 esize += REQUEST_QUEUE_ENTRIES * IOCB_SIZE;
10182 for (q_cnt = 1; q_cnt < qlt->qlt_queue_cnt; q_cnt++) {
10183 esize += REQUEST_QUEUE_MQ_ENTRIES * IOCB_SIZE;
10184 }
10185
10186 if (dbuff == NULL) {
10187 return (esize);
10188 }
10189 if (esize + dbuff >= dbuff_end) {
10190 EL(qlt, "skipped, no buffer space, needed=%xh\n",
10191 esize);
10192 entry->h.driver_flags = (uint8_t)
10193 (entry->h.driver_flags | SKIPPED_FLAG);
10194 return (0);
10195 }
10196 entry->num_queues = e_cnt;
10197
10198 for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10199 e_cnt = q_cnt == 0 ?
10200 REQUEST_QUEUE_ENTRIES : REQUEST_QUEUE_MQ_ENTRIES;
10201 dp = (uint8_t *)qlt->mq_req[q_cnt].mq_ptr;
10202 *bp++ = LSB(q_cnt);
10203 *bp++ = MSB(q_cnt);
10204 *bp++ = LSB(e_cnt);
10205 *bp++ = MSB(e_cnt);
10206 for (cnt = 0; cnt < e_cnt; cnt++) {
10207 for (i = 0; i < IOCB_SIZE; i++) {
10208 *bp++ = *dp++;
10209 }
10210 }
10211 }
10212 } else if (entry->queue_type == 2) {
10213
10214 e_cnt = qlt->qlt_queue_cnt;
10215 esize = e_cnt * 2; /* queue number */
10216 esize += e_cnt * 2; /* queue entries */
10217
10218 /* queue size */
10219 esize += RESPONSE_QUEUE_ENTRIES * IOCB_SIZE;
10220 for (q_cnt = 1; q_cnt < qlt->qlt_queue_cnt; q_cnt++) {
10221 esize += RESPONSE_QUEUE_MQ_ENTRIES * IOCB_SIZE;
10222 }
10223
10224 if (dbuff == NULL) {
10225 return (esize);
10226 }
10227 if (esize + dbuff >= dbuff_end) {
10228 EL(qlt, "skipped2, no buffer space, needed=%xh\n",
10229 esize);
10230 entry->h.driver_flags = (uint8_t)
10231 (entry->h.driver_flags | SKIPPED_FLAG);
10232 return (0);
10233 }
10234 entry->num_queues = e_cnt;
10235
10236 for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10237 e_cnt = q_cnt == 0 ?
10238 RESPONSE_QUEUE_ENTRIES : RESPONSE_QUEUE_MQ_ENTRIES;
10239 dp = (uint8_t *)qlt->mq_resp[q_cnt].mq_ptr;
10240 *bp++ = LSB(q_cnt);
10241 *bp++ = MSB(q_cnt);
10242 *bp++ = LSB(e_cnt);
10243 *bp++ = MSB(e_cnt);
10244 for (cnt = 0; cnt < e_cnt; cnt++) {
10245 for (i = 0; i < IOCB_SIZE; i++) {
10246 *bp++ = *dp++;
10247 }
10248 }
10249 }
10250 } else if (entry->queue_type == 3) {
10251 e_cnt = 1;
10252 esize = e_cnt * 2; /* queue number */
10253 esize += e_cnt * 2; /* queue entries */
10254
10255 /* queue size */
10256 esize += RESPONSE_QUEUE_ENTRIES * IOCB_SIZE;
10257
10258 if (dbuff == NULL) {
10259 return (esize);
10260 }
10261 if (esize + dbuff >= dbuff_end) {
10262 EL(qlt, "skipped2, no buffer space, needed=%xh\n",
10263 esize);
10264 entry->h.driver_flags = (uint8_t)
10265 (entry->h.driver_flags | SKIPPED_FLAG);
10266 return (0);
10267 }
10268 entry->num_queues = e_cnt;
10269
10270 for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10271 e_cnt = ATIO_QUEUE_ENTRIES;
10272 dp = (uint8_t *)qlt->atio_ptr;
10273 *bp++ = LSB(q_cnt);
10274 *bp++ = MSB(q_cnt);
10275 *bp++ = LSB(e_cnt);
10276 *bp++ = MSB(e_cnt);
10277 for (cnt = 0; cnt < e_cnt; cnt++) {
10278 for (i = 0; i < IOCB_SIZE; i++) {
10279 *bp++ = *dp++;
10280 }
10281 }
10282 }
10283 } else {
10284 EL(qlt, "skipped, unknown queue_type %d, esize=0\n",
10285 entry->queue_type);
10286 if (dbuff != NULL) {
10287 entry->h.driver_flags = (uint8_t)
10288 (entry->h.driver_flags | SKIPPED_FLAG);
10289 }
10290 return (0);
10291 }
10292
10293 return (esize);
10294 }
10295
10296 /*ARGSUSED*/
10297 static int
10298 qlt_27xx_dt_gfce(qlt_state_t *qlt, qlt_dt_gfce_t *entry,
10299 uint8_t *dbuff, uint8_t *dbuff_end)
10300 {
10301 if (dbuff != NULL) {
10302 entry->h.driver_flags = (uint8_t)
10303 (entry->h.driver_flags | SKIPPED_FLAG);
10304 }
10305
10306 return (0);
10307 }
10308
/*
 * qlt_27xx_dt_prisc
 *	Dump-template PAUSE RISC entry: halt the RISC processor so its
 *	state can be captured.  Contributes no bytes to the dump buffer.
 */
static void
qlt_27xx_dt_prisc(qlt_state_t *qlt, qlt_dt_prisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t timer;

	/* Sizing pass (dbuff == NULL): nothing to count for this entry. */
	if (dbuff == NULL) {
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Pause RISC. */
	/* RISC_STATUS BIT_8 set means the RISC is already paused. */
	if ((REG_RD32(qlt, REG_RISC_STATUS) & BIT_8) == 0) {
		REG_WR32(qlt, REG_HCCR, 0x30000000);
		/* Poll up to 30000 * 100us (~3s) for the pause to take. */
		for (timer = 30000;
		    (REG_RD32(qlt, REG_RISC_STATUS) & BIT_8) == 0;
		    timer--) {
			if (timer) {
				drv_usecwait(100);
				/* Progress note every 10000 iterations. */
				if (timer % 10000 == 0) {
					EL(qlt, "risc pause %d\n", timer);
				}
			} else {
				EL(qlt, "risc pause timeout\n");
				break;
			}
		}
	}
}
10343
/*
 * qlt_27xx_dt_rrisc
 *	Dump-template RESET RISC entry: quiesce DMA, soft-reset the chip,
 *	wait for the RISC to come back out of reset and leave interrupts
 *	disabled.  Contributes no bytes to the dump buffer.
 */
static void
qlt_27xx_dt_rrisc(qlt_state_t *qlt, qlt_dt_rrisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t timer;
	uint16_t rom_status;

	/* Sizing pass (dbuff == NULL): nothing to count for this entry. */
	if (dbuff == NULL) {
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Shutdown DMA. */
	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL);

	/* Wait for DMA to stop (up to 30000 * 100us ~= 3s). */
	for (timer = 0; timer < 30000; timer++) {
		if (!(REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Reset the chip. */
	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET);
	drv_usecwait(200);

	/* Wait for RISC to recover from reset (ROM firmware not busy). */
	for (timer = 30000; timer; timer--) {
		rom_status = REG_RD16(qlt, REG_MBOX0);
		if ((rom_status & MBS_ROM_STATUS_MASK) != MBS_ROM_BUSY) {
			break;
		}
		drv_usecwait(100);
	}

	/* Wait for reset to finish. */
	for (timer = 30000; timer; timer--) {
		if (!(REG_RD32(qlt, REG_CTRL_STATUS) & CHIP_SOFT_RESET)) {
			break;
		}
		drv_usecwait(100);
	}

	/* XXX: Disable Interrupts (Probably not needed) */
	REG_WR32(qlt, REG_INTR_CTRL, 0);

	qlt->qlt_intr_enabled = 0;
}
10398
10399 static void
10400 qlt_27xx_dt_dint(qlt_state_t *qlt, qlt_dt_dint_t *entry,
10401 uint8_t *dbuff, uint8_t *dbuff_end)
10402 {
10403 if (dbuff == NULL) {
10404 return;
10405 }
10406 if (dbuff >= dbuff_end) {
10407 EL(qlt, "skipped, no buffer space, needed=0\n");
10408 entry->h.driver_flags = (uint8_t)
10409 (entry->h.driver_flags | SKIPPED_FLAG);
10410 return;
10411 }
10412
10413 PCICFG_WR32(qlt, entry->pci_offset, entry->data);
10414 }
10415
10416 /*ARGSUSED*/
10417 static int
10418 qlt_27xx_dt_ghbd(qlt_state_t *qlt, qlt_dt_ghbd_t *entry,
10419 uint8_t *dbuff, uint8_t *dbuff_end)
10420 {
10421 if (dbuff != NULL) {
10422 entry->h.driver_flags = (uint8_t)
10423 (entry->h.driver_flags | SKIPPED_FLAG);
10424 }
10425
10426 return (0);
10427 }
10428
10429 /*ARGSUSED*/
10430 static int
10431 qlt_27xx_dt_scra(qlt_state_t *qlt, qlt_dt_scra_t *entry,
10432 uint8_t *dbuff, uint8_t *dbuff_end)
10433 {
10434 if (dbuff != NULL) {
10435 entry->h.driver_flags = (uint8_t)
10436 (entry->h.driver_flags | SKIPPED_FLAG);
10437 }
10438
10439 return (0);
10440 }
10441
10442 static int
10443 qlt_27xx_dt_rrreg(qlt_state_t *qlt, qlt_dt_rrreg_t *entry,
10444 uint8_t *dbuff, uint8_t *dbuff_end)
10445 {
10446 int esize;
10447 uint32_t i;
10448 uint8_t *bp = dbuff;
10449 uint32_t addr = entry->addr;
10450 uint32_t cnt = entry->count;
10451
10452 esize = cnt * 4; /* addr */
10453 esize += cnt * 4; /* data */
10454
10455 if (dbuff == NULL) {
10456 return (esize);
10457 }
10458 if (esize + dbuff >= dbuff_end) {
10459 EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10460 entry->h.driver_flags = (uint8_t)
10461 (entry->h.driver_flags | SKIPPED_FLAG);
10462 return (0);
10463 }
10464
10465 REG_WR32(qlt, REG_IOBUS_BASE_ADDR, 0x40);
10466 while (cnt--) {
10467 REG_WR32(qlt, 0xc0, addr | 0x80000000);
10468 *bp++ = LSB(LSW(addr));
10469 *bp++ = MSB(LSW(addr));
10470 *bp++ = LSB(MSW(addr));
10471 *bp++ = MSB(MSW(addr));
10472 for (i = 0; i < 4; i++) {
10473 *bp++ = REG_RD8(qlt, i);
10474 }
10475 addr += 4;
10476 }
10477
10478 return (esize);
10479 }
10480
10481 static void
10482 qlt_27xx_dt_wrreg(qlt_state_t *qlt, qlt_dt_wrreg_t *entry,
10483 uint8_t *dbuff, uint8_t *dbuff_end)
10484 {
10485 if (dbuff == NULL) {
10486 return;
10487 }
10488 if (dbuff >= dbuff_end) {
10489 EL(qlt, "skipped, no buffer space, needed=0\n");
10490 entry->h.driver_flags = (uint8_t)
10491 (entry->h.driver_flags | SKIPPED_FLAG);
10492 return;
10493 }
10494
10495 REG_WR32(qlt, REG_IOBUS_BASE_ADDR, 0x40);
10496 REG_WR32(qlt, 0xc4, entry->data);
10497 REG_WR32(qlt, 0xc0, entry->addr);
10498 }
10499
10500 static int
10501 qlt_27xx_dt_rrram(qlt_state_t *qlt, qlt_dt_rrram_t *entry,
10502 uint8_t *dbuff, uint8_t *dbuff_end)
10503 {
10504 int rval, esize;
10505
10506 esize = entry->count * 4; /* data */
10507
10508 if (dbuff == NULL) {
10509 return (esize);
10510 }
10511 if (esize + dbuff >= dbuff_end) {
10512 EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10513 entry->h.driver_flags = (uint8_t)
10514 (entry->h.driver_flags | SKIPPED_FLAG);
10515 return (0);
10516 }
10517
10518 if ((rval = qlt_27xx_dump_ram(qlt, MBC_MPI_RAM, entry->addr,
10519 entry->count, dbuff)) != QLT_SUCCESS) {
10520 EL(qlt, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
10521 "esize=0\n", rval, entry->addr, entry->count);
10522 return (0);
10523 }
10524
10525 return (esize);
10526 }
10527
10528 static int
10529 qlt_27xx_dt_rpcic(qlt_state_t *qlt, qlt_dt_rpcic_t *entry,
10530 uint8_t *dbuff, uint8_t *dbuff_end)
10531 {
10532 int esize;
10533 uint32_t i;
10534 uint8_t *bp = dbuff;
10535 uint32_t addr = entry->addr;
10536 uint32_t cnt = entry->count;
10537
10538 esize = cnt * 4; /* addr */
10539 esize += cnt * 4; /* data */
10540
10541 if (dbuff == NULL) {
10542 return (esize);
10543 }
10544 if (esize + dbuff >= dbuff_end) {
10545 EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10546 entry->h.driver_flags = (uint8_t)
10547 (entry->h.driver_flags | SKIPPED_FLAG);
10548 return (0);
10549 }
10550
10551 while (cnt--) {
10552 *bp++ = LSB(LSW(addr));
10553 *bp++ = MSB(LSW(addr));
10554 *bp++ = LSB(MSW(addr));
10555 *bp++ = MSB(MSW(addr));
10556 for (i = 0; i < 4; i++) {
10557 *bp++ = PCICFG_RD8(qlt, addr++);
10558 }
10559 }
10560
10561 return (esize);
10562 }
10563
10564 /*ARGSUSED*/
10565 static int
10566 qlt_27xx_dt_gques(qlt_state_t *qlt, qlt_dt_gques_t *entry,
10567 uint8_t *dbuff, uint8_t *dbuff_end)
10568 {
10569 if (entry->queue_type == 1) {
10570 EL(qlt, "skipped, no request queue shadowing, esize=0\n");
10571 if (dbuff != NULL) {
10572 entry->num_queues = 0;
10573 entry->h.driver_flags = (uint8_t)
10574 (entry->h.driver_flags | SKIPPED_FLAG);
10575 }
10576 return (0);
10577 } else if (entry->queue_type == 2) {
10578 EL(qlt, "skipped, no response queue shadowing, esize=0\n");
10579 if (dbuff != NULL) {
10580 entry->num_queues = 0;
10581 entry->h.driver_flags = (uint8_t)
10582 (entry->h.driver_flags | SKIPPED_FLAG);
10583 }
10584 return (0);
10585 } else if (entry->queue_type == 3) {
10586 EL(qlt, "skipped, no ATIO queue, esize=0\n");
10587 if (dbuff != NULL) {
10588 entry->num_queues = 0;
10589 entry->h.driver_flags = (uint8_t)
10590 (entry->h.driver_flags | SKIPPED_FLAG);
10591 }
10592 return (0);
10593 } else {
10594 EL(qlt, "skipped, unknown queue_type %d, esize=0\n",
10595 entry->queue_type);
10596 if (dbuff != NULL) {
10597 entry->h.driver_flags = (uint8_t)
10598 (entry->h.driver_flags | SKIPPED_FLAG);
10599 }
10600 return (0);
10601 }
10602 }
10603
/*
 * qlt_27xx_dt_wdmp
 *	Dump-template entry: copy the template-supplied literal data words
 *	into the dump buffer, little-endian byte order.
 *
 * NOTE(review): esize is set to entry->length, but the loop below emits
 * 4 bytes per data word, i.e. length * 4 bytes total.  If entry->length
 * is a word count the returned size under-reports the bytes written by a
 * factor of 4 — verify against the 27xx dump template specification.
 */
static int
qlt_27xx_dt_wdmp(qlt_state_t *qlt, qlt_dt_wdmp_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint8_t *bp = dbuff;
	uint32_t data, cnt = entry->length, *dp = entry->data;

	esize = cnt;
	/* Sizing pass (dbuff == NULL): report the size only. */
	if (dbuff == NULL) {
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Emit each 32-bit word LSB first. */
	while (cnt--) {
		data = *dp++;
		*bp++ = LSB(LSW(data));
		*bp++ = MSB(LSW(data));
		*bp++ = LSB(MSW(data));
		*bp++ = MSB(MSW(data));
	}

	return (esize);
}
10633
10634 /*
10635 * qlt_27xx_dump_ram
10636 * Dumps RAM.
10637 * Risc interrupts must be disabled when this routine is called.
10638 *
10639 * Input:
10640 * pi: port info pointer.
10641 * cmd: MBC_DUMP_RAM_EXTENDED/MBC_MPI_RAM.
10642 * risc_address: RISC code start address.
10643 * len: Number of words.
10644 * bp: buffer pointer.
10645 *
10646 * Returns:
10647 * qlt local function return status code.
10648 *
10649 * Context:
10650 * Interrupt or Kernel context, no mailbox commands allowed.
10651 */
10652 /*ARGSUSED*/
10653 static int
10654 qlt_27xx_dump_ram(qlt_state_t *qlt, uint16_t cmd, uint32_t risc_address,
10655 uint32_t len, uint8_t *bp)
10656 {
10657 uint8_t *dp;
10658 uint32_t words_to_read, endaddr;
10659 uint32_t i;
10660 int rval = QLT_SUCCESS;
10661
10662 endaddr = risc_address + len;
10663 words_to_read = 0;
10664 while (risc_address < endaddr) {
10665 words_to_read = MBOX_DMA_MEM_SIZE >> 2;
10666 if ((words_to_read + risc_address) > endaddr) {
10667 words_to_read = endaddr - risc_address;
10668 }
10669
10670 if (cmd == MBC_DUMP_RAM_EXTENDED) {
10671 rval = qlt_read_risc_ram(qlt, risc_address,
10672 words_to_read);
10673 } else {
10674 rval = qlt_mbx_mpi_ram(qlt, risc_address,
10675 words_to_read, 0);
10676 }
10677
10678 if (rval != QLT_SUCCESS) {
10679 EL(qlt, "Error reading risc ram = %xh len = %x\n",
10680 risc_address, words_to_read);
10681 return (rval);
10682 }
10683
10684 dp = (uint8_t *)(qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
10685 for (i = 0; i < (words_to_read * 4); i++) {
10686 *bp++ = *dp++;
10687 }
10688 risc_address += words_to_read;
10689 }
10690
10691 return (rval);
10692 }
10693
/*
 * qlt_27xx_dmp_parse_template
 *	Walks a 27xx firmware dump template, dispatching each template
 *	entry to its capture routine.
 *
 *	Two-pass protocol: when dump_buff is NULL the capture routines
 *	only report the space each entry would consume (size
 *	determination); when dump_buff is non-NULL the entries capture
 *	data into the buffer and skipped entries are flagged.
 *
 * Input:
 *	qlt:		adapter state pointer.
 *	template_hdr:	dump template header pointer.
 *	dump_buff:	destination buffer, or NULL for the sizing pass.
 *	buff_size:	size of dump_buff in bytes.
 *
 * Returns:
 *	Total dump size in bytes (template size plus all entry data),
 *	or 0 if the template header is not valid.
 */
static uint32_t
qlt_27xx_dmp_parse_template(qlt_state_t *qlt, qlt_dt_hdr_t *template_hdr,
    uint8_t *dump_buff, uint32_t buff_size)
{
	int e_cnt, esize, num_of_entries;
	uint32_t bsize;
	time_t time;
	uint8_t *dbuff, *dbuff_end;
	qlt_dt_entry_t *entry;
	int sane_end = 0;	/* number of DT_TEND entries encountered */

	dbuff = dump_buff; /* dbuff = NULL size determination. */
	dbuff_end = dump_buff + buff_size;

	if (template_hdr->type != DT_THDR) {
		EL(qlt, "Template header not found\n");
		return (0);
	}
	if (dbuff != NULL) {
		/* Capture pass only: stamp the dump time in the header. */
		(void) drv_getparm(TIME, &time);
		template_hdr->driver_timestamp = LSD(time);
	}

	num_of_entries = template_hdr->num_of_entries;
	entry = (qlt_dt_entry_t *)((caddr_t)template_hdr +
	    template_hdr->first_entry_offset);

	/* The template itself is included in the dump size. */
	bsize = template_hdr->size_of_template;
	for (e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
		/*
		 * Decode the entry type and process it accordingly
		 */
		esize = 0;
		switch (entry->h.type) {
		case DT_NOP:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			break;
		case DT_TEND:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			sane_end++;
			break;
		case DT_RIOB1:
			esize = qlt_27xx_dt_riob1(qlt, (qlt_dt_riob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB1:
			/* Write-only entry: contributes no dump data. */
			qlt_27xx_dt_wiob1(qlt, (qlt_dt_wiob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RIOB2:
			esize = qlt_27xx_dt_riob2(qlt, (qlt_dt_riob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB2:
			qlt_27xx_dt_wiob2(qlt, (qlt_dt_wiob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCI:
			esize = qlt_27xx_dt_rpci(qlt, (qlt_dt_rpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WPCI:
			qlt_27xx_dt_wpci(qlt, (qlt_dt_wpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRAM:
			esize = qlt_27xx_dt_rram(qlt, (qlt_dt_rram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUE:
			esize = qlt_27xx_dt_gque(qlt, (qlt_dt_gque_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GFCE:
			esize = qlt_27xx_dt_gfce(qlt, (qlt_dt_gfce_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_PRISC:
			/* Pause RISC: control entry, no dump data. */
			qlt_27xx_dt_prisc(qlt, (qlt_dt_prisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRISC:
			qlt_27xx_dt_rrisc(qlt, (qlt_dt_rrisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_DINT:
			qlt_27xx_dt_dint(qlt, (qlt_dt_dint_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GHBD:
			esize = qlt_27xx_dt_ghbd(qlt, (qlt_dt_ghbd_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_SCRA:
			esize = qlt_27xx_dt_scra(qlt, (qlt_dt_scra_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRREG:
			esize = qlt_27xx_dt_rrreg(qlt, (qlt_dt_rrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WRREG:
			qlt_27xx_dt_wrreg(qlt, (qlt_dt_wrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRRAM:
			esize = qlt_27xx_dt_rrram(qlt, (qlt_dt_rrram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCIC:
			esize = qlt_27xx_dt_rpcic(qlt, (qlt_dt_rpcic_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUES:
			esize = qlt_27xx_dt_gques(qlt, (qlt_dt_gques_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WDMP:
			esize = qlt_27xx_dt_wdmp(qlt, (qlt_dt_wdmp_t *)entry,
			    dbuff, dbuff_end);
			break;
		default:
			/*
			 * NOTE(review): unlike the other arms, the flag is
			 * set here even on the sizing pass (dbuff == NULL);
			 * harmless since the OR is idempotent, but worth
			 * confirming against the template spec.
			 */
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			EL(qlt, "Entry ID=%d, type=%d unknown\n", e_cnt,
			    entry->h.type);
			break;
		}
		if (dbuff != NULL && esize) {
			dbuff += esize;
		}
		bsize += esize;
		/* next entry in the template */
		entry = (qlt_dt_entry_t *)((caddr_t)entry + entry->h.size);
	}
	if (sane_end > 1) {
		/* More than one end-of-template entry: template is bad. */
		EL(qlt, "Template configuration error. Check Template\n");
	}

	return (bsize);
}
|