1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 #pragma ident "Copyright 2015 QLogic Corporation; ql_init.c"
29
30 /*
31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 *
33 * ***********************************************************************
34 * * **
35 * * NOTICE **
36 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
37 * * ALL RIGHTS RESERVED **
38 * * **
39 * ***********************************************************************
40 *
41 */
42
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_init.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_mbx.h>
50 #include <ql_nx.h>
51 #include <ql_xioctl.h>
52
53 /*
54 * Local data
55 */
56
57 /*
58 * Local prototypes
59 */
60 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t);
61 static int ql_nvram_24xx_config(ql_adapter_state_t *);
62 static void ql_23_properties(ql_adapter_state_t *, ql_init_cb_t *);
63 static void ql_24xx_properties(ql_adapter_state_t *, ql_init_24xx_cb_t *);
64 static int ql_check_isp_firmware(ql_adapter_state_t *);
65 static int ql_load_flash_fw(ql_adapter_state_t *);
66 static int ql_configure_loop(ql_adapter_state_t *);
67 static int ql_configure_hba(ql_adapter_state_t *);
68 static int ql_configure_fabric(ql_adapter_state_t *);
69 static int ql_configure_device_d_id(ql_adapter_state_t *);
70 static void ql_update_dev(ql_adapter_state_t *, uint32_t);
71 static void ql_set_max_read_req(ql_adapter_state_t *);
72 static void ql_configure_n_port_info(ql_adapter_state_t *);
73 static void ql_reset_24xx_chip(ql_adapter_state_t *);
74 static void ql_mps_reset(ql_adapter_state_t *);
75
76 /*
77 * ql_initialize_adapter
78 * Initialize board.
79 *
80 * Input:
81 * ha = adapter state pointer.
82 *
83 * Returns:
84 * ql local function return status code.
85 *
86 * Context:
87 * Kernel context.
88 */
89 int
90 ql_initialize_adapter(ql_adapter_state_t *ha)
91 {
92 int rval;
93 class_svc_param_t *class3_param;
94 caddr_t msg;
95 la_els_logi_t *els = &ha->loginparams;
96 int retries = 5;
97
98 QL_PRINT_10(ha, "started cfg=0x%llx\n", ha->cfg_flags);
99
100 do {
101 /* Clear adapter flags. */
102 TASK_DAEMON_LOCK(ha);
103 ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG |
104 TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG |
105 TASK_DAEMON_IDLE_CHK_FLG;
106 ha->task_daemon_flags |= LOOP_DOWN;
107 TASK_DAEMON_UNLOCK(ha);
108
109 ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
110 ADAPTER_STATE_LOCK(ha);
111 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
112 ha->flags &= ~ONLINE;
113 ADAPTER_STATE_UNLOCK(ha);
114
115 ha->state = FC_STATE_OFFLINE;
116 msg = "Loop OFFLINE";
117
118 rval = ql_pci_sbus_config(ha);
119 if (rval != QL_SUCCESS) {
120 TASK_DAEMON_LOCK(ha);
121 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
122 EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n");
123 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
124 }
125 TASK_DAEMON_UNLOCK(ha);
126 continue;
127 }
128
129 (void) ql_setup_fcache(ha);
130
131 /* Reset ISP chip. */
132 ql_reset_chip(ha);
133
134 /* Get NVRAM configuration if needed. */
135 if (ha->init_ctrl_blk.cb.version == 0) {
136 (void) ql_nvram_config(ha);
137 }
138
139 /* Determine which RISC code to use. */
140 if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
141 if (ha->dev_state != NX_DEV_READY) {
142 EL(ha, "dev_state not ready, isp_abort_needed_2"
143 "\n");
144 TASK_DAEMON_LOCK(ha);
145 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
146 TASK_DAEMON_UNLOCK(ha);
147 break;
148 }
149 if ((rval = ql_mbx_wrap_test(ha, NULL)) == QL_SUCCESS) {
150 rval = ql_load_isp_firmware(ha);
151 }
152 }
153
154 if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
155 QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) {
156
157 ql_enable_intr(ha);
158 (void) ql_fw_ready(ha, ha->fwwait);
159
160 if (!DRIVER_SUSPENDED(ha) &&
161 ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
162 if (ha->topology & QL_LOOP_CONNECTION) {
163 ha->state = ha->state | FC_STATE_LOOP;
164 msg = "Loop ONLINE";
165 TASK_DAEMON_LOCK(ha);
166 ha->task_daemon_flags |= STATE_ONLINE;
167 TASK_DAEMON_UNLOCK(ha);
168 } else if (ha->topology & QL_P2P_CONNECTION) {
169 ha->state = ha->state |
170 FC_STATE_ONLINE;
171 msg = "Link ONLINE";
172 TASK_DAEMON_LOCK(ha);
173 ha->task_daemon_flags |= STATE_ONLINE;
174 TASK_DAEMON_UNLOCK(ha);
175 } else {
176 msg = "Unknown Link state";
177 }
178 }
179 } else {
180 TASK_DAEMON_LOCK(ha);
181 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
182 EL(ha, "failed, isp_abort_needed\n");
183 ha->task_daemon_flags |= ISP_ABORT_NEEDED |
184 LOOP_DOWN;
185 }
186 TASK_DAEMON_UNLOCK(ha);
187 }
188
189 } while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED);
190
191 cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg);
192
193 /* Enable ISP interrupts if not already enabled. */
194 if (!(ha->flags & INTERRUPTS_ENABLED)) {
195 ql_enable_intr(ha);
196 }
197
198 ADAPTER_STATE_LOCK(ha);
199 ha->flags |= ONLINE;
200 ADAPTER_STATE_UNLOCK(ha);
201
202 /*
203 * Set flash write-protection.
204 */
205 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
206 ha->dev_state == NX_DEV_READY) {
207 ql_24xx_protect_flash(ha);
208 }
209
210 TASK_DAEMON_LOCK(ha);
211 ha->task_daemon_flags &= ~(FC_STATE_CHANGE | MARKER_NEEDED |
212 COMMAND_WAIT_NEEDED);
213 TASK_DAEMON_UNLOCK(ha);
214
215 /*
216 * Setup login parameters.
217 */
218 bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv, strlen(QL_VERSION));
219
220 els->common_service.fcph_version = 0x2006;
221 els->common_service.btob_credit = 3;
222 els->common_service.cmn_features =
223 ha->topology & QL_N_PORT ? 0x8000 : 0x8800;
224 els->common_service.conc_sequences = 0xff;
225 els->common_service.relative_offset = 3;
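	/*
	 * 0x07d0 is 2000 decimal; E_D_TOV is conventionally expressed in
	 * milliseconds, so this is a 2 second timeout.
	 */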
226 els->common_service.e_d_tov = 0x07d0;
227
228 class3_param = (class_svc_param_t *)&els->class_3;
229 class3_param->class_valid_svc_opt = 0x8800;
230 class3_param->rcv_data_size = els->common_service.rx_bufsize;
231 class3_param->conc_sequences = 0xff;
232 class3_param->open_sequences_per_exch = 1;
233
234 if (rval != QL_SUCCESS) {
235 EL(ha, "failed, rval = %xh\n", rval);
236 } else {
237 /*EMPTY*/
238 QL_PRINT_10(ha, "done\n");
239 }
240 return (rval);
241 }
242
243 /*
244 * ql_pci_sbus_config
245 * Setup device PCI/SBUS configuration registers.
246 *
247 * Input:
248 * ha = adapter state pointer.
249 *
250 * Returns:
251 * ql local function return status code.
252 *
253 * Context:
254 * Kernel context.
255 */
256 int
257 ql_pci_sbus_config(ql_adapter_state_t *ha)
258 {
259 uint32_t timer;
260 uint16_t cmd, w16;
261
262 QL_PRINT_10(ha, "started\n");
263
264 if (CFG_IST(ha, CFG_SBUS_CARD)) {
265 w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
266 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION));
267 EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4,
268 w16 & 0xf);
269 } else {
270 /*
		 * We want to respect the framework's setting of the PCI
		 * configuration space command register and also make sure
		 * that all bits of interest to us are properly set in
		 * that register.
275 */
276 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
277 cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE |
278 PCI_COMM_ME | PCI_COMM_PARITY_DETECT |
279 PCI_COMM_SERR_ENABLE);
280 if (ql_get_cap_ofst(ha, PCI_CAP_ID_PCIX)) {
281 cmd = (uint16_t)(cmd | PCI_COMM_MEMWR_INVAL);
282 }
283
284 /*
285 * If this is a 2300 card and not 2312, reset the
286 * MEMWR_INVAL due to a bug in the 2300. Unfortunately, the
287 * 2310 also reports itself as a 2300 so we need to get the
288 * fb revision level -- a 6 indicates it really is a 2300 and
289 * not a 2310.
290 */
291
292 if (ha->device_id == 0x2300) {
293 /* Pause RISC. */
294 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
295 for (timer = 0; timer < 30000; timer++) {
296 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) !=
297 0) {
298 break;
299 } else {
300 drv_usecwait(MILLISEC);
301 }
302 }
303
304 /* Select FPM registers. */
305 WRT16_IO_REG(ha, ctrl_status, 0x20);
306
307 /* Get the fb rev level */
308 if (RD16_IO_REG(ha, fb_cmd) == 6) {
309 cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL);
310 }
311
312 /* Deselect FPM registers. */
313 WRT16_IO_REG(ha, ctrl_status, 0x0);
314
315 /* Release RISC module. */
316 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
317 for (timer = 0; timer < 30000; timer++) {
318 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) ==
319 0) {
320 break;
321 } else {
322 drv_usecwait(MILLISEC);
323 }
324 }
325 } else if (ha->device_id == 0x2312) {
326 /*
327 * cPCI ISP2312 specific code to service function 1
328 * hot-swap registers.
329 */
330 if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK)
331 != 0) {
332 ql_pci_config_put8(ha, 0x66, 0xc2);
333 }
334 }
335
336 if (!(CFG_IST(ha, CFG_CTRL_82XX)) &&
337 ha->pci_max_read_req != 0) {
338 ql_set_max_read_req(ha);
339 }
340
341 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
342
343 /* Set cache line register. */
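		/* 0x10 is in units of 32-bit words, i.e. 64 bytes. */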
344 ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10);
345
346 /* Set latency register. */
347 ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40);
348
349 /* Reset expansion ROM address decode enable. */
350 if (!CFG_IST(ha, CFG_CTRL_278083)) {
351 w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM);
352 w16 = (uint16_t)(w16 & ~BIT_0);
353 ql_pci_config_put16(ha, PCI_CONF_ROM, w16);
354 }
355 }
356
357 QL_PRINT_10(ha, "done\n");
358
359 return (QL_SUCCESS);
360 }
361
/*
 * ql_set_max_read_req
 *	Set the PCI max read request value.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Output:
 *	none.
 *
 * Context:
 *	Kernel context.
 */
static void
378 ql_set_max_read_req(ql_adapter_state_t *ha)
379 {
380 int ofst;
381 uint16_t read_req, w16;
382 uint16_t tmp = ha->pci_max_read_req;
383
384 QL_PRINT_3(ha, "started\n");
385
386 if ((ofst = ql_get_cap_ofst(ha, PCI_CAP_ID_PCIX))) {
387 ofst += PCI_PCIX_COMMAND;
388 QL_PRINT_10(ha, "PCI-X Command Reg = %xh\n", ofst);
		/* check for valid override value */
390 if (tmp == 512 || tmp == 1024 || tmp == 2048 ||
391 tmp == 4096) {
392 /* shift away the don't cares */
393 tmp = (uint16_t)(tmp >> 10);
394 /* convert bit pos to request value */
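			/*
			 * e.g. 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3,
			 * which matches the PCI-X maximum memory read byte
			 * count encoding in bits 3-2 of the command register.
			 */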
395 for (read_req = 0; tmp != 0; read_req++) {
396 tmp = (uint16_t)(tmp >> 1);
397 }
398 w16 = (uint16_t)ql_pci_config_get16(ha, ofst);
			w16 = (uint16_t)(w16 & ~(BIT_3 | BIT_2));
400 w16 = (uint16_t)(w16 | (read_req << 2));
401 ql_pci_config_put16(ha, ofst, w16);
402 } else {
403 EL(ha, "invalid parameter value for "
404 "'pci-max-read-request': %d; using system "
405 "default\n", tmp);
406 }
407 } else if ((ofst = ql_get_cap_ofst(ha, PCI_CAP_ID_PCI_E))) {
408 ofst += PCI_PCIE_DEVICE_CONTROL;
409 QL_PRINT_10(ha, "PCI-E Device Control Reg = %xh\n", ofst);
410 if (tmp == 128 || tmp == 256 || tmp == 512 ||
411 tmp == 1024 || tmp == 2048 || tmp == 4096) {
412 /* shift away the don't cares */
413 tmp = (uint16_t)(tmp >> 8);
414 /* convert bit pos to request value */
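			/*
			 * e.g. 128 -> 0, 256 -> 1, ... 4096 -> 5, which
			 * matches the PCIe Max_Read_Request_Size encoding
			 * in bits 14-12 of the Device Control register.
			 */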
415 for (read_req = 0; tmp != 0; read_req++) {
416 tmp = (uint16_t)(tmp >> 1);
417 }
418 w16 = (uint16_t)ql_pci_config_get16(ha, ofst);
419 w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 |
420 BIT_12));
421 w16 = (uint16_t)(w16 | (read_req << 12));
422 ql_pci_config_put16(ha, ofst, w16);
423 } else {
424 EL(ha, "invalid parameter value for "
425 "'pci-max-read-request': %d; using system "
426 "default\n", tmp);
427 }
428 }
429 QL_PRINT_3(ha, "done\n");
430 }
431
432 /*
 * ql_nvram_config
 *	NVRAM configuration.
434 *
435 * Input:
436 * ha: adapter state pointer.
437 * ha->req_q[0]: request ring
438 *
439 * Output:
440 * ha->init_ctrl_blk = initialization control block
441 * host adapters parameters in host adapter block
442 *
443 * Returns:
444 * ql local function return status code.
445 *
446 * Context:
447 * Kernel context.
448 */
449 int
450 ql_nvram_config(ql_adapter_state_t *ha)
451 {
452 uint32_t cnt;
453 caddr_t dptr1, dptr2;
454 ql_init_cb_t *icb = &ha->init_ctrl_blk.cb;
455 ql_ip_init_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb;
456 nvram_t *nv = (nvram_t *)ha->req_q[0]->req_ring.bp;
457 uint16_t *wptr = (uint16_t *)ha->req_q[0]->req_ring.bp;
458 uint8_t chksum = 0;
459 int rval;
460 int idpromlen;
461 char idprombuf[32];
462 uint32_t start_addr;
463 la_els_logi_t *els = &ha->loginparams;
464
465 QL_PRINT_10(ha, "started\n");
466
467 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
468 return (ql_nvram_24xx_config(ha));
469 }
470
471 start_addr = 0;
472 if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) ==
473 QL_SUCCESS) {
474 /* Verify valid NVRAM checksum. */
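		/* The bytes of a valid NVRAM image sum to zero (mod 256). */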
475 for (cnt = 0; cnt < sizeof (nvram_t) / 2; cnt++) {
476 *wptr = (uint16_t)ql_get_nvram_word(ha,
477 (uint32_t)(cnt + start_addr));
478 chksum = (uint8_t)(chksum + (uint8_t)*wptr);
479 chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8));
480 wptr++;
481 }
482 ql_release_nvram(ha);
483 }
484
	/* Bad NVRAM data, set default parameters. */
486 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
487 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
488 nv->nvram_version < 1) {
489
490 EL(ha, "failed, rval=%xh, checksum=%xh, "
491 "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, "
492 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
493 nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size,
494 ha->subven_id, nv->nvram_version);
495
496 /* Don't print nvram message if it's an on-board 2200 */
497 if (!((CFG_IST(ha, CFG_CTRL_22XX)) &&
498 (ha->xioctl->fdesc.flash_size == 0))) {
499 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed,"
500 " using driver defaults.", QL_NAME, ha->instance);
501 }
502
503 /* Reset NVRAM data. */
504 bzero((void *)nv, sizeof (nvram_t));
505
506 /*
507 * Set default initialization control block.
508 */
509 nv->parameter_block_version = ICB_VERSION;
510 nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
511 nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
512
513 nv->max_frame_length[1] = 4;
514
515 /*
516 * Allow 2048 byte frames for 2300
517 */
518 if (CFG_IST(ha, CFG_CTRL_2363)) {
519 nv->max_frame_length[1] = 8;
520 }
521 nv->max_iocb_allocation[1] = 1;
522 nv->execution_throttle[0] = 16;
523 nv->login_retry_count = 8;
524
525 idpromlen = 32;
526
527 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
528 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
529 DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
530 &idpromlen) != DDI_PROP_SUCCESS) {
531
532 QL_PRINT_10(ha, "Unable to read idprom "
533 "property\n");
534 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
535 "property", QL_NAME, ha->instance);
536
537 nv->port_name[2] = 33;
538 nv->port_name[3] = 224;
539 nv->port_name[4] = 139;
540 nv->port_name[7] = (uint8_t)
541 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
542 } else {
543
544 nv->port_name[2] = idprombuf[2];
545 nv->port_name[3] = idprombuf[3];
546 nv->port_name[4] = idprombuf[4];
547 nv->port_name[5] = idprombuf[5];
548 nv->port_name[6] = idprombuf[6];
549 nv->port_name[7] = idprombuf[7];
550 nv->port_name[0] = (uint8_t)
551 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
552 }
553
554 /* Don't print nvram message if it's an on-board 2200 */
		if (!((CFG_IST(ha, CFG_CTRL_22XX)) &&
		    (ha->xioctl->fdesc.flash_size == 0))) {
557 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using"
558 " default HBA parameters and temporary WWPN:"
559 " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
560 ha->instance, nv->port_name[0], nv->port_name[1],
561 nv->port_name[2], nv->port_name[3],
562 nv->port_name[4], nv->port_name[5],
563 nv->port_name[6], nv->port_name[7]);
564 }
565
566 nv->login_timeout = 4;
567
568 /* Set default connection options for the 23xx to 2 */
569 if (!(CFG_IST(ha, CFG_CTRL_22XX))) {
570 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
571 BIT_5);
572 }
573
574 /*
575 * Set default host adapter parameters
576 */
577 nv->host_p[0] = BIT_1;
578 nv->host_p[1] = BIT_2;
579 nv->reset_delay = 5;
580 nv->port_down_retry_count = 8;
581 nv->maximum_luns_per_target[0] = 8;
582
583 rval = QL_FUNCTION_FAILED;
584 }
585
586 /* Reset initialization control blocks. */
587 bzero((void *)icb, sizeof (ql_init_cb_t));
588 bzero((void *)ip_icb, sizeof (ql_ip_init_cb_t));
589
590 /*
591 * Copy over NVRAM RISC parameter block
592 * to initialization control block.
593 */
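	/*
	 * The copy is split so that the middle of the ICB, largely the
	 * request/response queue fields, is not taken from NVRAM; the
	 * queue fields are filled in from the driver's ring state below.
	 */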
594 dptr1 = (caddr_t)icb;
595 dptr2 = (caddr_t)&nv->parameter_block_version;
596 cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] -
597 (uintptr_t)&icb->version);
598 while (cnt-- != 0) {
599 *dptr1++ = *dptr2++;
600 }
601
602 /* Copy 2nd half. */
603 dptr1 = (caddr_t)&icb->add_fw_opt[0];
604 cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] -
605 (uintptr_t)&icb->add_fw_opt[0]);
606 while (cnt-- != 0) {
607 *dptr1++ = *dptr2++;
608 }
609
610 ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
611 nv->execution_throttle[1]);
612 ha->loop_reset_delay = nv->reset_delay;
613 ha->port_down_retry_count = nv->port_down_retry_count;
614 ha->maximum_luns_per_target = CHAR_TO_SHORT(
615 nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]);
616 if (ha->maximum_luns_per_target == 0) {
617 ha->maximum_luns_per_target++;
618 }
619 ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0],
620 nv->adapter_features[1]);
621
622 /* Check for adapter node name (big endian). */
623 for (cnt = 0; cnt < 8; cnt++) {
624 if (icb->node_name[cnt] != 0) {
625 break;
626 }
627 }
628
629 /* Copy port name if no node name (big endian). */
630 if (cnt == 8) {
631 for (cnt = 0; cnt < 8; cnt++) {
632 icb->node_name[cnt] = icb->port_name[cnt];
633 }
634 icb->node_name[0] = (uint8_t)(icb->node_name[0] & ~BIT_0);
635 icb->port_name[0] = (uint8_t)(icb->node_name[0] | BIT_0);
636 }
637
638 ADAPTER_STATE_LOCK(ha);
639 ha->cfg_flags &= ~(CFG_ENABLE_FULL_LIP_LOGIN | CFG_ENABLE_TARGET_RESET |
640 CFG_ENABLE_LIP_RESET | CFG_LOAD_FLASH_FW | CFG_FAST_TIMEOUT |
641 CFG_DISABLE_RISC_CODE_LOAD | CFG_ENABLE_FWEXTTRACE |
642 CFG_ENABLE_FWFCETRACE | CFG_SET_CACHE_LINE_SIZE_1 | CFG_LR_SUPPORT);
643 if (nv->host_p[0] & BIT_4) {
644 ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD;
645 }
646 if (nv->host_p[0] & BIT_5) {
647 ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1;
648 }
649 if (nv->host_p[1] & BIT_2) {
650 ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
651 }
652 if (nv->host_p[1] & BIT_3) {
653 ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
654 }
655 nv->adapter_features[0] & BIT_3 ?
656 (ha->flags |= MULTI_CHIP_ADAPTER) :
657 (ha->flags &= ~MULTI_CHIP_ADAPTER);
658 ADAPTER_STATE_UNLOCK(ha);
659
660 /* Get driver properties. */
661 ql_23_properties(ha, icb);
662
663 /*
664 * Setup driver firmware options.
665 */
666 icb->firmware_options[0] = (uint8_t)
667 (icb->firmware_options[0] | BIT_6 | BIT_1);
668
669 /*
	 * Fast posting is of no use on SBUS or 2300 cards.
	 * Always enable 64-bit addressing, except on SBUS cards.
672 */
673 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
674 if (CFG_IST(ha, CFG_SBUS_CARD | CFG_CTRL_2363)) {
675 icb->firmware_options[0] = (uint8_t)
676 (icb->firmware_options[0] & ~BIT_3);
677 if (CFG_IST(ha, CFG_SBUS_CARD)) {
678 icb->special_options[0] = (uint8_t)
679 (icb->special_options[0] | BIT_5);
680 ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING;
681 }
682 } else {
683 icb->firmware_options[0] = (uint8_t)
684 (icb->firmware_options[0] | BIT_3);
685 }
686 /* RIO and ZIO not supported. */
687 icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
688 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
689
690 icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] |
691 BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0);
692 icb->firmware_options[0] = (uint8_t)
693 (icb->firmware_options[0] & ~(BIT_5 | BIT_4));
694 icb->firmware_options[1] = (uint8_t)
695 (icb->firmware_options[1] & ~BIT_4);
696 if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
697 icb->firmware_options[1] = (uint8_t)
698 (icb->firmware_options[1] | BIT_7 | BIT_6);
699 icb->add_fw_opt[1] = (uint8_t)
700 (icb->add_fw_opt[1] | BIT_5 | BIT_4);
701 }
702 icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4));
703 icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1);
704
705 if (CFG_IST(ha, CFG_CTRL_2363)) {
706 if ((icb->special_options[1] & 0x20) == 0) {
707 EL(ha, "50 ohm is not set\n");
708 }
709 }
710
711 /*
712 * Set host adapter parameters
713 */
714 /* Get adapter id string for Sun branded 23xx only */
715 if (CFG_IST(ha, CFG_CTRL_23XX) && nv->adapInfo[0] != 0) {
716 (void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
717 nv->adapInfo);
718 }
719
720 ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ?
721 R_A_TOV_DEFAULT : icb->login_timeout);
722
723 els->common_service.rx_bufsize = CHAR_TO_SHORT(
724 icb->max_frame_length[0], icb->max_frame_length[1]);
725 bcopy((void *)icb->port_name, (void *)els->nport_ww_name.raw_wwn, 8);
726 bcopy((void *)icb->node_name, (void *)els->node_ww_name.raw_wwn, 8);
727
728 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
729 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
730 QL_NAME, ha->instance,
731 els->nport_ww_name.raw_wwn[0], els->nport_ww_name.raw_wwn[1],
732 els->nport_ww_name.raw_wwn[2], els->nport_ww_name.raw_wwn[3],
733 els->nport_ww_name.raw_wwn[4], els->nport_ww_name.raw_wwn[5],
734 els->nport_ww_name.raw_wwn[6], els->nport_ww_name.raw_wwn[7],
735 els->node_ww_name.raw_wwn[0], els->node_ww_name.raw_wwn[1],
736 els->node_ww_name.raw_wwn[2], els->node_ww_name.raw_wwn[3],
737 els->node_ww_name.raw_wwn[4], els->node_ww_name.raw_wwn[5],
738 els->node_ww_name.raw_wwn[6], els->node_ww_name.raw_wwn[7]);
739 /*
740 * Setup ring parameters in initialization control block
741 */
742 cnt = ha->req_q[0]->req_entry_cnt;
743 icb->request_q_length[0] = LSB(cnt);
744 icb->request_q_length[1] = MSB(cnt);
745 cnt = ha->rsp_queues[0]->rsp_entry_cnt;
746 icb->response_q_length[0] = LSB(cnt);
747 icb->response_q_length[1] = MSB(cnt);
748
749 start_addr = ha->req_q[0]->req_ring.cookie.dmac_address;
750 icb->request_q_address[0] = LSB(LSW(start_addr));
751 icb->request_q_address[1] = MSB(LSW(start_addr));
752 icb->request_q_address[2] = LSB(MSW(start_addr));
753 icb->request_q_address[3] = MSB(MSW(start_addr));
754
755 start_addr = ha->req_q[0]->req_ring.cookie.dmac_notused;
756 icb->request_q_address[4] = LSB(LSW(start_addr));
757 icb->request_q_address[5] = MSB(LSW(start_addr));
758 icb->request_q_address[6] = LSB(MSW(start_addr));
759 icb->request_q_address[7] = MSB(MSW(start_addr));
760
761 start_addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_address;
762 icb->response_q_address[0] = LSB(LSW(start_addr));
763 icb->response_q_address[1] = MSB(LSW(start_addr));
764 icb->response_q_address[2] = LSB(MSW(start_addr));
765 icb->response_q_address[3] = MSB(MSW(start_addr));
766
767 start_addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_notused;
768 icb->response_q_address[4] = LSB(LSW(start_addr));
769 icb->response_q_address[5] = MSB(LSW(start_addr));
770 icb->response_q_address[6] = LSB(MSW(start_addr));
771 icb->response_q_address[7] = MSB(MSW(start_addr));
772
773 /*
774 * Setup IP initialization control block
775 */
776 ip_icb->version = IP_ICB_VERSION;
777
778 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
779 ip_icb->ip_firmware_options[0] = (uint8_t)
780 (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0);
781 } else {
782 ip_icb->ip_firmware_options[0] = (uint8_t)
783 (ip_icb->ip_firmware_options[0] | BIT_2);
784 }
785
786 cnt = RCVBUF_CONTAINER_CNT;
787 ip_icb->queue_size[0] = LSB(cnt);
788 ip_icb->queue_size[1] = MSB(cnt);
789
790 start_addr = ha->rcv_ring.cookie.dmac_address;
791 ip_icb->queue_address[0] = LSB(LSW(start_addr));
792 ip_icb->queue_address[1] = MSB(LSW(start_addr));
793 ip_icb->queue_address[2] = LSB(MSW(start_addr));
794 ip_icb->queue_address[3] = MSB(MSW(start_addr));
795
796 start_addr = ha->rcv_ring.cookie.dmac_notused;
797 ip_icb->queue_address[4] = LSB(LSW(start_addr));
798 ip_icb->queue_address[5] = MSB(LSW(start_addr));
799 ip_icb->queue_address[6] = LSB(MSW(start_addr));
800 ip_icb->queue_address[7] = MSB(MSW(start_addr));
801
802 if (rval != QL_SUCCESS) {
803 EL(ha, "failed, rval = %xh\n", rval);
804 } else {
805 /*EMPTY*/
806 QL_PRINT_10(ha, "done\n");
807 }
808 return (rval);
809 }
810
811 /*
 * ql_get_nvram_word
 *	Calculates word position in NVRAM and calls request routine to
 *	get the word from NVRAM.
815 *
816 * Input:
817 * ha = adapter state pointer.
818 * address = NVRAM word address.
819 *
820 * Returns:
821 * data word.
822 *
823 * Context:
824 * Kernel context.
825 */
826 uint16_t
827 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address)
828 {
829 uint32_t nv_cmd;
830 uint16_t rval;
831
832 QL_PRINT_4(ha, "started\n");
833
834 nv_cmd = address << 16;
835 nv_cmd = nv_cmd | NV_READ_OP;
836
837 rval = (uint16_t)ql_nvram_request(ha, nv_cmd);
838
839 QL_PRINT_4(ha, "NVRAM data = %xh\n", rval);
840
841 return (rval);
842 }
843
844 /*
 * ql_nvram_request
 *	Sends read command to NVRAM and gets data from NVRAM.
847 *
848 * Input:
849 * ha = adapter state pointer.
850 * nv_cmd = Bit 26= start bit
851 * Bit 25, 24 = opcode
852 * Bit 23-16 = address
853 * Bit 15-0 = write data
854 *
855 * Returns:
856 * data word.
857 *
858 * Context:
859 * Kernel context.
860 */
861 static uint16_t
862 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd)
863 {
864 uint8_t cnt;
865 uint16_t reg_data;
866 uint16_t data = 0;
867
868 /* Send command to NVRAM. */
869
870 nv_cmd <<= 5;
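	/*
	 * Shift the command up so the start bit (bit 26) lands in bit 31;
	 * the loop below then clocks out the 11 command bits (start bit,
	 * opcode and address) MSB first.
	 */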
871 for (cnt = 0; cnt < 11; cnt++) {
872 if (nv_cmd & BIT_31) {
873 ql_nv_write(ha, NV_DATA_OUT);
874 } else {
875 ql_nv_write(ha, 0);
876 }
877 nv_cmd <<= 1;
878 }
879
880 /* Read data from NVRAM. */
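	/*
	 * Pulse NV_CLOCK for each of the 16 data bits; the bit presented
	 * on NV_DATA_IN is shifted into the result MSB first.
	 */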
881
882 for (cnt = 0; cnt < 16; cnt++) {
883 WRT16_IO_REG(ha, nvram, NV_SELECT + NV_CLOCK);
884 ql_nv_delay();
885 data <<= 1;
886 reg_data = RD16_IO_REG(ha, nvram);
887 if (reg_data & NV_DATA_IN) {
888 data = (uint16_t)(data | BIT_0);
889 }
890 WRT16_IO_REG(ha, nvram, NV_SELECT);
891 ql_nv_delay();
892 }
893
894 /* Deselect chip. */
895
896 WRT16_IO_REG(ha, nvram, NV_DESELECT);
897 ql_nv_delay();
898
899 return (data);
900 }
901
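/*
 * ql_nv_write
 *	Clocks one bit out to the serial NVRAM: the data bit is driven
 *	with the chip selected, then NV_CLOCK is pulsed high and low.
 *
 * Input:
 *	ha = adapter state pointer.
 *	data = data out bit setting (NV_DATA_OUT or 0).
 *
 * Context:
 *	Kernel context.
 */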
902 void
903 ql_nv_write(ql_adapter_state_t *ha, uint16_t data)
904 {
905 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
906 ql_nv_delay();
907 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK));
908 ql_nv_delay();
909 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
910 ql_nv_delay();
911 }
912
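/*
 * ql_nv_delay
 *	Provides the serial NVRAM bit-bang timing delay.
 *
 * Context:
 *	Kernel context.
 */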
913 void
914 ql_nv_delay(void)
915 {
916 drv_usecwait(NV_DELAY_COUNT);
917 }
918
919 /*
920 * ql_nvram_24xx_config
921 * ISP2400 nvram.
922 *
923 * Input:
924 * ha: adapter state pointer.
925 * ha->req_q[0]: request ring
926 *
927 * Output:
928 * ha->init_ctrl_blk = initialization control block
929 * host adapters parameters in host adapter block
930 *
931 * Returns:
932 * ql local function return status code.
933 *
934 * Context:
935 * Kernel context.
936 */
937 int
938 ql_nvram_24xx_config(ql_adapter_state_t *ha)
939 {
940 uint32_t index, addr;
941 uint32_t chksum = 0, saved_chksum = 0;
942 uint32_t *longptr;
943 nvram_24xx_t nvram;
944 int idpromlen;
945 char idprombuf[32];
946 caddr_t src, dst;
947 uint16_t w1;
948 int rval;
949 nvram_24xx_t *nv = (nvram_24xx_t *)&nvram;
950 ql_init_24xx_cb_t *icb =
951 (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
952 ql_ip_init_24xx_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb24;
953 la_els_logi_t *els = &ha->loginparams;
954
955 QL_PRINT_10(ha, "started\n");
956
957 if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) {
958
959 /* Get NVRAM data and calculate checksum. */
960 longptr = (uint32_t *)nv;
961 chksum = saved_chksum = 0;
962 for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) {
963 rval = ql_24xx_read_flash(ha, addr++, longptr);
964 if (rval != QL_SUCCESS) {
965 EL(ha, "24xx_read_flash failed=%xh\n", rval);
966 break;
967 }
968 saved_chksum = chksum;
969 chksum += *longptr;
970 LITTLE_ENDIAN_32(longptr);
971 longptr++;
972 }
973
974 ql_release_nvram(ha);
975 }
976
	/* Bad NVRAM data, set default parameters. */
978 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
979 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
980 (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
981
982 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using "
983 "driver defaults.", QL_NAME, ha->instance);
984 EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, "
985 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
986 nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0],
987 nv->nvram_version[1]));
988
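		/*
		 * saved_chksum excludes the final longword read; its two's
		 * complement is the value the checksum longword should have
		 * contained, which is logged below.
		 */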
989 saved_chksum = ~saved_chksum + 1;
990
991 (void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0,
992 MSW(saved_chksum), LSW(saved_chksum));
993
994 /* Reset NVRAM data. */
995 bzero((void *)nv, sizeof (nvram_24xx_t));
996
997 /*
998 * Set default initialization control block.
999 */
1000 nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
1001 nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
1002
1003 nv->version[0] = 1;
1004 nv->max_frame_length[1] = 8;
1005 nv->execution_throttle[0] = 16;
1006 nv->exchange_count[0] = 128;
1007 nv->max_luns_per_target[0] = 8;
1008
1009 idpromlen = 32;
1010
1011 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
		if ((rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
		    &idpromlen)) != DDI_PROP_SUCCESS) {
1015
1016 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
1017 "property, rval=%x", QL_NAME, ha->instance, rval);
1018
1019 nv->port_name[0] = 33;
1020 nv->port_name[3] = 224;
1021 nv->port_name[4] = 139;
1022 nv->port_name[7] = (uint8_t)
1023 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
1024 } else {
1025 nv->port_name[2] = idprombuf[2];
1026 nv->port_name[3] = idprombuf[3];
1027 nv->port_name[4] = idprombuf[4];
1028 nv->port_name[5] = idprombuf[5];
1029 nv->port_name[6] = idprombuf[6];
1030 nv->port_name[7] = idprombuf[7];
1031 nv->port_name[0] = (uint8_t)
1032 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
1033 }
1034
1035 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default "
1036 "HBA parameters and temporary "
1037 "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
1038 ha->instance, nv->port_name[0], nv->port_name[1],
1039 nv->port_name[2], nv->port_name[3], nv->port_name[4],
1040 nv->port_name[5], nv->port_name[6], nv->port_name[7]);
1041
1042 nv->login_retry_count[0] = 8;
1043
1044 nv->firmware_options_1[0] = BIT_2 | BIT_1;
1045 nv->firmware_options_1[1] = BIT_5;
1046 nv->firmware_options_2[0] = BIT_5;
1047 nv->firmware_options_2[1] = BIT_4;
1048 nv->firmware_options_3[1] = BIT_6;
1049
1050 /*
1051 * Set default host adapter parameters
1052 */
1053 nv->host_p[0] = BIT_4 | BIT_1;
1054 nv->host_p[1] = BIT_3 | BIT_2;
1055 nv->reset_delay = 5;
1056 nv->max_luns_per_target[0] = 128;
1057 nv->port_down_retry_count[0] = 30;
1058 nv->link_down_timeout[0] = 30;
1059
1060 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1061 nv->firmware_options_3[2] = BIT_4;
1062 nv->feature_mask_l[0] = 9;
1063 nv->ext_blk.version[0] = 1;
1064 nv->ext_blk.fcf_vlan_match = 1;
1065 nv->ext_blk.fcf_vlan_id[0] = LSB(1002);
1066 nv->ext_blk.fcf_vlan_id[1] = MSB(1002);
1067 nv->fw.isp8001.e_node_mac_addr[1] = 2;
1068 nv->fw.isp8001.e_node_mac_addr[2] = 3;
1069 nv->fw.isp8001.e_node_mac_addr[3] = 4;
1070 nv->fw.isp8001.e_node_mac_addr[4] = MSB(ha->instance);
1071 nv->fw.isp8001.e_node_mac_addr[5] = LSB(ha->instance);
1072 }
1073
1074 rval = QL_FUNCTION_FAILED;
1075 }
1076
1077 /* Reset initialization control blocks. */
1078 bzero((void *)icb, sizeof (ql_init_24xx_cb_t));
1079
1080 /*
1081 * Copy over NVRAM Firmware Initialization Control Block.
1082 */
1083 dst = (caddr_t)icb;
1084 src = (caddr_t)&nv->version;
1085 index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] -
1086 (uintptr_t)icb);
1087 while (index--) {
1088 *dst++ = *src++;
1089 }
1090 icb->login_retry_count[0] = nv->login_retry_count[0];
1091 icb->login_retry_count[1] = nv->login_retry_count[1];
1092 icb->link_down_on_nos[0] = nv->link_down_on_nos[0];
1093 icb->link_down_on_nos[1] = nv->link_down_on_nos[1];
1094
1095 /* Copy 2nd half. */
1096 dst = (caddr_t)&icb->interrupt_delay_timer;
1097 src = (caddr_t)&nv->interrupt_delay_timer;
1098 index = (uint32_t)((uintptr_t)&icb->qos -
1099 (uintptr_t)&icb->interrupt_delay_timer);
1100 while (index--) {
1101 *dst++ = *src++;
1102 }
1103
1104 ha->execution_throttle = 16;
1105 ha->loop_reset_delay = nv->reset_delay;
1106 ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0],
1107 nv->port_down_retry_count[1]);
1108 ha->maximum_luns_per_target = CHAR_TO_SHORT(
1109 nv->max_luns_per_target[0], nv->max_luns_per_target[1]);
1110 if (ha->maximum_luns_per_target == 0) {
1111 ha->maximum_luns_per_target++;
1112 }
1113 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1114 dst = (caddr_t)icb->enode_mac_addr;
1115 src = (caddr_t)nv->fw.isp8001.e_node_mac_addr;
1116 index = sizeof (nv->fw.isp8001.e_node_mac_addr);
1117 while (index--) {
1118 *dst++ = *src++;
1119 }
1120 dst = (caddr_t)&icb->ext_blk;
1121 src = (caddr_t)&nv->ext_blk;
1122 index = sizeof (ql_ext_icb_8100_t);
1123 while (index--) {
1124 *dst++ = *src++;
1125 }
1126 EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n",
1127 icb->enode_mac_addr[0], icb->enode_mac_addr[1],
1128 icb->enode_mac_addr[2], icb->enode_mac_addr[3],
1129 icb->enode_mac_addr[4], icb->enode_mac_addr[5]);
1130 }
1131
1132 /* Check for adapter node name (big endian). */
1133 for (index = 0; index < 8; index++) {
1134 if (icb->node_name[index] != 0) {
1135 break;
1136 }
1137 }
1138
1139 /* Copy port name if no node name (big endian). */
1140 if (index == 8) {
1141 for (index = 0; index < 8; index++) {
1142 icb->node_name[index] = icb->port_name[index];
1143 }
1144 icb->node_name[0] = (uint8_t)(icb->node_name[0] & ~BIT_0);
1145 icb->port_name[0] = (uint8_t)(icb->node_name[0] | BIT_0);
1146 }
1147
1148 ADAPTER_STATE_LOCK(ha);
1149 ha->cfg_flags &= ~(CFG_ENABLE_FULL_LIP_LOGIN | CFG_ENABLE_TARGET_RESET |
1150 CFG_ENABLE_LIP_RESET | CFG_LOAD_FLASH_FW | CFG_FAST_TIMEOUT |
1151 CFG_DISABLE_RISC_CODE_LOAD | CFG_ENABLE_FWEXTTRACE |
1152 CFG_ENABLE_FWFCETRACE | CFG_SET_CACHE_LINE_SIZE_1 | CFG_LR_SUPPORT);
1153 if (nv->host_p[1] & BIT_2) {
1154 ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
1155 }
1156 if (nv->host_p[1] & BIT_3) {
1157 ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
1158 }
1159 ha->flags &= ~MULTI_CHIP_ADAPTER;
1160 ADAPTER_STATE_UNLOCK(ha);
1161
1162 /* Get driver properties. */
1163 ql_24xx_properties(ha, icb);
1164
1165 /*
1166 * Setup driver firmware options.
1167 */
1168 if (!CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1169 icb->firmware_options_1[0] = (uint8_t)
1170 (icb->firmware_options_1[0] | BIT_1);
1171 icb->firmware_options_1[1] = (uint8_t)
1172 (icb->firmware_options_1[1] | BIT_5 | BIT_2);
1173 icb->firmware_options_3[0] = (uint8_t)
1174 (icb->firmware_options_3[0] | BIT_1);
1175 }
1176 icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] &
1177 ~(BIT_5 | BIT_4));
1178 icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] |
1179 BIT_6);
1180 icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] &
1181 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
1182 if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
1183 icb->firmware_options_2[1] = (uint8_t)
1184 (icb->firmware_options_2[1] | BIT_4);
1185 } else {
1186 icb->firmware_options_2[1] = (uint8_t)
1187 (icb->firmware_options_2[1] & ~BIT_4);
1188 }
1189 icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] &
1190 ~BIT_7);
1191
1192 /*
1193 * Set host adapter parameters
1194 */
1195 w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]);
1196 ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? R_A_TOV_DEFAULT : w1);
1197
1198 ADAPTER_STATE_LOCK(ha);
1199 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
1200 if (CFG_IST(ha, CFG_CTRL_81XX) && nv->enhanced_features[0] & BIT_0) {
1201 ha->cfg_flags |= CFG_LR_SUPPORT;
1202 }
1203 ADAPTER_STATE_UNLOCK(ha);
1204
1205 /* Queue shadowing */
1206 if (ha->flags & QUEUE_SHADOW_PTRS) {
1207 icb->firmware_options_2[3] = (uint8_t)
1208 (icb->firmware_options_2[3] | BIT_6 | BIT_5);
1209 } else {
		icb->firmware_options_2[3] = (uint8_t)
		    (icb->firmware_options_2[3] & ~(BIT_6 | BIT_5));
1212 }
1213
1214 /* ISP2422 Serial Link Control */
1215 if (CFG_IST(ha, CFG_CTRL_24XX)) {
1216 ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0],
1217 nv->fw.isp2400.swing_opt[1]);
1218 ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0],
1219 nv->fw.isp2400.swing_1g[1]);
1220 ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0],
1221 nv->fw.isp2400.swing_2g[1]);
1222 ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0],
1223 nv->fw.isp2400.swing_4g[1]);
1224 }
1225
1226 els->common_service.rx_bufsize = CHAR_TO_SHORT(
1227 icb->max_frame_length[0], icb->max_frame_length[1]);
1228 bcopy((void *)icb->port_name, (void *)els->nport_ww_name.raw_wwn, 8);
1229 bcopy((void *)icb->node_name, (void *)els->node_ww_name.raw_wwn, 8);
1230
1231 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
1232 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1233 QL_NAME, ha->instance,
1234 els->nport_ww_name.raw_wwn[0], els->nport_ww_name.raw_wwn[1],
1235 els->nport_ww_name.raw_wwn[2], els->nport_ww_name.raw_wwn[3],
1236 els->nport_ww_name.raw_wwn[4], els->nport_ww_name.raw_wwn[5],
1237 els->nport_ww_name.raw_wwn[6], els->nport_ww_name.raw_wwn[7],
1238 els->node_ww_name.raw_wwn[0], els->node_ww_name.raw_wwn[1],
1239 els->node_ww_name.raw_wwn[2], els->node_ww_name.raw_wwn[3],
1240 els->node_ww_name.raw_wwn[4], els->node_ww_name.raw_wwn[5],
1241 els->node_ww_name.raw_wwn[6], els->node_ww_name.raw_wwn[7]);
1242 /*
1243 * Setup ring parameters in initialization control block
1244 */
1245 w1 = ha->req_q[0]->req_entry_cnt;
1246 icb->request_q_length[0] = LSB(w1);
1247 icb->request_q_length[1] = MSB(w1);
1248 w1 = ha->rsp_queues[0]->rsp_entry_cnt;
1249 icb->response_q_length[0] = LSB(w1);
1250 icb->response_q_length[1] = MSB(w1);
1251
1252 addr = ha->req_q[0]->req_ring.cookie.dmac_address;
1253 icb->request_q_address[0] = LSB(LSW(addr));
1254 icb->request_q_address[1] = MSB(LSW(addr));
1255 icb->request_q_address[2] = LSB(MSW(addr));
1256 icb->request_q_address[3] = MSB(MSW(addr));
1257
1258 addr = ha->req_q[0]->req_ring.cookie.dmac_notused;
1259 icb->request_q_address[4] = LSB(LSW(addr));
1260 icb->request_q_address[5] = MSB(LSW(addr));
1261 icb->request_q_address[6] = LSB(MSW(addr));
1262 icb->request_q_address[7] = MSB(MSW(addr));
1263
1264 addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_address;
1265 icb->response_q_address[0] = LSB(LSW(addr));
1266 icb->response_q_address[1] = MSB(LSW(addr));
1267 icb->response_q_address[2] = LSB(MSW(addr));
1268 icb->response_q_address[3] = MSB(MSW(addr));
1269
1270 addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_notused;
1271 icb->response_q_address[4] = LSB(LSW(addr));
1272 icb->response_q_address[5] = MSB(LSW(addr));
1273 icb->response_q_address[6] = LSB(MSW(addr));
1274 icb->response_q_address[7] = MSB(MSW(addr));
1275
1276 /*
1277 * Setup IP initialization control block
1278 */
1279 ip_icb->version = IP_ICB_24XX_VERSION;
1280
1281 ip_icb->ip_firmware_options[0] = (uint8_t)
1282 (ip_icb->ip_firmware_options[0] | BIT_2);
1283
1284 if (rval != QL_SUCCESS) {
1285 EL(ha, "failed, rval = %xh\n", rval);
1286 } else {
1287 /*EMPTY*/
1288 QL_PRINT_10(ha, "done\n");
1289 }
1290 return (rval);
1291 }
1292
1293 /*
1294 * ql_lock_nvram
1295 * Locks NVRAM access and returns starting address of NVRAM.
1296 *
1297 * Input:
1298 * ha: adapter state pointer.
1299 * addr: pointer for start address.
1300 * flags: Are mutually exclusive:
1301 * LNF_NVRAM_DATA --> get nvram
1302 * LNF_VPD_DATA --> get vpd data (24/25xx only).
1303 *
1304 * Returns:
1305 * ql local function return status code.
1306 *
1307 * Context:
1308 * Kernel context.
1309 */
1310 int
1311 ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
1312 {
1313 int i;
1314
1315 QL_PRINT_3(ha, "started\n");
1316
1317 if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
1318 EL(ha, "invalid options for function");
1319 return (QL_FUNCTION_FAILED);
1320 }
1321
1322 if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1323 if ((flags & LNF_NVRAM_DATA) == 0) {
1324 EL(ha, "invalid 2312/2322 option for HBA");
1325 return (QL_FUNCTION_FAILED);
1326 }
1327
1328 /* if function number is non-zero, then adjust offset */
1329 *addr = ha->flash_nvram_addr;
1330
1331 /* Try to get resource lock. Wait for 10 seconds max */
1332 for (i = 0; i < 10000; i++) {
1333 /* if nvram busy bit is reset, acquire sema */
1334 if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
1335 WRT16_IO_REG(ha, host_to_host_sema, 1);
1336 drv_usecwait(MILLISEC);
1337 if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
1338 break;
1339 }
1340 }
1341 drv_usecwait(MILLISEC);
1342 }
1343 if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
1344 cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
1345 QL_NAME, ha->instance);
1346 return (QL_FUNCTION_FAILED);
1347 }
1348 } else if (CFG_IST(ha, CFG_CTRL_24XX)) {
1349 if (flags & LNF_VPD_DATA) {
1350 *addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
1351 } else if (flags & LNF_NVRAM_DATA) {
1352 *addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
1353 } else {
1354 EL(ha, "invalid 2422 option for HBA");
1355 return (QL_FUNCTION_FAILED);
1356 }
1357
1358 GLOBAL_HW_LOCK();
1359 } else if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1360 if (flags & LNF_VPD_DATA) {
1361 *addr = ha->flash_data_addr | ha->flash_vpd_addr;
1362 } else if (flags & LNF_NVRAM_DATA) {
1363 *addr = ha->flash_data_addr | ha->flash_nvram_addr;
1364 } else {
1365 EL(ha, "invalid 2581 option for HBA");
1366 return (QL_FUNCTION_FAILED);
1367 }
1368
1369 GLOBAL_HW_LOCK();
1370 } else {
1371 if ((flags & LNF_NVRAM_DATA) == 0) {
1372 EL(ha, "invalid option for HBA");
1373 return (QL_FUNCTION_FAILED);
1374 }
1375 *addr = 0;
1376 GLOBAL_HW_LOCK();
1377 }
1378
1379 QL_PRINT_3(ha, "done\n");
1380
1381 return (QL_SUCCESS);
1382 }
1383
1384 /*
1385 * ql_release_nvram
1386 * Releases NVRAM access.
1387 *
1388 * Input:
1389 * ha: adapter state pointer.
1390 *
1391 * Context:
1392 * Kernel context.
1393 */
1394 void
1395 ql_release_nvram(ql_adapter_state_t *ha)
1396 {
1397 QL_PRINT_3(ha, "started\n");
1398
1399 if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1400 /* Release resource lock */
1401 WRT16_IO_REG(ha, host_to_host_sema, 0);
1402 } else {
1403 GLOBAL_HW_UNLOCK();
1404 }
1405
1406 QL_PRINT_3(ha, "done\n");
1407 }
1408
1409 /*
1410 * ql_23_properties
1411 * Copies driver properties to NVRAM or adapter structure.
1412 *
1413 * Driver properties are by design global variables and hidden
1414 * completely from administrators. Knowledgeable folks can
1415 * override the default values using driver.conf
1416 *
1417 * Input:
1418 * ha: adapter state pointer.
1419 * icb: Init control block structure pointer.
1420 *
1421 * Context:
1422 * Kernel context.
1423 */
1424 static void
1425 ql_23_properties(ql_adapter_state_t *ha, ql_init_cb_t *icb)
1426 {
1427 uint32_t data, cnt;
1428
1429 QL_PRINT_3(ha, "started\n");
1430
1431 /* Get frame payload size. */
1432 if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1433 data = 2048;
1434 }
1435 if (data == 512 || data == 1024 || data == 2048) {
1436 icb->max_frame_length[0] = LSB(data);
1437 icb->max_frame_length[1] = MSB(data);
1438 } else {
1439 EL(ha, "invalid parameter value for 'max-frame-length': "
1440 "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1441 icb->max_frame_length[0], icb->max_frame_length[1]));
1442 }
1443
1444 /* Get max IOCB allocation. */
1445 icb->max_iocb_allocation[0] = 0;
1446 icb->max_iocb_allocation[1] = 1;
1447
1448 /* Get execution throttle. */
1449 if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1450 data = 32;
1451 }
1452 if (data != 0 && data < 65536) {
1453 icb->execution_throttle[0] = LSB(data);
1454 icb->execution_throttle[1] = MSB(data);
1455 } else {
1456 EL(ha, "invalid parameter value for 'execution-throttle': "
1457 "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1458 icb->execution_throttle[0], icb->execution_throttle[1]));
1459 }
1460
1461 /* Get Login timeout. */
1462 if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1463 data = 3;
1464 }
1465 if (data < 256) {
1466 icb->login_timeout = (uint8_t)data;
1467 } else {
1468 EL(ha, "invalid parameter value for 'login-timeout': "
1469 "%d; using nvram value of %d\n", data, icb->login_timeout);
1470 }
1471
1472 /* Get retry count. */
1473 if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1474 data = 4;
1475 }
1476 if (data < 256) {
1477 icb->login_retry_count = (uint8_t)data;
1478 } else {
1479 EL(ha, "invalid parameter value for 'login-retry-count': "
1480 "%d; using nvram value of %d\n", data,
1481 icb->login_retry_count);
1482 }
1483
1484 /* Get adapter hard loop ID enable. */
1485 data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1486 if (data == 0) {
1487 icb->firmware_options[0] =
1488 (uint8_t)(icb->firmware_options[0] & ~BIT_0);
1489 } else if (data == 1) {
1490 icb->firmware_options[0] =
1491 (uint8_t)(icb->firmware_options[0] | BIT_0);
1492 } else if (data != 0xffffffff) {
1493 EL(ha, "invalid parameter value for "
1494 "'enable-adapter-hard-loop-ID': %d; using nvram value "
1495 "of %d\n", data, icb->firmware_options[0] & BIT_0 ? 1 : 0);
1496 }
1497
1498 /* Get adapter hard loop ID. */
1499 data = ql_get_prop(ha, "adapter-hard-loop-ID");
1500 if (data < 126) {
1501 icb->hard_address[0] = (uint8_t)data;
1502 } else if (data != 0xffffffff) {
1503 EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
1504 "%d; using nvram value of %d\n",
1505 data, icb->hard_address[0]);
1506 }
1507
1508 /* Get LIP reset. */
1509 if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1510 0xffffffff) {
1511 data = 0;
1512 }
1513 if (data == 0) {
1514 ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1515 } else if (data == 1) {
1516 ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1517 } else {
1518 EL(ha, "invalid parameter value for "
1519 "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
1520 "of %d\n", data,
1521 CFG_IST(ha, CFG_ENABLE_LIP_RESET) ? 1 : 0);
1522 }
1523
1524 /* Get LIP full login. */
1525 if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1526 0xffffffff) {
1527 data = 1;
1528 }
1529 if (data == 0) {
1530 ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN;
1531 } else if (data == 1) {
1532 ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
1533 } else {
1534 EL(ha, "invalid parameter value for "
1535 "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1536 "value of %d\n", data,
1537 CFG_IST(ha, CFG_ENABLE_FULL_LIP_LOGIN) ? 1 : 0);
1538 }
1539
1540 /* Get target reset. */
1541 if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1542 0xffffffff) {
1543 data = 0;
1544 }
1545 if (data == 0) {
1546 ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET;
1547 } else if (data == 1) {
1548 ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
1549 } else {
1550 EL(ha, "invalid parameter value for "
1551 "'enable-target-reset-on-bus-reset': %d; using nvram "
1552 "value of %d", data,
1553 CFG_IST(ha, CFG_ENABLE_TARGET_RESET) ? 1 : 0);
1554 }
1555
1556 /* Get reset delay. */
1557 if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1558 data = 5;
1559 }
1560 if (data != 0 && data < 256) {
1561 ha->loop_reset_delay = (uint8_t)data;
1562 } else {
1563 EL(ha, "invalid parameter value for 'reset-delay': %d; "
1564 "using nvram value of %d", data, ha->loop_reset_delay);
1565 }
1566
1567 /* Get port down retry count. */
1568 if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1569 data = 8;
1570 }
1571 if (data < 256) {
1572 ha->port_down_retry_count = (uint8_t)data;
1573 } else {
1574 EL(ha, "invalid parameter value for 'port-down-retry-count':"
1575 " %d; using nvram value of %d\n", data,
1576 ha->port_down_retry_count);
1577 }
1578
1579 /* Get connection mode setting. */
1580 if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1581 data = 2;
1582 }
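	/*
	 * Connection option values are typically 0 = loop only, 1 = point
	 * to point only, 2 = loop preferred, otherwise point to point
	 * (and, on the 22xx only, 3 = point to point preferred, otherwise
	 * loop).
	 */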
1583 cnt = CFG_IST(ha, CFG_CTRL_22XX) ? 3 : 2;
1584 if (data <= cnt) {
1585 icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
1586 ~(BIT_6 | BIT_5 | BIT_4));
1587 icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] |
1588 (uint8_t)(data << 4));
1589 } else {
1590 EL(ha, "invalid parameter value for 'connection-options': "
1591 "%d; using nvram value of %d\n", data,
1592 (icb->add_fw_opt[0] >> 4) & 0x3);
1593 }
1594
1595 /* Get data rate setting. */
1596 if ((CFG_IST(ha, CFG_CTRL_22XX)) == 0) {
1597 if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1598 data = 2;
1599 }
1600 if (data < 3) {
1601 icb->special_options[1] = (uint8_t)
1602 (icb->special_options[1] & 0x3f);
1603 icb->special_options[1] = (uint8_t)
1604 (icb->special_options[1] | (uint8_t)(data << 6));
1605 } else {
1606 EL(ha, "invalid parameter value for 'fc-data-rate': "
1607 "%d; using nvram value of %d\n", data,
1608 (icb->special_options[1] >> 6) & 0x3);
1609 }
1610 }
1611
1612 /* Get IP FW container count. */
1613 ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
1614 ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);
1615
1616 /* Get IP low water mark. */
1617 ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
1618 ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);
1619
1620 /* Get IP fast register post count. */
1621 ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
1622 ql_ip_fast_post_count;
1623
1624 ADAPTER_STATE_LOCK(ha);
1625
1626 ql_common_properties(ha);
1627
1628 ADAPTER_STATE_UNLOCK(ha);
1629
1630 QL_PRINT_3(ha, "done\n");
1631 }
1632
1633 /*
1634 * ql_common_properties
 *	Copies driver properties to the adapter structure.
1636 *
1637 * Driver properties are by design global variables and hidden
1638 * completely from administrators. Knowledgeable folks can
1639 * override the default values using driver.conf
1640 *
1641 * Input:
1642 * ha: adapter state pointer.
1643 *
1644 * Context:
1645 * Kernel context.
1646 */
1647 void
1648 ql_common_properties(ql_adapter_state_t *ha)
1649 {
1650 uint32_t data;
1651
1652 QL_PRINT_10(ha, "started\n");
1653
1654 /* Get extended logging enable. */
1655 if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
1656 data == 0) {
1657 ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1658 } else if (data == 1) {
1659 ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1660 } else {
1661 EL(ha, "invalid parameter value for 'extended-logging': %d;"
1662 " using default value of 0\n", data);
1663 ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1664 }
1665
1666 /* Get FCP 2 Error Recovery. */
1667 if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
1668 0xffffffff || data == 1) {
1669 ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1670 } else if (data == 0) {
1671 ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
1672 } else {
1673 EL(ha, "invalid parameter value for "
1674 "'enable-FCP-2-error-recovery': %d; using nvram value of "
1675 "1\n", data);
1676 ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1677 }
1678
1679 #ifdef QL_DEBUG_LEVEL_2
1680 ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1681 #endif
1682
1683 /* Get port down retry delay. */
1684 if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
1685 ha->port_down_retry_delay = PORT_RETRY_TIME;
1686 } else if (data < 256) {
1687 ha->port_down_retry_delay = (uint8_t)data;
1688 } else {
1689 EL(ha, "invalid parameter value for 'port-down-retry-delay':"
1690 " %d; using default value of %d", data, PORT_RETRY_TIME);
1691 ha->port_down_retry_delay = PORT_RETRY_TIME;
1692 }
1693
1694 /* Get queue full retry count. */
1695 if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
1696 ha->qfull_retry_count = 16;
1697 } else if (data < 256) {
1698 ha->qfull_retry_count = (uint8_t)data;
1699 } else {
1700 EL(ha, "invalid parameter value for 'queue-full-retry-count':"
1701 " %d; using default value of 16", data);
1702 ha->qfull_retry_count = 16;
1703 }
1704
1705 /* Get queue full retry delay. */
1706 if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
1707 ha->qfull_retry_delay = PORT_RETRY_TIME;
1708 } else if (data < 256) {
1709 ha->qfull_retry_delay = (uint8_t)data;
1710 } else {
1711 EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
1712 " %d; using default value of %d", data, PORT_RETRY_TIME);
1713 ha->qfull_retry_delay = PORT_RETRY_TIME;
1714 }
1715
1716 /* Get loop down timeout. */
1717 if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
1718 data = 0;
1719 } else if (data > 255) {
1720 EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
1721 " using nvram value of 0\n", data);
1722 data = 0;
1723 }
1724 ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
1725 if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
1726 ha->loop_down_abort_time--;
1727 } else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
1728 ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
1729 }
1730
1731 /* Get link down error enable. */
1732 if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
1733 data == 1) {
1734 ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1735 } else if (data == 0) {
1736 ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
1737 } else {
1738 EL(ha, "invalid parameter value for 'link-down-error': %d;"
1739 " using default value of 1\n", data);
1740 }
1741
1742 /*
1743 * Get firmware dump flags.
1744 * TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT BIT_0
1745 * TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR BIT_1
1746 * TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT BIT_2
1747 * TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT BIT_3
1748 */
1749 ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
1750 CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
1751 CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
1752 if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
1753 if (data & BIT_0) {
1754 ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
1755 }
1756 if (data & BIT_1) {
1757 ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
1758 }
1759 if (data & BIT_2) {
1760 ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
1761 }
1762 if (data & BIT_3) {
1763 ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
1764 }
1765 }
1766
1767 /* Get the PCI max read request size override. */
1768 ha->pci_max_read_req = 0;
1769 if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
1770 data != 0) {
1771 ha->pci_max_read_req = (uint16_t)(data);
1772 }
1773
1774 /* Get the plogi retry params overrides. */
1775 if ((data = ql_get_prop(ha, "plogi_params_retry_count")) !=
1776 0xffffffff && data != 0) {
1777 ha->plogi_params->retry_cnt = (uint32_t)(data);
1778 }
1779 if ((data = ql_get_prop(ha, "plogi_params_retry_delay")) !=
1780 0xffffffff && data != 0) {
1781 ha->plogi_params->retry_dly_usec = (uint32_t)(data);
1782 }
1783
1784 /*
1785 * Set default fw wait, adjusted for slow FCF's.
1786 * Revisit when FCF's as fast as FC switches.
1787 */
1788 ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_FCOE_SUPPORT) ? 45 : 10);
1789 /* Get the attach fw_ready override value. */
1790 if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
1791 if (data > 0 && data <= 240) {
1792 ha->fwwait = (uint8_t)data;
1793 } else {
1794 EL(ha, "invalid parameter value for "
1795 "'init-loop-sync-wait': %d; using default "
1796 "value of %d\n", data, ha->fwwait);
1797 }
1798 }
1799
1800 /* Get fm-capable property */
1801 ha->fm_capabilities = DDI_FM_NOT_CAPABLE;
1802 if ((data = ql_get_prop(ha, "fm-capable")) != 0xffffffff) {
1803 if (data == 0) {
1804 ha->fm_capabilities = DDI_FM_NOT_CAPABLE;
1805 } else if (data > 0xf) {
1806 ha->fm_capabilities = 0xf;
1807
1808 } else {
1809 ha->fm_capabilities = (int)(data);
1810 }
1811 } else {
1812 ha->fm_capabilities = (int)(DDI_FM_EREPORT_CAPABLE
1813 | DDI_FM_ERRCB_CAPABLE);
1814 }
1815
1816 if ((data = ql_get_prop(ha, "msix-vectors")) == 0xffffffff) {
1817 ha->mq_msix_vectors = 0;
1818 } else if (data < 256) {
1819 ha->mq_msix_vectors = (uint8_t)data;
1820 } else {
1821 EL(ha, "invalid parameter value for 'msix-vectors': "
1822 "%d; using value of %d\n", data, 0);
1823 ha->mq_msix_vectors = 0;
1824 }
1825
1826 /* Get number of completion threads. */
1827 if ((data = ql_get_prop(ha, "completion-threads")) == 0xffffffff) {
1828 ha->completion_thds = 4;
1829 } else if (data < 256 && data >= 1) {
1830 ha->completion_thds = (uint8_t)data;
1831 } else {
1832 EL(ha, "invalid parameter value for 'completion-threads':"
1833 " %d; using default value of %d", data, 4);
1834 ha->completion_thds = 4;
1835 }
1836
1837 QL_PRINT_3(ha, "done\n");
1838 }
1839
1840 /*
1841 * ql_24xx_properties
1842 * Copies driver properties to NVRAM or adapter structure.
1843 *
 *	Most properties are read from the driver configuration file via
 *	ql_get_prop(); a few defaults are global variables that
 *	knowledgeable folks can override using /etc/system.
1847 *
1848 * Input:
1849 * ha: adapter state pointer.
1850 * icb: Init control block structure pointer.
1851 *
1852 * Context:
1853 * Kernel context.
1854 */
1855 static void
1856 ql_24xx_properties(ql_adapter_state_t *ha, ql_init_24xx_cb_t *icb)
1857 {
1858 uint32_t data;
1859
1860 QL_PRINT_10(ha, "started\n");
1861
1862 /* Get frame size */
1863 if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1864 data = 2048;
1865 }
1866 if (data == 512 || data == 1024 || data == 2048 || data == 2112) {
1867 icb->max_frame_length[0] = LSB(data);
1868 icb->max_frame_length[1] = MSB(data);
1869 } else {
1870 EL(ha, "invalid parameter value for 'max-frame-length': %d;"
1871 " using nvram default of %d\n", data, CHAR_TO_SHORT(
1872 icb->max_frame_length[0], icb->max_frame_length[1]));
1873 }
1874
1875 /* Get execution throttle. */
1876 if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1877 data = 32;
1878 }
1879 if (data != 0 && data < 65536) {
1880 icb->execution_throttle[0] = LSB(data);
1881 icb->execution_throttle[1] = MSB(data);
1882 } else {
1883 EL(ha, "invalid parameter value for 'execution-throttle':"
1884 " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1885 icb->execution_throttle[0], icb->execution_throttle[1]));
1886 }
1887
1888 /* Get Login timeout. */
1889 if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1890 data = 3;
1891 }
1892 if (data < 65536) {
1893 icb->login_timeout[0] = LSB(data);
1894 icb->login_timeout[1] = MSB(data);
1895 } else {
1896 EL(ha, "invalid parameter value for 'login-timeout': %d; "
1897 "using nvram value of %d\n", data, CHAR_TO_SHORT(
1898 icb->login_timeout[0], icb->login_timeout[1]));
1899 }
1900
1901 /* Get retry count. */
1902 if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1903 data = 4;
1904 }
1905 if (data < 65536) {
1906 icb->login_retry_count[0] = LSB(data);
1907 icb->login_retry_count[1] = MSB(data);
1908 } else {
1909 EL(ha, "invalid parameter value for 'login-retry-count': "
1910 "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1911 icb->login_retry_count[0], icb->login_retry_count[1]));
1912 }
1913
1914 /* Get adapter hard loop ID enable. */
1915 data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1916 if (data == 0) {
1917 icb->firmware_options_1[0] =
1918 (uint8_t)(icb->firmware_options_1[0] & ~BIT_0);
1919 } else if (data == 1) {
1920 icb->firmware_options_1[0] =
1921 (uint8_t)(icb->firmware_options_1[0] | BIT_0);
1922 } else if (data != 0xffffffff) {
1923 EL(ha, "invalid parameter value for "
1924 "'enable-adapter-hard-loop-ID': %d; using nvram value "
1925 "of %d\n", data,
1926 icb->firmware_options_1[0] & BIT_0 ? 1 : 0);
1927 }
1928
1929 /* Get adapter hard loop ID. */
1930 data = ql_get_prop(ha, "adapter-hard-loop-ID");
1931 if (data < 126) {
1932 icb->hard_address[0] = LSB(data);
1933 icb->hard_address[1] = MSB(data);
1934 } else if (data != 0xffffffff) {
1935 EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
1936 " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1937 icb->hard_address[0], icb->hard_address[1]));
1938 }
1939
1940 /* Get LIP reset. */
1941 if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1942 0xffffffff) {
1943 data = 0;
1944 }
1945 if (data == 0) {
1946 ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1947 } else if (data == 1) {
1948 ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1949 } else {
1950 EL(ha, "invalid parameter value for "
1951 "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
1952 data);
1953 }
1954
1955 /* Get LIP full login. */
1956 if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1957 0xffffffff) {
1958 data = 1;
1959 }
1960 if (data == 0) {
1961 ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN;
1962 } else if (data == 1) {
1963 ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
1964 } else {
1965 EL(ha, "invalid parameter value for "
1966 "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1967 "value of %d\n", data,
1968 ha->cfg_flags & CFG_ENABLE_FULL_LIP_LOGIN ? 1 : 0);
1969 }
1970
1971 /* Get target reset. */
1972 if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1973 0xffffffff) {
1974 data = 0;
1975 }
1976 if (data == 0) {
1977 ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET;
1978 } else if (data == 1) {
1979 ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
1980 } else {
1981 EL(ha, "invalid parameter value for "
1982 "'enable-target-reset-on-bus-reset': %d; using nvram "
1983 "value of %d", data,
1984 ha->cfg_flags & CFG_ENABLE_TARGET_RESET ? 1 : 0);
1985 }
1986
1987 /* Get reset delay. */
1988 if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1989 data = 5;
1990 }
1991 if (data != 0 && data < 256) {
1992 ha->loop_reset_delay = (uint8_t)data;
1993 } else {
1994 EL(ha, "invalid parameter value for 'reset-delay': %d; "
1995 "using nvram value of %d", data, ha->loop_reset_delay);
1996 }
1997
1998 /* Get port down retry count. */
1999 if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
2000 data = 8;
2001 }
2002 if (data < 256) {
2003 ha->port_down_retry_count = (uint16_t)data;
2004 } else {
2005 EL(ha, "invalid parameter value for 'port-down-retry-count':"
2006 " %d; using nvram value of %d\n", data,
2007 ha->port_down_retry_count);
2008 }
2009
2010 if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
2011 uint32_t conn;
2012
2013 /* Get connection mode setting. */
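		/*
		 * Assumed QLogic convention for 'connection-options':
		 * 0 = loop only, 1 = point-to-point only,
		 * 2 = loop preferred, otherwise point-to-point.
		 */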
2014 if ((conn = ql_get_prop(ha, "connection-options")) ==
2015 0xffffffff) {
2016 conn = 2;
2017 }
2018 if (conn <= 2) {
2019 icb->firmware_options_2[0] = (uint8_t)
2020 (icb->firmware_options_2[0] &
2021 ~(BIT_6 | BIT_5 | BIT_4));
2022 icb->firmware_options_2[0] = (uint8_t)
2023 (icb->firmware_options_2[0] | (uint8_t)(conn << 4));
2024 } else {
2025 EL(ha, "invalid parameter value for 'connection-"
2026 "options': %d; using nvram value of %d\n", conn,
2027 (icb->firmware_options_2[0] >> 4) & 0x3);
2028 }
2029 conn = icb->firmware_options_2[0] >> 4 & 0x3;
2030 if (conn == 0 && ha->max_vports > 125) {
2031 ha->max_vports = 125;
2032 }
2033
2034 /* Get data rate setting. */
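		/*
		 * Assumed QLogic convention for 'fc-data-rate':
		 * 0 = 1Gb, 1 = 2Gb, 2 = auto-negotiate, 3 = 4Gb, 4 = 8Gb;
		 * higher values select faster fixed rates on newer chips,
		 * with the valid range enforced per chip type below.
		 */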
2035 if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
2036 data = 2;
2037 }
2038 if ((CFG_IST(ha, CFG_CTRL_24XX) && data < 4) ||
2039 (CFG_IST(ha, CFG_CTRL_25XX) && data < 5) ||
2040 (CFG_IST(ha, CFG_CTRL_2783) && data < 6)) {
2041 if (CFG_IST(ha, CFG_CTRL_2783) && data == 5 &&
2042 conn == 0) {
2043 EL(ha, "invalid parameter value for 'fc-data-"
2044 "rate': %d; using nvram value of %d\n",
2045 data, 2);
2046 data = 2;
2047 }
2048 icb->firmware_options_3[1] = (uint8_t)
2049 (icb->firmware_options_3[1] & 0x1f);
2050 icb->firmware_options_3[1] = (uint8_t)
2051 (icb->firmware_options_3[1] | (uint8_t)(data << 5));
2052 } else {
2053 EL(ha, "invalid parameter value for 'fc-data-rate': "
2054 "%d; using nvram value of %d\n", data,
2055 (icb->firmware_options_3[1] >> 5) & 0x7);
2056 }
2057 }
2058
2059 /* Get IP FW container count. */
2060 ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
2061 ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);
2062
2063 /* Get IP low water mark. */
2064 ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
2065 ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);
2066
2067 ADAPTER_STATE_LOCK(ha);
2068
2069 /* Get enable flash load. */
2070 if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
2071 data == 0) {
2072 ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
2073 } else if (data == 1) {
2074 ha->cfg_flags |= CFG_LOAD_FLASH_FW;
2075 } else {
2076 EL(ha, "invalid parameter value for 'enable-flash-load': "
2077 "%d; using default value of 0\n", data);
2078 }
2079
2080 /* Enable firmware extended tracing */
2081 if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
2082 if (data != 0) {
2083 ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
2084 }
2085 }
2086
2087 /* Enable firmware fc tracing */
2088 if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
2089 ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
2090 ha->fwfcetraceopt = data;
2091 }
2092
2093 /* Enable fast timeout */
2094 if ((data = ql_get_prop(ha, "enable-fasttimeout")) != 0xffffffff) {
2095 if (data != 0) {
2096 ha->cfg_flags |= CFG_FAST_TIMEOUT;
2097 }
2098 }
2099
2100 ql_common_properties(ha);
2101
2102 ADAPTER_STATE_UNLOCK(ha);
2103
2104 QL_PRINT_3(ha, "done\n");
2105 }
2106
2107 /*
2108 * ql_get_prop
2109 * Get property value from configuration file.
2110 *
2111 * Input:
 *	ha = adapter state pointer.
 *	string = property string pointer.
 *
 * Returns:
 *	0xFFFFFFFF if the property is not found, else the property value.
2117 *
2118 * Context:
2119 * Kernel context.
2120 */
2121 uint32_t
2122 ql_get_prop(ql_adapter_state_t *ha, char *string)
2123 {
2124 char buf[256];
2125 uint32_t data = 0xffffffff;
2126
2127 /*
	 * Look for an adapter instance NPIV (virtual port) specific parameter.
2129 */
2130 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2131 (void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
2132 ha->vp_index, string);
2133 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2134 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2135 buf, (int)0xffffffff);
2136 }
2137
2138 /*
2139 * Get adapter instance parameter if a vp specific one isn't found.
2140 */
2141 if (data == 0xffffffff) {
2142 (void) sprintf(buf, "hba%d-%s", ha->instance, string);
2143 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2144 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
2145 0, buf, (int)0xffffffff);
2146 }
2147
2148 /* Adapter instance parameter found? */
2149 if (data == 0xffffffff) {
2150 /* No, get default parameter. */
2151 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2152 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2153 string, (int)0xffffffff);
2154 }
2155
2156 return (data);
2157 }
2158
2159 /*
2160 * ql_check_isp_firmware
 *	Checks whether to use the already loaded RISC code or the driver's
 *	copy. If using the already loaded code, save a copy of it.
2163 *
2164 * Input:
2165 * ha = adapter state pointer.
2166 *
2167 * Returns:
2168 * ql local function return status code.
2169 *
2170 * Context:
2171 * Kernel context.
2172 */
2173 static int
2174 ql_check_isp_firmware(ql_adapter_state_t *ha)
2175 {
2176 int rval;
2177 uint16_t word_count;
2178 uint32_t byte_count;
2179 uint32_t fw_size, *lptr;
2180 caddr_t bufp;
2181 uint16_t risc_address = (uint16_t)ha->risc_fw[0].addr;
2182
2183 QL_PRINT_10(ha, "started\n");
2184
2185 /* Test for firmware running. */
2186 if (CFG_IST(ha, CFG_CTRL_82XX)) {
2187 if ((rval = ql_8021_fw_chk(ha)) == QL_SUCCESS) {
2188 rval = ql_start_firmware(ha);
2189 }
2190 } else if (CFG_IST(ha, CFG_CTRL_278083)) {
2191 ha->dev_state = NX_DEV_READY;
2192 if (ha->rom_status == MBS_ROM_FW_RUNNING) {
2193 EL(ha, "ISP ROM Status = MBS_ROM_FW_RUNNING\n");
2194 rval = QL_SUCCESS;
2195 } else if (ha->rom_status == MBS_ROM_IDLE) {
2196 EL(ha, "ISP ROM Status = MBS_ROM_IDLE\n");
2197 rval = QL_FUNCTION_FAILED;
2198 } else {
2199 EL(ha, "ISP ROM Status, mbx0=%xh\n", ha->rom_status);
2200 rval = QL_FUNCTION_FAILED;
2201 }
2202 } else if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
2203 ha->dev_state = NX_DEV_READY;
2204 if (ha->risc_code != NULL) {
2205 kmem_free(ha->risc_code, ha->risc_code_size);
2206 ha->risc_code = NULL;
2207 ha->risc_code_size = 0;
2208 }
2209
2210 /* Get RISC code length. */
2211 rval = ql_rd_risc_ram(ha, risc_address + 3,
2212 ha->req_q[0]->req_ring.cookie.dmac_laddress, 1);
2213 if (rval == QL_SUCCESS) {
2214 lptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
2215 fw_size = *lptr << 1;
2216
2217 if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
2218 ha->risc_code_size = fw_size;
2219 ha->risc_code = bufp;
2220 ha->fw_transfer_size = 128;
2221
2222 /* Dump RISC code. */
2223 do {
2224 if (fw_size > ha->fw_transfer_size) {
2225 byte_count =
2226 ha->fw_transfer_size;
2227 } else {
2228 byte_count = fw_size;
2229 }
2230
2231 word_count =
2232 (uint16_t)(byte_count >> 1);
2233
2234 rval = ql_rd_risc_ram(ha, risc_address,
2235 ha->req_q[0]->req_ring.cookie.
2236 dmac_laddress, word_count);
2237 if (rval != QL_SUCCESS) {
2238 kmem_free(ha->risc_code,
2239 ha->risc_code_size);
2240 ha->risc_code = NULL;
2241 ha->risc_code_size = 0;
2242 break;
2243 }
2244
2245 (void) ddi_dma_sync(
2246 ha->req_q[0]->req_ring.dma_handle,
2247 0, byte_count,
2248 DDI_DMA_SYNC_FORKERNEL);
2249 ddi_rep_get16(
2250 ha->req_q[0]->req_ring.acc_handle,
2251 (uint16_t *)bufp, (uint16_t *)
2252 ha->req_q[0]->req_ring.bp,
2253 word_count, DDI_DEV_AUTOINCR);
2254
2255 risc_address += word_count;
2256 fw_size -= byte_count;
2257 bufp += byte_count;
2258 } while (fw_size != 0);
2259 }
2260 rval = QL_FUNCTION_FAILED;
2261 }
2262 } else {
2263 ha->dev_state = NX_DEV_READY;
2264 rval = QL_FUNCTION_FAILED;
2265 }
2266
2267 if (rval != QL_SUCCESS) {
2268 EL(ha, "Load RISC code\n");
2269 } else {
2270 /*EMPTY*/
2271 QL_PRINT_10(ha, "done\n");
2272 }
2273 return (rval);
2274 }
2275
2276 /*
2277 * ql_load_isp_firmware
2278 * Load and start RISC firmware.
2279 * Uses request ring for DMA buffer.
2280 *
2281 * Input:
2282 * ha = adapter state pointer.
2283 *
2284 * Returns:
2285 * ql local function return status code.
2286 *
2287 * Context:
2288 * Kernel context.
2289 */
2290 int
2291 ql_load_isp_firmware(ql_adapter_state_t *vha)
2292 {
2293 caddr_t risc_code_address;
2294 uint32_t risc_address, risc_code_size;
2295 int rval = QL_FUNCTION_FAILED;
2296 uint32_t word_count, cnt;
2297 size_t byte_count;
2298 ql_adapter_state_t *ha = vha->pha;
2299
2300 QL_PRINT_10(ha, "started\n");
2301
2302 if (CFG_IST(ha, CFG_CTRL_82XX)) {
2303 rval = ql_8021_reset_fw(ha) == NX_DEV_READY ?
2304 QL_SUCCESS : QL_FUNCTION_FAILED;
2305 } else {
2306 if (CFG_IST(ha, CFG_CTRL_81XX)) {
2307 ql_mps_reset(ha);
2308 }
2309
2310 if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) {
2311 QL_PRINT_10(ha, "CFG_LOAD_FLASH_FW exit\n");
2312 return (ql_load_flash_fw(ha));
2313 }
2314
2315 if (CFG_IST(ha, CFG_CTRL_27XX)) {
2316 (void) ql_2700_get_module_dmp_template(ha);
2317 }
2318
2319 /* Load firmware segments */
2320 for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS &&
2321 ha->risc_fw[cnt].code != NULL; cnt++) {
2322
			risc_code_address = ha->risc_fw[cnt].code;
			if ((risc_address = ha->risc_fw[cnt].addr) == 0) {
				continue;
			}
2328 risc_code_size = ha->risc_fw[cnt].length;
2329
2330 while (risc_code_size) {
2331 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2332 word_count = ha->fw_transfer_size >> 2;
2333 if (word_count > risc_code_size) {
2334 word_count = risc_code_size;
2335 }
2336 byte_count = word_count << 2;
2337
2338 ddi_rep_put32(
2339 ha->req_q[0]->req_ring.acc_handle,
2340 (uint32_t *)risc_code_address,
2341 (uint32_t *)
2342 ha->req_q[0]->req_ring.bp,
2343 word_count, DDI_DEV_AUTOINCR);
2344 } else {
2345 word_count = ha->fw_transfer_size >> 1;
2346 if (word_count > risc_code_size) {
2347 word_count = risc_code_size;
2348 }
2349 byte_count = word_count << 1;
2350
2351 ddi_rep_put16(
2352 ha->req_q[0]->req_ring.acc_handle,
2353 (uint16_t *)risc_code_address,
2354 (uint16_t *)
2355 ha->req_q[0]->req_ring.bp,
2356 word_count, DDI_DEV_AUTOINCR);
2357 }
2358
2359 (void) ddi_dma_sync(
2360 ha->req_q[0]->req_ring.dma_handle,
2361 0, byte_count, DDI_DMA_SYNC_FORDEV);
2362
2363 rval = ql_wrt_risc_ram(ha, risc_address,
2364 ha->req_q[0]->req_ring.cookie.dmac_laddress,
2365 word_count);
2366 if (rval != QL_SUCCESS) {
2367 EL(ha, "failed, load=%xh\n", rval);
2368 cnt = MAX_RISC_CODE_SEGMENTS;
2369 break;
2370 }
2371
2372 risc_address += word_count;
2373 risc_code_size -= word_count;
2374 risc_code_address += byte_count;
2375 }
2376 }
2377 }
2378 bzero(ha->req_q[0]->req_ring.bp, ha->fw_transfer_size);
2379
2380 /* Start firmware. */
2381 if (rval == QL_SUCCESS) {
2382 rval = ql_start_firmware(ha);
2383 }
2384
2385 if (rval != QL_SUCCESS) {
2386 EL(ha, "failed, rval = %xh\n", rval);
2387 } else {
2388 /*EMPTY*/
2389 QL_PRINT_10(ha, "done\n");
2390 }
2391
2392 return (rval);
2393 }
2394
2395 /*
2396 * ql_load_flash_fw
2397 * Gets ISP24xx firmware from flash and loads ISP.
2398 *
2399 * Input:
2400 * ha: adapter state pointer.
2401 *
2402 * Returns:
2403 * ql local function return status code.
2404 */
2405 static int
2406 ql_load_flash_fw(ql_adapter_state_t *ha)
2407 {
2408 int rval;
2409 uint8_t seg_cnt;
2410 uint32_t risc_address, xfer_size, count, *bp, faddr;
2411 uint32_t risc_code_size = 0;
2412
2413 QL_PRINT_10(ha, "started\n");
2414
2415 if (CFG_IST(ha, CFG_CTRL_278083)) {
2416 if ((rval = ql_load_flash_image(ha)) != QL_SUCCESS) {
2417 EL(ha, "load_flash_image status=%xh\n", rval);
2418 } else if (CFG_IST(ha, CFG_CTRL_27XX) &&
2419 (rval = ql_2700_get_flash_dmp_template(ha)) !=
2420 QL_SUCCESS) {
2421 EL(ha, "get_flash_dmp_template status=%xh\n", rval);
2422 }
2423 } else {
2424 faddr = ha->flash_data_addr | ha->flash_fw_addr;
2425
2426 for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) {
2427 xfer_size = ha->fw_transfer_size >> 2;
2428 do {
2429 GLOBAL_HW_LOCK();
2430
2431 /* Read data from flash. */
2432 bp = (uint32_t *)ha->req_q[0]->req_ring.bp;
2433 for (count = 0; count < xfer_size; count++) {
2434 rval = ql_24xx_read_flash(ha, faddr++,
2435 bp);
2436 if (rval != QL_SUCCESS) {
2437 break;
2438 }
2439 ql_chg_endian((uint8_t *)bp++, 4);
2440 }
2441
2442 GLOBAL_HW_UNLOCK();
2443
2444 if (rval != QL_SUCCESS) {
2445 EL(ha, "24xx_read_flash failed=%xh\n",
2446 rval);
2447 break;
2448 }
2449
2450 if (risc_code_size == 0) {
2451 bp = (uint32_t *)
2452 ha->req_q[0]->req_ring.bp;
2453 risc_address = bp[2];
2454 risc_code_size = bp[3];
2455 ha->risc_fw[seg_cnt].addr =
2456 risc_address;
2457 }
2458
2459 if (risc_code_size < xfer_size) {
2460 faddr -= xfer_size - risc_code_size;
2461 xfer_size = risc_code_size;
2462 }
2463
2464 (void) ddi_dma_sync(
2465 ha->req_q[0]->req_ring.dma_handle,
2466 0, xfer_size << 2, DDI_DMA_SYNC_FORDEV);
2467
2468 rval = ql_wrt_risc_ram(ha, risc_address,
2469 ha->req_q[0]->req_ring.cookie.dmac_laddress,
2470 xfer_size);
2471 if (rval != QL_SUCCESS) {
2472 EL(ha, "ql_wrt_risc_ram failed=%xh\n",
2473 rval);
2474 break;
2475 }
2476
2477 risc_address += xfer_size;
2478 risc_code_size -= xfer_size;
2479 } while (risc_code_size);
2480
2481 if (rval != QL_SUCCESS) {
2482 break;
2483 }
2484 }
2485 }
2486
2487 /* Start firmware. */
2488 if (rval == QL_SUCCESS) {
2489 rval = ql_start_firmware(ha);
2490 }
2491
2492 if (rval != QL_SUCCESS) {
2493 EL(ha, "failed, rval = %xh\n", rval);
2494 } else {
2495 /*EMPTY*/
2496 QL_PRINT_10(ha, "done\n");
2497 }
2498 return (rval);
2499 }
2500
2501 /*
2502 * ql_start_firmware
2503 * Starts RISC code.
2504 *
2505 * Input:
2506 * ha = adapter state pointer.
2507 *
2508 * Returns:
2509 * ql local function return status code.
2510 *
2511 * Context:
2512 * Kernel context.
2513 */
2514 int
2515 ql_start_firmware(ql_adapter_state_t *vha)
2516 {
2517 int rval, rval2;
2518 uint32_t data;
2519 ql_mbx_data_t mr = {0};
2520 ql_adapter_state_t *ha = vha->pha;
2521 ql_init_24xx_cb_t *icb =
2522 (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
2523
2524 QL_PRINT_10(ha, "started\n");
2525
2526 if (CFG_IST(ha, CFG_CTRL_82XX)) {
2527 /* Save firmware version. */
2528 rval = ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2529 ha->fw_major_version = mr.mb[1];
2530 ha->fw_minor_version = mr.mb[2];
2531 ha->fw_subminor_version = mr.mb[3];
2532 ha->fw_attributes = mr.mb[6];
2533 } else if ((rval = ql_verify_checksum(ha)) == QL_SUCCESS) {
		/* Checksum of loaded RISC code verified. */
2535 /* Start firmware execution. */
2536 (void) ql_execute_fw(ha);
2537
2538 /* Save firmware version. */
2539 (void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2540 ha->fw_major_version = mr.mb[1];
2541 ha->fw_minor_version = mr.mb[2];
2542 ha->fw_subminor_version = mr.mb[3];
2543 ha->fw_ext_memory_end = SHORT_TO_LONG(mr.mb[4], mr.mb[5]);
2544 ha->fw_ext_memory_size = ((ha->fw_ext_memory_end -
2545 0x100000) + 1) * 4;
2546 if (CFG_IST(ha, CFG_CTRL_278083)) {
2547 ha->fw_attributes = SHORT_TO_LONG(mr.mb[6], mr.mb[15]);
2548 ha->phy_fw_major_version = LSB(mr.mb[13]);
2549 ha->phy_fw_minor_version = MSB(mr.mb[14]);
2550 ha->phy_fw_subminor_version = LSB(mr.mb[14]);
2551 ha->fw_ext_attributes = SHORT_TO_LONG(mr.mb[16],
2552 mr.mb[17]);
2553 } else {
2554 ha->fw_attributes = mr.mb[6];
2555 ha->phy_fw_major_version = LSB(mr.mb[8]);
2556 ha->phy_fw_minor_version = MSB(mr.mb[9]);
2557 ha->phy_fw_subminor_version = LSB(mr.mb[9]);
2558 ha->mpi_capability_list =
2559 SHORT_TO_LONG(mr.mb[13], mr.mb[12]);
2560 }
2561 ha->mpi_fw_major_version = LSB(mr.mb[10]);
2562 ha->mpi_fw_minor_version = MSB(mr.mb[11]);
2563 ha->mpi_fw_subminor_version = LSB(mr.mb[11]);
2564 if (CFG_IST(ha, CFG_CTRL_27XX)) {
2565 ha->fw_shared_ram_start =
2566 SHORT_TO_LONG(mr.mb[18], mr.mb[19]);
2567 ha->fw_shared_ram_end =
2568 SHORT_TO_LONG(mr.mb[20], mr.mb[21]);
2569 ha->fw_ddr_ram_start =
2570 SHORT_TO_LONG(mr.mb[22], mr.mb[23]);
2571 ha->fw_ddr_ram_end =
2572 SHORT_TO_LONG(mr.mb[24], mr.mb[25]);
2573 }
2574 if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
2575 if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE,
2576 0, 0, &data)) == QL_SUCCESS) {
2577 ha->xioctl->fdesc.block_size = data << 2;
2578 QL_PRINT_10(ha, "fdesc.block_size="
2579 "%xh\n",
2580 ha->xioctl->fdesc.block_size);
2581 } else {
2582 EL(ha, "flash_access status=%xh\n", rval2);
2583 }
2584 }
2585
2586 /* Set Serdes Transmit Parameters. */
2587 if (CFG_IST(ha, CFG_CTRL_24XX) && ha->serdes_param[0] & BIT_0) {
2588 mr.mb[1] = ha->serdes_param[0];
2589 mr.mb[2] = ha->serdes_param[1];
2590 mr.mb[3] = ha->serdes_param[2];
2591 mr.mb[4] = ha->serdes_param[3];
2592 (void) ql_serdes_param(ha, &mr);
2593 }
2594 }
2595 /* ETS workaround */
2596 if (CFG_IST(ha, CFG_CTRL_81XX) && ql_enable_ets) {
2597 if (ql_get_firmware_option(ha, &mr) == QL_SUCCESS) {
2598 mr.mb[2] = (uint16_t)
2599 (mr.mb[2] | FO2_FCOE_512_MAX_MEM_WR_BURST);
2600 (void) ql_set_firmware_option(ha, &mr);
2601 }
2602 }
2603
2604 if (ha->flags & MULTI_QUEUE) {
2605 QL_PRINT_10(ha, "MULTI_QUEUE\n");
2606 icb->msi_x_vector[0] = LSB(ha->rsp_queues[0]->msi_x_vector);
2607 icb->msi_x_vector[1] = MSB(ha->rsp_queues[0]->msi_x_vector);
2608 if (ha->iflags & IFLG_INTR_MSIX &&
2609 CFG_IST(ha, CFG_NO_INTR_HSHAKE_SUP)) {
2610 QL_PRINT_10(ha, "NO_INTR_HANDSHAKE\n");
2611 ADAPTER_STATE_LOCK(ha);
2612 ha->flags |= NO_INTR_HANDSHAKE;
2613 ADAPTER_STATE_UNLOCK(ha);
2614 icb->firmware_options_2[2] = (uint8_t)
2615 (icb->firmware_options_2[2] & ~(BIT_6 | BIT_5));
2616 icb->firmware_options_2[2] = (uint8_t)
2617 (icb->firmware_options_2[2] | BIT_7);
2618 } else {
2619 icb->firmware_options_2[2] = (uint8_t)
2620 (icb->firmware_options_2[2] & ~BIT_5);
2621 icb->firmware_options_2[2] = (uint8_t)
2622 (icb->firmware_options_2[2] | BIT_7 | BIT_6);
2623 }
2624 } else {
2625 icb->firmware_options_2[2] = (uint8_t)
2626 (icb->firmware_options_2[2] & ~(BIT_7 | BIT_5));
2627 icb->firmware_options_2[2] = (uint8_t)
2628 (icb->firmware_options_2[2] | BIT_6);
2629 }
2630 icb->firmware_options_2[3] = (uint8_t)
2631 (icb->firmware_options_2[3] & ~(BIT_1 | BIT_0));
2632
2633 /* Set fw execution throttle. */
2634 if (CFG_IST(ha, CFG_CTRL_22XX) ||
2635 ql_get_resource_cnts(ha, &mr) != QL_SUCCESS) {
2636 icb->execution_throttle[0] = 0xff;
2637 icb->execution_throttle[1] = 0xff;
2638 } else {
2639 icb->execution_throttle[0] = LSB(mr.mb[6]);
2640 icb->execution_throttle[1] = MSB(mr.mb[6]);
2641 }
2642 EL(ha, "icb->execution_throttle %d\n",
2643 CHAR_TO_SHORT(icb->execution_throttle[0],
2644 icb->execution_throttle[1]));
2645
2646 if (rval != QL_SUCCESS) {
2647 ha->task_daemon_flags &= ~FIRMWARE_LOADED;
2648 EL(ha, "failed, rval = %xh\n", rval);
2649 } else {
2650 ha->task_daemon_flags |= FIRMWARE_LOADED;
2651 QL_PRINT_10(ha, "done\n");
2652 }
2653 return (rval);
2654 }
2655
2656 /*
2657 * ql_set_cache_line
2658 * Sets PCI cache line parameter.
2659 *
2660 * Input:
2661 * ha = adapter state pointer.
2662 *
2663 * Returns:
2664 * ql local function return status code.
2665 *
2666 * Context:
2667 * Kernel context.
2668 */
2669 int
2670 ql_set_cache_line(ql_adapter_state_t *ha)
2671 {
2672 QL_PRINT_3(ha, "started\n");
2673
2674 /* Set the cache line. */
2675 if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) {
2676 /* Set cache line register. */
2677 ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1);
2678 }
2679
2680 QL_PRINT_3(ha, "done\n");
2681
2682 return (QL_SUCCESS);
2683 }
2684
2685 /*
2686 * ql_init_rings
2687 * Initializes firmware and ring pointers.
2688 *
2689 * Beginning of response ring has initialization control block
2690 * already built by nvram config routine.
2691 *
2692 * Input:
2693 * ha = adapter state pointer.
2694 * ha->req_q = request rings
2695 * ha->rsp_queues = response rings
2696 * ha->init_ctrl_blk = initialization control block
2697 *
2698 * Returns:
2699 * ql local function return status code.
2700 *
2701 * Context:
2702 * Kernel context.
2703 */
2704 int
2705 ql_init_rings(ql_adapter_state_t *vha2)
2706 {
2707 int rval, rval2;
2708 uint16_t index;
2709 ql_mbx_data_t mr;
2710 ql_adapter_state_t *ha = vha2->pha;
2711
2712 QL_PRINT_3(ha, "started\n");
2713
2714 /* Clear outstanding commands array. */
2715 for (index = 0; index < ha->osc_max_cnt; index++) {
2716 ha->outstanding_cmds[index] = NULL;
2717 }
2718 ha->osc_index = 1;
2719
2720 ha->pending_cmds.first = NULL;
2721 ha->pending_cmds.last = NULL;
2722
2723 /* Initialize firmware. */
2724 ha->req_q[0]->req_ring_ptr = ha->req_q[0]->req_ring.bp;
2725 ha->req_q[0]->req_ring_index = 0;
2726 ha->req_q[0]->req_q_cnt = REQUEST_ENTRY_CNT - 1;
2727 ha->rsp_queues[0]->rsp_ring_ptr = ha->rsp_queues[0]->rsp_ring.bp;
2728 ha->rsp_queues[0]->rsp_ring_index = 0;
2729
2730 if (ha->flags & VP_ENABLED) {
2731 ql_adapter_state_t *vha;
2732 ql_init_24xx_cb_t *icb = &ha->init_ctrl_blk.cb24;
2733
2734 bzero(icb->vp_count,
2735 ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) -
2736 (uintptr_t)icb->vp_count);
2737 icb->vp_count[0] = ha->max_vports - 1;
2738
2739 /* Allow connection option 2. */
2740 icb->global_vp_option[0] = BIT_1;
2741
2742 /* Setup default options for all ports. */
2743 for (index = 0; index < ha->max_vports; index++) {
2744 icb->vpc[index].options = VPO_TARGET_MODE_DISABLED |
2745 VPO_INITIATOR_MODE_ENABLED;
2746 }
2747 /* Setup enabled ports. */
2748 for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
2749 if (vha->vp_index == 0 ||
2750 vha->vp_index >= ha->max_vports) {
2751 continue;
2752 }
2753
2754 index = (uint8_t)(vha->vp_index - 1);
2755 bcopy(vha->loginparams.node_ww_name.raw_wwn,
2756 icb->vpc[index].node_name, 8);
2757 bcopy(vha->loginparams.nport_ww_name.raw_wwn,
2758 icb->vpc[index].port_name, 8);
2759
2760 if (vha->flags & VP_ENABLED) {
2761 icb->vpc[index].options = (uint8_t)
2762 (icb->vpc[index].options | VPO_ENABLED);
2763 }
2764 }
2765 }
2766
2767 for (index = 0; index < 2; index++) {
2768 rval = ql_init_firmware(ha);
2769 if (rval == QL_COMMAND_ERROR) {
2770 EL(ha, "stopping firmware\n");
2771 (void) ql_stop_firmware(ha);
2772 } else {
2773 break;
2774 }
2775 }
2776
2777 if (rval == QL_SUCCESS && CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2778 /* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */
2779 rval = ql_get_firmware_option(ha, &mr);
2780 if (rval == QL_SUCCESS) {
2781 mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9);
2782 mr.mb[2] = 0;
2783 mr.mb[3] = BIT_10;
2784 rval = ql_set_firmware_option(ha, &mr);
2785 }
2786 }
2787
2788 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) {
2789 /* Firmware Fibre Channel Event Trace Buffer */
2790 if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE,
2791 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2792 EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2);
2793 } else {
2794 if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf,
2795 FTO_FCE_TRACE_ENABLE, NULL)) != QL_SUCCESS) {
2796 EL(ha, "fcetrace enable failed: %xh\n", rval2);
2797 ql_free_phys(ha, &ha->fwfcetracebuf);
2798 }
2799 }
2800 }
2801
2802 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) {
2803 /* Firmware Extended Trace Buffer */
2804 if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE,
2805 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2806 EL(ha, "exttrace buffer alloc failed: %xh\n", rval2);
2807 } else {
2808 if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf,
2809 FTO_EXT_TRACE_ENABLE, NULL)) != QL_SUCCESS) {
2810 EL(ha, "exttrace enable failed: %xh\n", rval2);
2811 ql_free_phys(ha, &ha->fwexttracebuf);
2812 }
2813 }
2814 }
2815
2816 if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) {
2817 ql_mbx_iocb_t *pkt;
2818 clock_t timer;
2819
2820 /* Wait for firmware login of menlo. */
2821 for (timer = 3000; timer; timer--) {
2822 if (ha->flags & MENLO_LOGIN_OPERATIONAL) {
2823 break;
2824 }
2825
2826 if (!(ha->flags & INTERRUPTS_ENABLED) ||
2827 ddi_in_panic()) {
2828 if (INTERRUPT_PENDING(ha)) {
2829 (void) ql_isr((caddr_t)ha);
2830 INTR_LOCK(ha);
2831 ha->intr_claimed = B_TRUE;
2832 INTR_UNLOCK(ha);
2833 }
2834 }
2835
2836 /* Delay for 1 tick (10 milliseconds). */
2837 ql_delay(ha, 10000);
2838 }
2839
2840 if (timer == 0) {
2841 rval = QL_FUNCTION_TIMEOUT;
2842 } else {
2843 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
2844 if (pkt == NULL) {
2845 EL(ha, "failed, kmem_zalloc\n");
2846 rval = QL_MEMORY_ALLOC_FAILED;
2847 } else {
2848 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
2849 pkt->mvfy.entry_count = 1;
2850 pkt->mvfy.options_status =
2851 LE_16(VMF_DO_NOT_UPDATE_FW);
2852
2853 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
2854 sizeof (ql_mbx_iocb_t));
2855 LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
2856 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
2857
2858 if (rval != QL_SUCCESS ||
2859 (pkt->mvfy.entry_status & 0x3c) != 0 ||
2860 pkt->mvfy.options_status != CS_COMPLETE) {
2861 EL(ha, "failed, status=%xh, es=%xh, "
2862 "cs=%xh, fc=%xh\n", rval,
2863 pkt->mvfy.entry_status & 0x3c,
2864 pkt->mvfy.options_status,
2865 pkt->mvfy.failure_code);
2866 if (rval == QL_SUCCESS) {
2867 rval = QL_FUNCTION_FAILED;
2868 }
2869 }
2870
2871 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
2872 }
2873 }
2874 }
2875
2876 if (rval != QL_SUCCESS) {
2877 TASK_DAEMON_LOCK(ha);
2878 ha->task_daemon_flags &= ~FIRMWARE_UP;
2879 TASK_DAEMON_UNLOCK(ha);
2880 EL(ha, "failed, rval = %xh\n", rval);
2881 } else {
2882 TASK_DAEMON_LOCK(ha);
2883 ha->task_daemon_flags |= FIRMWARE_UP;
2884 TASK_DAEMON_UNLOCK(ha);
2885 QL_PRINT_3(ha, "done\n");
2886 }
2887 return (rval);
2888 }
2889
2890 /*
2891 * ql_fw_ready
 *	Waits for firmware ready. If the firmware becomes ready,
 *	device queues and RISC code are synchronized.
2894 *
2895 * Input:
2896 * ha = adapter state pointer.
2897 * secs = max wait time, in seconds (0-255).
2898 *
2899 * Returns:
2900 * ql local function return status code.
2901 *
2902 * Context:
2903 * Kernel context.
2904 */
2905 int
2906 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs)
2907 {
2908 ql_mbx_data_t mr;
2909 clock_t timer, login_wait, wait;
2910 clock_t dly = 250000;
2911 clock_t sec_delay = MICROSEC / dly;
2912 int rval = QL_FUNCTION_FAILED;
2913 uint16_t state[6] = {0};
2914
2915 QL_PRINT_3(ha, "started\n");
2916
2917 login_wait = ha->r_a_tov * 2 * sec_delay;
2918 timer = wait = secs * sec_delay;
2919 state[0] = 0xffff;
2920
2921 /* Wait for ISP to finish LIP */
2922 while (login_wait != 0 && wait != 0 &&
2923 !(ha->task_daemon_flags & ISP_ABORT_NEEDED) &&
2924 !(ha->flags & MPI_RESET_NEEDED)) {
2925
2926 rval = ql_get_firmware_state(ha, &mr);
2927 if (rval == QL_SUCCESS) {
2928 if (mr.mb[1] != FSTATE_READY) {
2929 if (mr.mb[1] == FSTATE_LOSS_SYNC &&
2930 mr.mb[4] == FSTATE_MPI_NIC_ERROR &&
2931 CFG_IST(ha, CFG_FCOE_SUPPORT)) {
2932 EL(ha, "mpi_nic_error, "
2933 "isp_abort_needed\n");
2934 ADAPTER_STATE_LOCK(ha);
2935 ha->flags |= MPI_RESET_NEEDED;
2936 ADAPTER_STATE_UNLOCK(ha);
2937 if (!(ha->task_daemon_flags &
2938 ABORT_ISP_ACTIVE)) {
2939 TASK_DAEMON_LOCK(ha);
2940 ha->task_daemon_flags |=
2941 ISP_ABORT_NEEDED;
2942 TASK_DAEMON_UNLOCK(ha);
2943 }
2944 }
2945 if (mr.mb[1] != FSTATE_WAIT_LOGIN) {
2946 timer = --wait;
2947 } else {
2948 timer = --login_wait;
2949 }
2950 rval = QL_FUNCTION_FAILED;
2951 } else {
2952 /* Firmware is ready. Get 2 * R_A_TOV. */
2953 rval = ql_get_timeout_parameters(ha,
2954 &ha->r_a_tov);
2955 if (rval != QL_SUCCESS) {
2956 EL(ha, "failed, get_timeout_param"
2957 "=%xh\n", rval);
2958 }
2959
2960 /* Configure loop. */
2961 rval = ql_configure_loop(ha);
2962 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
2963
2964 if (ha->task_daemon_flags &
2965 LOOP_RESYNC_NEEDED) {
2966 wait--;
2967 EL(ha, "loop trans; tdf=%xh\n",
2968 ha->task_daemon_flags);
2969 } else {
2970 break;
2971 }
2972 }
2973 } else {
2974 break;
2975 }
2976
2977 if (state[0] != mr.mb[1] || state[1] != mr.mb[2] ||
2978 state[2] != mr.mb[3] || state[3] != mr.mb[4] ||
2979 state[4] != mr.mb[5] || state[5] != mr.mb[6]) {
2980 EL(ha, "mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
2981 "mbx5=%xh, mbx6=%xh\n", mr.mb[1], mr.mb[2],
2982 mr.mb[3], mr.mb[4], mr.mb[5], mr.mb[6]);
2983 state[0] = mr.mb[1];
2984 state[1] = mr.mb[2];
2985 state[2] = mr.mb[3];
2986 state[3] = mr.mb[4];
2987 state[4] = mr.mb[5];
2988 state[5] = mr.mb[6];
2989 }
2990
2991 /* Delay for a tick if waiting. */
2992 if (timer != 0) {
2993 if (timer % 4 == 0) {
2994 delay(drv_usectohz(dly));
2995 } else {
2996 drv_usecwait(dly);
2997 }
2998 } else {
2999 rval = QL_FUNCTION_TIMEOUT;
3000 }
3001 }
3002
3003 if (rval != QL_SUCCESS) {
3004 if ((ha->task_daemon_flags & ISP_ABORT_NEEDED ||
3005 ha->flags & MPI_RESET_NEEDED) &&
3006 ha->task_daemon_flags & LOOP_RESYNC_NEEDED) {
3007 TASK_DAEMON_LOCK(ha);
3008 ha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
3009 TASK_DAEMON_UNLOCK(ha);
3010 }
3011 EL(ha, "failed, rval = %xh\n", rval);
3012 } else {
3013 /*EMPTY*/
3014 QL_PRINT_3(ha, "done\n");
3015 }
3016 return (rval);
3017 }
3018
3019 /*
3020 * ql_configure_loop
3021 * Setup configurations based on loop.
3022 *
3023 * Input:
3024 * ha = adapter state pointer.
3025 *
3026 * Returns:
3027 * ql local function return status code.
3028 *
3029 * Context:
3030 * Kernel context.
3031 */
3032 static int
3033 ql_configure_loop(ql_adapter_state_t *ha)
3034 {
3035 int rval = QL_SUCCESS;
3036 ql_adapter_state_t *vha;
3037
3038 QL_PRINT_10(ha, "started\n");
3039
3040 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3041 TASK_DAEMON_LOCK(ha);
3042 if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) &&
3043 vha->vp_index != 0 &&
3044 (!(vha->flags & VP_ENABLED) ||
3045 vha->flags & VP_ID_NOT_ACQUIRED)) {
3046 TASK_DAEMON_UNLOCK(ha);
3047 continue;
3048 }
3049 vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
3050 TASK_DAEMON_UNLOCK(ha);
3051
3052 rval = ql_configure_hba(vha);
3053 if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
3054 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
3055 rval = ql_configure_device_d_id(vha);
3056 if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
3057 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
3058 (void) ql_configure_fabric(vha);
3059 }
3060 }
3061 }
3062
3063 if (rval != QL_SUCCESS) {
3064 EL(ha, "failed, rval = %xh\n", rval);
3065 } else {
3066 /*EMPTY*/
3067 QL_PRINT_10(ha, "done\n");
3068 }
3069 return (rval);
3070 }
3071
3072 /*
3073 * ql_configure_n_port_info
 *	Setup configuration for N_Port-to-N_Port (point-to-point) topology.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Kernel context.
 *	ADAPTER_STATE_LOCK must already be held.
3085 */
3086 static void
3087 ql_configure_n_port_info(ql_adapter_state_t *ha)
3088 {
3089 ql_tgt_t tmp_tq;
3090 ql_tgt_t *tq;
3091 uint8_t *cb_port_name;
3092 ql_link_t *link;
3093 int index, rval;
3094 uint16_t loop_id = 0;
3095 uint32_t found = 0;
3096 ql_dev_id_list_t *list;
3097 uint32_t list_size;
3098 ql_mbx_data_t mr;
3099 port_id_t d_id = {0, 0, 0, 0};
3100
3101 QL_PRINT_10(ha, "started\n");
3102
3103 /* Free existing target queues. */
3104 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3105 link = ha->dev[index].first;
3106 while (link != NULL) {
3107 tq = link->base_address;
3108 link = link->next;
3109
			/* Work around FW issue: do implicit logout. */
			/* Never send LOGO to a reused loop ID! */
3112 if ((tq->loop_id != 0x7ff) &&
3113 (tq->loop_id != 0x7fe)) {
3114 if (found == 0) {
3115 rval = ql_get_port_database(ha,
3116 tq, PDF_NONE);
3117 if ((rval == QL_SUCCESS) &&
3118 (tq->master_state ==
3119 PD_STATE_PORT_LOGGED_IN)) {
3120 EL(ha, "nport id (%xh) "
3121 "loop_id=%xh "
3122 "reappeared\n",
3123 tq->d_id.b24,
3124 tq->loop_id);
3125 bcopy((void *)&tq->port_name[0],
3126 (void *)&ha->n_port->
3127 port_name[0],
3128 8);
3129 bcopy((void *)&tq->node_name[0],
3130 (void *)&ha->n_port->
3131 node_name[0],
3132 8);
3133 ha->n_port->d_id.b24 =
3134 tq->d_id.b24;
3135 found = 1;
3136 continue;
3137 }
3138 }
3139 (void) ql_logout_fabric_port(ha, tq);
3140 }
3141
3142 tq->loop_id = PORT_NO_LOOP_ID;
3143 }
3144 }
3145
3146 if (found == 1) {
3147 QL_PRINT_10(ha, "done found\n");
3148 return;
3149 }
3150
3151 tq = &tmp_tq;
3152
3153 /*
	 * If the N_Port's WWPN is larger than ours, then it has the
	 * N_Port login initiative. It will have determined that and
	 * logged in with the firmware. This results in a device
	 * database entry. In this situation we will later send up a PLOGI
	 * by proxy for the N_Port to get things going.
	 *
	 * If the N_Port's WWPN is smaller, then the firmware has the
	 * N_Port login initiative and does a FLOGI in order to obtain the
	 * N_Port's WWNN and WWPN. These names are required later
	 * during Leadville's FLOGI. No PLOGI is done by the firmware in
	 * anticipation of a PLOGI via the driver from the upper layers.
	 * Upon receipt of said PLOGI the driver issues an ELS PLOGI
	 * pass-through command and the firmware assumes the s_id
	 * and the N_Port assumes the d_id, and Bob's your uncle.
3168 */
3169
3170 /*
	 * In N_Port-to-N_Port topology the FW provides a port database entry
	 * at loop_id 0x7fe, which allows us to acquire the port's WWPN.
3173 */
3174 tq->d_id.b.al_pa = 0;
3175 tq->d_id.b.area = 0;
3176 tq->d_id.b.domain = 0;
3177 tq->loop_id = 0x7fe;
3178
3179 rval = ql_get_port_database(ha, tq, PDF_NONE);
3180
3181 /*
3182 * Only collect the P2P remote port information in the case of
	 * QL_SUCCESS. The FW should always have logged in (FLOGI) to the
	 * remote port at this point.
3185 */
3186 if (rval == QL_SUCCESS) {
3187 cb_port_name = &ha->loginparams.nport_ww_name.raw_wwn[0];
3188
3189 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
3190 (la_wwn_t *)cb_port_name) == 1)) {
3191 EL(ha, "target port has N_Port login initiative\n");
3192 } else {
3193 EL(ha, "host port has N_Port login initiative\n");
3194 }
3195
3196 /* Capture the N Ports WWPN */
3197
3198 bcopy((void *)&tq->port_name[0],
3199 (void *)&ha->n_port->port_name[0], 8);
3200 bcopy((void *)&tq->node_name[0],
3201 (void *)&ha->n_port->node_name[0], 8);
3202
3203 /* Resolve an n_port_handle */
3204 ha->n_port->n_port_handle = 0x7fe;
3205
3206 }
3207
3208 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3209 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
3210
3211 if (ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
3212 QL_SUCCESS) {
		/* For P2P, mr.mb[1] (the returned ID count) must be 1. */
3214 if (mr.mb[1] == 1) {
3215 index = 0;
3216 ql_dev_list(ha, list, index,
3217 &d_id, &loop_id);
3218 ha->n_port->n_port_handle = loop_id;
3219
3220 tq->loop_id = loop_id;
3221 tq->d_id.b24 = d_id.b24;
3222 ha->n_port->d_id.b24 = d_id.b24;
3223 } else {
3224 for (index = 0; index <= LAST_LOCAL_LOOP_ID;
3225 index++) {
				/* reuse tq */
3227 tq->loop_id = (uint16_t)index;
3228 rval = ql_get_port_database(ha, tq,
3229 PDF_NONE);
3230 if (rval == QL_NOT_LOGGED_IN) {
3231 if (tq->master_state ==
3232 PD_STATE_PLOGI_PENDING) {
3233 ha->n_port->
3234 n_port_handle =
3235 tq->loop_id;
3236 ha->n_port->d_id.b24 =
3237 tq->hard_addr.b24;
3238 break;
3239 }
3240 } else if (rval == QL_SUCCESS) {
3241 ha->n_port->n_port_handle =
3242 tq->loop_id;
3243 ha->n_port->d_id.b24 =
3244 tq->hard_addr.b24;
3245
3246 break;
3247 }
3248 }
3249 if (index > LAST_LOCAL_LOOP_ID) {
3250 EL(ha, "P2P:exceeded last id, "
3251 "n_port_handle = %xh\n",
3252 ha->n_port->n_port_handle);
3253
3254 ha->n_port->n_port_handle = 0;
3255 tq->loop_id = 0;
3256 }
3257 }
3258 } else {
3259 kmem_free(list, list_size);
3260 EL(ha, "ql_get_dev_list unsuccessful\n");
3261 return;
3262 }
3263
	/* Use tq->loop_id to get the port database. */
3265
3266 rval = ql_get_port_database(ha, tq, PDF_NONE);
3267
3268 if (rval == QL_NOT_LOGGED_IN) {
3269 if (tq->master_state == PD_STATE_PLOGI_PENDING) {
3270 bcopy((void *)&tq->port_name[0],
3271 (void *)&ha->n_port->port_name[0], 8);
3272 bcopy((void *)&tq->node_name[0],
3273 (void *)&ha->n_port->node_name[0], 8);
3274 bcopy((void *)&tq->hard_addr,
3275 (void *)&ha->n_port->d_id,
3276 sizeof (port_id_t));
3277 ha->n_port->d_id.b24 = d_id.b24;
3278 }
3279 } else if (rval == QL_SUCCESS) {
3280 bcopy((void *)&tq->port_name[0],
3281 (void *)&ha->n_port->port_name[0], 8);
3282 bcopy((void *)&tq->node_name[0],
3283 (void *)&ha->n_port->node_name[0], 8);
3284 bcopy((void *)&tq->hard_addr,
3285 (void *)&ha->n_port->d_id, sizeof (port_id_t));
3286 ha->n_port->d_id.b24 = d_id.b24;
3287
3288 }
3289
3290 kmem_free(list, list_size);
3291
3292 EL(ha, "d_id = %xh, nport_handle = %xh, tq->loop_id = %xh",
3293 tq->d_id.b24, ha->n_port->n_port_handle, tq->loop_id);
3294 }
3295
3296
3297 /*
3298 * ql_configure_hba
3299 * Setup adapter context.
3300 *
3301 * Input:
3302 * ha = adapter state pointer.
3303 *
3304 * Returns:
3305 * ql local function return status code.
3306 *
3307 * Context:
3308 * Kernel context.
3309 */
3310 static int
3311 ql_configure_hba(ql_adapter_state_t *ha)
3312 {
3313 uint8_t *bp;
3314 int rval;
3315 uint32_t state;
3316 ql_mbx_data_t mr;
3317
3318 QL_PRINT_10(ha, "started\n");
3319
3320 /* Get host addresses. */
3321 rval = ql_get_adapter_id(ha, &mr);
3322 if (rval == QL_SUCCESS) {
3323 ha->topology = (uint8_t)(ha->topology &
3324 ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));
3325
3326 /* Save Host d_id, alpa, loop ID. */
3327 ha->loop_id = mr.mb[1];
3328 ha->d_id.b.al_pa = LSB(mr.mb[2]);
3329 ha->d_id.b.area = MSB(mr.mb[2]);
3330 ha->d_id.b.domain = LSB(mr.mb[3]);
3331 ha->bbcr_initial = LSB(mr.mb[15]);
3332 ha->bbcr_runtime = MSB(mr.mb[15]);
3333
3334 ADAPTER_STATE_LOCK(ha);
3335 ha->flags &= ~FDISC_ENABLED;
3336 ADAPTER_STATE_UNLOCK(ha);
3337
3338 /* Get loop topology. */
3339 switch (mr.mb[6]) {
3340 case GID_TOP_NL_PORT:
3341 ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
3342 ha->loop_id = mr.mb[1];
3343 break;
3344 case GID_TOP_FL_PORT:
3345 ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
3346 ha->loop_id = mr.mb[1];
3347 break;
3348 case GID_TOP_N_PORT:
3349 case GID_TOP_N_PORT_NO_TGT:
3350 ha->flags |= POINT_TO_POINT;
3351 ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
3352 ha->loop_id = 0xffff;
3353 if (CFG_IST(ha, CFG_N2N_SUPPORT)) {
3354 ql_configure_n_port_info(ha);
3355 }
3356 break;
3357 case GID_TOP_F_PORT:
3358 ha->flags |= POINT_TO_POINT;
3359 ha->topology = (uint8_t)(ha->topology | QL_F_PORT);
3360 ha->loop_id = 0xffff;
3361
3362 /* Get supported option. */
3363 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
3364 mr.mb[7] & GID_FP_NPIV_SUPPORT) {
3365 ADAPTER_STATE_LOCK(ha);
3366 ha->flags |= FDISC_ENABLED;
3367 ADAPTER_STATE_UNLOCK(ha);
3368 }
3369 /* Get VLAN ID, mac address */
3370 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3371 ha->flags |= FDISC_ENABLED;
3372 ha->fabric_params = mr.mb[7];
3373 ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
3374 ha->fcoe_fcf_idx = mr.mb[10];
3375 ha->fcoe_vnport_mac[5] = MSB(mr.mb[11]);
3376 ha->fcoe_vnport_mac[4] = LSB(mr.mb[11]);
3377 ha->fcoe_vnport_mac[3] = MSB(mr.mb[12]);
3378 ha->fcoe_vnport_mac[2] = LSB(mr.mb[12]);
3379 ha->fcoe_vnport_mac[1] = MSB(mr.mb[13]);
3380 ha->fcoe_vnport_mac[0] = LSB(mr.mb[13]);
3381 }
3382 break;
3383 default:
3384 QL_PRINT_2(ha, "UNKNOWN topology=%xh, d_id=%xh\n",
3385 mr.mb[6], ha->d_id.b24);
3386 rval = QL_FUNCTION_FAILED;
3387 break;
3388 }
3389
3390 if (CFG_IST(ha, CFG_CTRL_2363 | CFG_ISP_FW_TYPE_2)) {
3391 mr.mb[1] = 0;
3392 mr.mb[2] = 0;
3393 rval = ql_data_rate(ha, &mr);
3394 if (rval != QL_SUCCESS) {
3395 EL(ha, "data_rate status=%xh\n", rval);
3396 state = FC_STATE_FULL_SPEED;
3397 } else {
3398 ha->iidma_rate = mr.mb[1];
3399 if (mr.mb[1] == IIDMA_RATE_1GB) {
3400 state = FC_STATE_1GBIT_SPEED;
3401 } else if (mr.mb[1] == IIDMA_RATE_2GB) {
3402 state = FC_STATE_2GBIT_SPEED;
3403 } else if (mr.mb[1] == IIDMA_RATE_4GB) {
3404 state = FC_STATE_4GBIT_SPEED;
3405 } else if (mr.mb[1] == IIDMA_RATE_8GB) {
3406 state = FC_STATE_8GBIT_SPEED;
3407 } else if (mr.mb[1] == IIDMA_RATE_10GB) {
3408 state = FC_STATE_10GBIT_SPEED;
3409 } else if (mr.mb[1] == IIDMA_RATE_16GB) {
3410 state = FC_STATE_16GBIT_SPEED;
3411 } else if (mr.mb[1] == IIDMA_RATE_32GB) {
3412 state = FC_STATE_32GBIT_SPEED;
3413 } else {
3414 state = 0;
3415 }
3416 }
3417 } else {
3418 ha->iidma_rate = IIDMA_RATE_1GB;
3419 state = FC_STATE_FULL_SPEED;
3420 }
3421 ha->state = FC_PORT_STATE_MASK(ha->state) | state;
3422 } else if (rval == MBS_COMMAND_ERROR) {
3423 EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
3424 rval, mr.mb[1]);
3425 }
3426
3427 if (rval != QL_SUCCESS) {
3428 EL(ha, "failed, rval = %xh\n", rval);
3429 } else {
3430 bp = ha->loginparams.nport_ww_name.raw_wwn;
3431 EL(ha, "topology=%xh, hba port id=%xh, "
3432 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
3433 ha->topology, ha->d_id.b24, bp[0], bp[1],
3434 bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
3435 }
3436 return (rval);
3437 }
3438
3439 /*
3440 * ql_configure_device_d_id
3441 * Updates device loop ID.
3442 * Also adds to device queue any new devices found on private loop.
3443 *
3444 * Input:
3445 * ha = adapter state pointer.
3446 *
3447 * Returns:
3448 * ql local function return status code.
3449 *
3450 * Context:
3451 * Kernel context.
3452 */
3453 static int
3454 ql_configure_device_d_id(ql_adapter_state_t *ha)
3455 {
3456 port_id_t d_id;
3457 ql_link_t *link;
3458 int rval;
3459 int loop;
3460 ql_tgt_t *tq;
3461 ql_dev_id_list_t *list;
3462 uint32_t list_size;
3463 uint16_t index, loop_id;
3464 ql_mbx_data_t mr;
3465 uint8_t retries = MAX_DEVICE_LOST_RETRY;
3466
3467 QL_PRINT_10(ha, "started\n");
3468
3469 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3470 list = kmem_zalloc(list_size, KM_SLEEP);
3471 if (list == NULL) {
3472 rval = QL_MEMORY_ALLOC_FAILED;
3473 EL(ha, "failed, rval = %xh\n", rval);
3474 return (rval);
3475 }
3476
3477 do {
3478 /*
3479 * Get data from RISC code d_id list to init each device queue.
3480 */
3481 rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
3482 if (rval != QL_SUCCESS) {
3483 kmem_free(list, list_size);
3484 EL(ha, "failed, rval = %xh\n", rval);
3485 return (rval);
3486 }
3487
3488 /*
3489 * Mark queues as unusable selectively.
3490 * If the current topology is AL, only fabric tgt queues
3491 * are marked as unusable and eventually removed.
3492 * If the current topology is P2P, all fabric tgt queues
3493 * are processed in ql_configure_n_port_info().
		 * If the current topology is Fabric, all previously created
		 * non-fabric devices should be marked as lost and eventually
		 * removed.
3497 */
3498 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3499 for (link = ha->dev[index].first; link != NULL;
3500 link = link->next) {
3501 tq = link->base_address;
3502
3503 if (VALID_DEVICE_ID(ha, tq->loop_id)) {
3504 DEVICE_QUEUE_LOCK(tq);
3505 if (!(tq->flags & TQF_PLOGI_PROGRS) &&
3506 !(ha->topology & QL_N_PORT)) {
3507 tq->loop_id = (uint16_t)
3508 (tq->loop_id |
3509 PORT_LOST_ID);
3510 }
3511 if ((ha->topology & QL_NL_PORT) &&
3512 (tq->flags & TQF_FABRIC_DEVICE)) {
3513 tq->loop_id = (uint16_t)
3514 (tq->loop_id |
3515 PORT_LOST_ID);
3516 }
3517 DEVICE_QUEUE_UNLOCK(tq);
3518 }
3519 }
3520 }
3521
3522 /* If device not in queues add new queue. */
3523 for (index = 0; index < mr.mb[1]; index++) {
3524 ql_dev_list(ha, list, index, &d_id, &loop_id);
3525
3526 if (VALID_DEVICE_ID(ha, loop_id)) {
3527 ADAPTER_STATE_LOCK(ha);
3528 tq = ql_dev_init(ha, d_id, loop_id);
3529 ADAPTER_STATE_UNLOCK(ha);
3530 if (tq != NULL) {
3531 tq->loop_id = loop_id;
3532
3533 /* Test for fabric device. */
3534 if (ha->topology & QL_F_PORT ||
3535 d_id.b.domain !=
3536 ha->d_id.b.domain ||
3537 d_id.b.area != ha->d_id.b.area) {
3538 tq->flags |= TQF_FABRIC_DEVICE;
3539 }
3540
3541 if (ql_get_port_database(ha, tq,
3542 PDF_NONE) == QL_SUCCESS) {
3543 tq->loop_id = (uint16_t)
3544 (tq->loop_id &
3545 ~PORT_LOST_ID);
3546 }
3547 }
3548 }
3549 }
3550
3551 /* 24xx does not report switch devices in ID list. */
3552 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
3553 ha->topology & QL_FABRIC_CONNECTION) {
3554 d_id.b24 = FS_FABRIC_F_PORT;
3555 ADAPTER_STATE_LOCK(ha);
3556 tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
3557 ADAPTER_STATE_UNLOCK(ha);
3558 if (tq != NULL) {
3559 tq->flags |= TQF_FABRIC_DEVICE;
3560 (void) ql_get_port_database(ha, tq, PDF_NONE);
3561 }
3562
3563 d_id.b24 = FS_NAME_SERVER;
3564 ADAPTER_STATE_LOCK(ha);
3565 tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
3566 ADAPTER_STATE_UNLOCK(ha);
3567 if (tq != NULL) {
3568 tq->flags |= TQF_FABRIC_DEVICE;
3569 if (ha->vp_index != 0) {
3570 (void) ql_login_fport(ha, tq,
3571 SNS_24XX_HDL, LFF_NONE, NULL);
3572 }
3573 (void) ql_get_port_database(ha, tq, PDF_NONE);
3574 }
3575 }
3576
3577 /* Allocate queue for broadcast. */
3578 d_id.b24 = FS_BROADCAST;
3579 ADAPTER_STATE_LOCK(ha);
3580 (void) ql_dev_init(ha, d_id, (uint16_t)
3581 (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ? BROADCAST_24XX_HDL :
3582 IP_BROADCAST_LOOP_ID));
3583 ADAPTER_STATE_UNLOCK(ha);
3584
3585 /*
3586 * Topology change (fabric<->p2p),(fabric<->al)
3587 * (al<->p2p) have to be taken care of.
3588 */
3589 loop = FALSE;
3590 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3591 ql_update_dev(ha, index);
3592 }
3593
3594 if ((ha->topology & QL_NL_PORT) && (mr.mb[1] != 0)) {
3595 loop = FALSE;
3596 } else if (mr.mb[1] == 0 && !(ha->topology & QL_F_PORT)) {
3597 loop = TRUE;
3598 }
3599
3600 /* Give devices time to recover. */
3601 if (loop == TRUE) {
3602 drv_usecwait(1000000);
3603 }
3604 } while (retries-- && loop == TRUE &&
3605 !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));
3606
3607 kmem_free(list, list_size);
3608
3609 if (rval != QL_SUCCESS) {
3610 EL(ha, "failed=%xh\n", rval);
3611 } else {
3612 /*EMPTY*/
3613 QL_PRINT_10(ha, "done\n");
3614 }
3615
3616 return (rval);
3617 }
3618
3619 /*
3620 * ql_dev_list
3621 * Gets device d_id and loop ID from firmware device list.
3622 *
3623 * Input:
3624 * ha: adapter state pointer.
 *	list: device list pointer.
3626 * index: list index of device data.
3627 * d_id: pointer for d_id data.
3628 * id: pointer for loop ID.
3629 *
3630 * Context:
3631 * Kernel context.
3632 */
3633 void
3634 ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
3635 uint32_t index, port_id_t *d_id, uint16_t *id)
3636 {
3637 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3638 struct ql_24_dev_id *list24 = (struct ql_24_dev_id *)list;
3639
3640 d_id->b.al_pa = list24[index].al_pa;
3641 d_id->b.area = list24[index].area;
3642 d_id->b.domain = list24[index].domain;
3643 *id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
3644 list24[index].n_port_hdl_h);
3645
3646 } else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3647 struct ql_ex_dev_id *list23 = (struct ql_ex_dev_id *)list;
3648
3649 d_id->b.al_pa = list23[index].al_pa;
3650 d_id->b.area = list23[index].area;
3651 d_id->b.domain = list23[index].domain;
3652 *id = CHAR_TO_SHORT(list23[index].loop_id_l,
3653 list23[index].loop_id_h);
3654
3655 } else {
3656 struct ql_dev_id *list22 = (struct ql_dev_id *)list;
3657
3658 d_id->b.al_pa = list22[index].al_pa;
3659 d_id->b.area = list22[index].area;
3660 d_id->b.domain = list22[index].domain;
3661 *id = (uint16_t)list22[index].loop_id;
3662 }
3663 }
3664
3665 /*
3666 * ql_configure_fabric
3667 * Setup fabric context.
3668 *
3669 * Input:
3670 * ha = adapter state pointer.
3671 *
3672 * Returns:
3673 * ql local function return status code.
3674 *
3675 * Context:
3676 * Kernel context.
3677 */
3678 static int
3679 ql_configure_fabric(ql_adapter_state_t *ha)
3680 {
3681 port_id_t d_id;
3682 ql_tgt_t *tq;
3683 int rval = QL_FUNCTION_FAILED;
3684
3685 QL_PRINT_10(ha, "started\n");
3686
3687 if (ha->topology & QL_FABRIC_CONNECTION) {
3688 /* Test switch fabric controller present. */
3689 d_id.b24 = FS_FABRIC_F_PORT;
3690 tq = ql_d_id_to_queue(ha, d_id);
3691 if (tq != NULL) {
3692 /* Get port/node names of F_Port. */
3693 (void) ql_get_port_database(ha, tq, PDF_NONE);
3694
3695 d_id.b24 = FS_NAME_SERVER;
3696 tq = ql_d_id_to_queue(ha, d_id);
3697 if (tq != NULL) {
3698 (void) ql_get_port_database(ha, tq, PDF_NONE);
3699 rval = QL_SUCCESS;
3700 }
3701 }
3702 }
3703
3704 if (rval != QL_SUCCESS) {
3705 EL(ha, "failed=%xh\n", rval);
3706 } else {
3707 /*EMPTY*/
3708 QL_PRINT_10(ha, "done\n");
3709 }
3710 return (rval);
3711 }
3712
3713 /*
3714 * ql_reset_chip
3715 * Reset ISP chip.
3716 *
3717 * Input:
3718 * ha = adapter block pointer.
3719 * All activity on chip must be already stopped.
3720 * ADAPTER_STATE_LOCK must be released.
3721 *
3722 * Context:
3723 * Interrupt or Kernel context, no mailbox commands allowed.
3724 */
3725 void
3726 ql_reset_chip(ql_adapter_state_t *vha)
3727 {
3728 uint32_t cnt;
3729 uint16_t cmd;
3730 ql_adapter_state_t *ha = vha->pha;
3731
3732 QL_PRINT_10(ha, "started\n");
3733
3734 /*
	 * Accessing PCI space while not powered can cause panics
	 * on some platforms (e.g., Sun Blade 1000s).
3737 */
3738 if (ha->power_level == PM_LEVEL_D3) {
3739 QL_PRINT_2(ha, "Low Power exit\n");
3740 return;
3741 }
3742
3743 /* Disable ISP interrupts. */
3744 ql_disable_intr(ha);
3745
3746 /* Reset all outbound mailbox registers */
3747 for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
3748 WRT16_IO_REG(ha, mailbox_in[cnt], (uint16_t)0);
3749 }
3750
3751 if (CFG_IST(ha, CFG_CTRL_82XX)) {
3752 ha->timeout_cnt = 0;
3753 ql_8021_reset_chip(ha);
3754 QL_PRINT_10(ha, "8021 exit\n");
3755 return;
3756 }
3757
3758 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3759 ql_reset_24xx_chip(ha);
3760 QL_PRINT_10(ha, "24xx exit\n");
3761 return;
3762 }
3763 QL_PRINT_10(ha, "CFG_ISP_FW_TYPE_1 reset\n");
3764
3765 /*
3766 * We are going to reset the chip in case of 2300. That might cause
3767 * a PBM ERR if a DMA transaction is in progress. One way of
3768 * avoiding it is to disable Bus Master operation before we start
3769 * the reset activity.
3770 */
3771 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3772 cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
3773 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3774
3775 /* Pause RISC. */
3776 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3777 for (cnt = 0; cnt < 30000; cnt++) {
3778 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3779 break;
3780 }
3781 drv_usecwait(MILLISEC);
3782 }
3783
3784 /*
3785 * A call to ql_isr() can still happen through
	 * ql_mailbox_command(). So mark that we are (or will be)
	 * running from ROM code now.
3788 */
3789 TASK_DAEMON_LOCK(ha);
3790 ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
3791 TASK_DAEMON_UNLOCK(ha);
3792
3793 /* Select FPM registers. */
3794 WRT16_IO_REG(ha, ctrl_status, 0x20);
3795
3796 /* FPM Soft Reset. */
3797 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
3798
3799 /* Toggle FPM reset for 2300 */
3800 if (CFG_IST(ha, CFG_CTRL_2363)) {
3801 WRT16_IO_REG(ha, fpm_diag_config, 0);
3802 }
3803
3804 /* Select frame buffer registers. */
3805 WRT16_IO_REG(ha, ctrl_status, 0x10);
3806
3807 /* Reset frame buffer FIFOs. */
3808 if (CFG_IST(ha, CFG_CTRL_2363)) {
3809 WRT16_IO_REG(ha, fb_cmd, 0x00fc);
3810 /* read back fb_cmd until zero or 3 seconds max */
3811 for (cnt = 0; cnt < 300000; cnt++) {
3812 if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
3813 break;
3814 }
3815 drv_usecwait(10);
3816 }
3817 } else {
3818 WRT16_IO_REG(ha, fb_cmd, 0xa000);
3819 }
3820
3821 /* Select RISC module registers. */
3822 WRT16_IO_REG(ha, ctrl_status, 0);
3823
3824 /* Reset RISC module. */
3825 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
3826
3827 /* Reset ISP semaphore. */
3828 WRT16_IO_REG(ha, semaphore, 0);
3829
3830 /* Release RISC module. */
3831 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3832
3833 /* Ensure mailbox registers are free. */
3834 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
3835 WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);
3836
3837 /* clear the mailbox command pointer. */
3838 INTR_LOCK(ha);
3839 ha->mcp = NULL;
3840 INTR_UNLOCK(ha);
3841
3842 MBX_REGISTER_LOCK(ha);
3843 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3844 ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3845 MBX_REGISTER_UNLOCK(ha);
3846
3847 /* Bus Master is disabled so chip reset is safe. */
3848 if (CFG_IST(ha, CFG_CTRL_2363)) {
3849 WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
3850 drv_usecwait(MILLISEC);
3851
3852 /* Wait for reset to finish. */
3853 for (cnt = 0; cnt < 30000; cnt++) {
3854 if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3855 break;
3856 }
3857 drv_usecwait(MILLISEC);
3858 }
3859 }
3860
3861 /* Wait for RISC to recover from reset. */
3862 for (cnt = 0; cnt < 30000; cnt++) {
3863 if (RD16_IO_REG(ha, mailbox_out[0]) != MBS_ROM_BUSY) {
3864 break;
3865 }
3866 drv_usecwait(MILLISEC);
3867 }
3868
3869 /* restore bus master */
3870 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3871 cmd = (uint16_t)(cmd | PCI_COMM_ME);
3872 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3873
3874 /* Disable RISC pause on FPM parity error. */
3875 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
3876
3877 if (CFG_IST(ha, CFG_CTRL_22XX) &&
3878 RD16_IO_REG(ha, mailbox_out[7]) == 4) {
3879 ha->fw_transfer_size = 128;
3880 }
3881
3882 /* Initialize probe registers */
3883 if (CFG_IST(ha, CFG_SBUS_CARD)) {
3884 /* Pause RISC. */
3885 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3886 for (cnt = 0; cnt < 30000; cnt++) {
3887 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3888 break;
3889 } else {
3890 drv_usecwait(MILLISEC);
3891 }
3892 }
3893
3894 /* Select FPM registers. */
3895 WRT16_IO_REG(ha, ctrl_status, 0x30);
3896
3897 /* Set probe register */
3898 WRT16_IO_REG(ha, mailbox_in[23], 0x204c);
3899
3900 /* Select RISC module registers. */
3901 WRT16_IO_REG(ha, ctrl_status, 0);
3902
3903 /* Release RISC module. */
3904 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3905 }
3906
3907 QL_PRINT_10(ha, "done\n");
3908 }
3909
3910 /*
3911 * ql_reset_24xx_chip
3912 * Reset ISP24xx chip.
3913 *
3914 * Input:
3915 * ha = adapter block pointer.
3916 * All activity on chip must be already stopped.
3917 *
3918 * Context:
3919 * Interrupt or Kernel context, no mailbox commands allowed.
3920 */
3921 static void
3922 ql_reset_24xx_chip(ql_adapter_state_t *ha)
3923 {
3924 uint32_t timer, stat;
3925
3926 QL_PRINT_10(ha, "started\n");
3927
3928 /* Shutdown DMA. */
3929 if (CFG_IST(ha, CFG_MWB_4096_SUPPORT)) {
3930 WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);
3931 } else {
3932 WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN);
3933 }
3934
3935 /* Wait for DMA to stop. */
3936 for (timer = 0; timer < 30000; timer++) {
3937 if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
3938 break;
3939 }
3940 drv_usecwait(100);
3941 }
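/*
 * The wait above allows roughly 3 seconds (30000 x 100 us) for any
 * in-flight DMA to drain before the firmware is stopped.
 */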
3942
3943 /* Stop the firmware. */
3944 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3945 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
3946 WRT16_IO_REG(ha, mailbox_in[1], 0);
3947 WRT16_IO_REG(ha, mailbox_in[2], 0);
3948 WRT16_IO_REG(ha, mailbox_in[3], 0);
3949 WRT16_IO_REG(ha, mailbox_in[4], 0);
3950 WRT16_IO_REG(ha, mailbox_in[5], 0);
3951 WRT16_IO_REG(ha, mailbox_in[6], 0);
3952 WRT16_IO_REG(ha, mailbox_in[7], 0);
3953 WRT16_IO_REG(ha, mailbox_in[8], 0);
3954 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
3955 for (timer = 0; timer < 30000; timer++) {
3956 stat = RD32_IO_REG(ha, risc2host);
3957 if (stat & BIT_15) {
3958 if ((stat & 0xff) < 0x12) {
3959 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3960 break;
3961 }
3962 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3963 }
3964 drv_usecwait(100);
3965 }
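/*
 * Handshake note (an assumption based on how this loop treats the
 * status): BIT_15 of risc2host indicates a RISC-to-host interrupt is
 * pending. A low status byte below 0x12 is taken as completion of the
 * MBC_STOP_FIRMWARE request; anything else is treated as an
 * asynchronous event whose interrupt is simply cleared while waiting.
 */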
3966
3967 /* Reset the chip. */
3968 WRT32_IO_REG(ha, ctrl_status, ISP_RESET);
3969 drv_usecwait(100);
3970
3971 /* Wait for RISC to recover from reset. */
3972 for (timer = 30000; timer; timer--) {
3973 ha->rom_status = RD16_IO_REG(ha, mailbox_out[0]);
3974 if (CFG_IST(ha, CFG_CTRL_278083)) {
3975 /* Wait for RISC to recover from reset. */
3976 if ((ha->rom_status & MBS_ROM_STATUS_MASK) !=
3977 MBS_ROM_BUSY) {
3978 break;
3979 }
3980 } else {
3981 /* Wait for idle status from ROM firmware. */
3982 if (ha->rom_status == MBS_ROM_IDLE) {
3983 break;
3984 }
3985 }
3986 drv_usecwait(100);
3987 }
3988
3989 /* Wait for reset to finish. */
3990 for (timer = 0; timer < 30000; timer++) {
3991 if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3992 break;
3993 }
3994 drv_usecwait(100);
3995 }
3996
3997 ha->adapter_stats->revlvl.isp2200 = RD16_IO_REG(ha, mailbox_out[4]);
3998 ha->adapter_stats->revlvl.risc = RD16_IO_REG(ha, mailbox_out[5]);
3999 ha->adapter_stats->revlvl.frmbfr = RD16_IO_REG(ha, mailbox_out[6]);
4000 ha->adapter_stats->revlvl.riscrom = RD16_IO_REG(ha, mailbox_out[8]);
4001
4002 /* Ensure mailbox registers are free. */
4003 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
4004 WRT32_IO_REG(ha, hccr, HC24_CLR_HOST_INT);
4005
4006 /* clear the mailbox command pointer. */
4007 INTR_LOCK(ha);
4008 ha->mcp = NULL;
4009 INTR_UNLOCK(ha);
4010
4011 /* Ensure mailbox registers are free. */
4012 MBX_REGISTER_LOCK(ha);
4013 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
4014 ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
4015 MBX_REGISTER_UNLOCK(ha);
4016
4017 if (ha->flags & MPI_RESET_NEEDED) {
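/*
 * Restart the MPI firmware via MBC_RESTART_MPI, using the same
 * host-interrupt handshake as the stop-firmware sequence above.
 */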
4018 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
4019 WRT16_IO_REG(ha, mailbox_in[0], MBC_RESTART_MPI);
4020 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
4021 for (timer = 0; timer < 30000; timer++) {
4022 stat = RD32_IO_REG(ha, risc2host);
4023 if (stat & BIT_15) {
4024 if ((stat & 0xff) < 0x12) {
4025 WRT32_IO_REG(ha, hccr,
4026 HC24_CLR_RISC_INT);
4027 break;
4028 }
4029 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
4030 }
4031 drv_usecwait(100);
4032 }
4033 ADAPTER_STATE_LOCK(ha);
4034 ha->flags &= ~MPI_RESET_NEEDED;
4035 ADAPTER_STATE_UNLOCK(ha);
4036 }
4037
4038 QL_PRINT_10(ha, "done\n");
4039 }
4040
4041 /*
4042 * ql_abort_isp
4043 * Resets ISP and aborts all outstanding commands.
4044 *
4045 * Input:
4046 * ha = adapter state pointer.
4047 * DEVICE_QUEUE_LOCK must be released.
4048 *
4049 * Returns:
4050 * ql local function return status code.
4051 *
4052 * Context:
4053 * Kernel context.
4054 */
4055 int
4056 ql_abort_isp(ql_adapter_state_t *vha)
4057 {
4058 ql_link_t *link, *link2;
4059 uint16_t index;
4060 ql_tgt_t *tq;
4061 ql_lun_t *lq;
4062 int rval = QL_SUCCESS;
4063 ql_adapter_state_t *ha = vha->pha;
4064 boolean_t abort_loop_down = B_FALSE;
4065
4066 QL_PRINT_2(ha, "started\n");
4067
4068 TASK_DAEMON_LOCK(ha);
4069 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
4070 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
4071 (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
4072 TASK_DAEMON_UNLOCK(ha);
4073 QL_PRINT_2(ha, "already active or suspended tdf=0x%llx, "
4074 "flgs=0x%llx\n", ha->task_daemon_flags, ha->flags);
4075 return (rval);
4076 }
4077
4078 ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
4079 ha->task_daemon_flags &= ~(MARKER_NEEDED | FIRMWARE_UP |
4080 FIRMWARE_LOADED);
4081 for (vha = ha; vha != NULL; vha = vha->vp_next) {
4082 vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
4083 LOOP_RESYNC_NEEDED);
4084 vha->task_daemon_flags |= LOOP_DOWN;
4085 if (vha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
4086 abort_loop_down = B_TRUE;
4087 vha->loop_down_timer = LOOP_DOWN_TIMER_START;
4088 }
4089 }
4090
4091 TASK_DAEMON_UNLOCK(ha);
4092
4093 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
4094
4095 if (ha->mailbox_flags & MBX_BUSY_FLG) {
4096 /* Acquire mailbox register lock. */
4097 MBX_REGISTER_LOCK(ha);
4098
4099 /* Wake up the mailbox routine. */
4100 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
4101 cv_broadcast(&ha->cv_mbx_intr);
4102
4103 /* Release mailbox register lock. */
4104 MBX_REGISTER_UNLOCK(ha);
4105
4106 /* Wait for mailbox. */
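/*
 * The wait below is bounded at 100 clock ticks, roughly one second
 * assuming the stock 100 Hz clock rate.
 */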
4107 for (index = 100; index &&
4108 ha->mailbox_flags & MBX_ABORT; index--) {
4109 delay(1);
4110 }
4111 }
4112
4113 /* Wait for commands to end gracefully if not in panic. */
4114 if (ha->flags & PARITY_ERROR) {
4115 ADAPTER_STATE_LOCK(ha);
4116 ha->flags &= ~PARITY_ERROR;
4117 ADAPTER_STATE_UNLOCK(ha);
4118 } else if (ddi_in_panic() == 0) {
4119 ql_cmd_wait(ha);
4120 }
4121
4122 rval = QL_ABORTED;
4123 if (ha->flags & FW_DUMP_NEEDED) {
4124 rval = ql_binary_fw_dump(ha, TRUE);
4125 }
4126
4127 /* Shutdown IP. */
4128 if (ha->flags & IP_INITIALIZED) {
4129 (void) ql_shutdown_ip(ha);
4130 }
4131
4132 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
4133 TASK_DAEMON_LOCK(ha);
4134 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
4135 TASK_DAEMON_UNLOCK(ha);
4136 }
4137
4138 /* Reset the chip. */
4139 if (rval != QL_SUCCESS) {
4140 rval = QL_SUCCESS;
4141 ql_reset_chip(ha);
4142 }
4143
4144 /*
4145 * Even though we have waited for outstanding commands to complete,
4146 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
4147 * there could still be an interrupt thread active. The interrupt
4148 * lock will prevent us from getting an sp from the outstanding
4149 * cmds array that the ISR may be using.
4150 */
4151
4152 /* Place all commands in outstanding cmd list on device queue. */
4153 ql_requeue_all_cmds(ha);
4154
4155 /*
4156 * Clear per LUN active count, because there should not be
4157 * any IO outstanding at this time.
4158 */
4159 for (vha = ha; vha != NULL; vha = vha->vp_next) {
4160 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4161 link = vha->dev[index].first;
4162 while (link != NULL) {
4163 tq = link->base_address;
4164 link = link->next;
4165 DEVICE_QUEUE_LOCK(tq);
4166 tq->outcnt = 0;
4167 tq->flags &= ~TQF_QUEUE_SUSPENDED;
4168 for (link2 = tq->lun_queues.first;
4169 link2 != NULL; link2 = link2->next) {
4170 lq = link2->base_address;
4171 lq->lun_outcnt = 0;
4172 lq->flags &= ~LQF_UNTAGGED_PENDING;
4173 }
4174 DEVICE_QUEUE_UNLOCK(tq);
4175 }
4176 }
4177 }
4178
4179 if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
4180 if (ha->dev_state != NX_DEV_READY) {
4181 EL(ha, "dev_state not ready\n");
4182 } else if ((rval = ql_mbx_wrap_test(ha, NULL)) == QL_SUCCESS) {
4183 rval = ql_load_isp_firmware(ha);
4184 }
4185 }
4186
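/*
 * Bring the adapter back up: reprogram the PCI cache line setting
 * (ql_set_cache_line()), re-initialize the request/response rings
 * (ql_init_rings()), then wait for the firmware to report ready; the
 * second argument to ql_fw_ready() is presumably a timeout in seconds.
 */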
4187 if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
4188 QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
4189 (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {
4190
4191 /* Enable ISP interrupts. */
4192 if (!(ha->flags & INTERRUPTS_ENABLED)) {
4193 ql_enable_intr(ha);
4194 }
4195
4196 /* Clear reset/abort flags that may have been set. */
4197 TASK_DAEMON_LOCK(ha);
4198 ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
4199 ABORT_ISP_ACTIVE);
4200 TASK_DAEMON_UNLOCK(ha);
4201
4202 /* Set loop online, if it really is. */
4203 ql_loop_online(ha);
4204 } else {
4205 /* Enable ISP interrupts. */
4206 if (!(ha->flags & INTERRUPTS_ENABLED)) {
4207 ql_enable_intr(ha);
4208 }
4209
4210 TASK_DAEMON_LOCK(ha);
4211 for (vha = ha; vha != NULL; vha = vha->vp_next) {
4212 vha->task_daemon_flags |= LOOP_DOWN;
4213 }
4214 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
4215 TASK_DAEMON_UNLOCK(ha);
4216
4217 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
4218
4219 ql_abort_queues(ha);
4220
4221 TASK_DAEMON_LOCK(ha);
4222 ha->task_daemon_flags &= ~ABORT_ISP_ACTIVE;
4223 TASK_DAEMON_UNLOCK(ha);
4224 }
4225
4226 for (vha = ha; vha != NULL; vha = vha->vp_next) {
4227 if (!(vha->task_daemon_flags & LOOP_DOWN) &&
4228 abort_loop_down == B_TRUE) {
4229 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
4230 }
4231 }
4232
4233 if (rval != QL_SUCCESS) {
4234 EL(ha, "failed, rval = %xh\n", rval);
4235 } else {
4236 /*EMPTY*/
4237 QL_PRINT_2(ha, "done\n");
4238 }
4239 return (rval);
4240 }
4241
4242 /*
4243 * ql_requeue_all_cmds
4244 * Requeue all commands.
4245 *
4246 * Input:
4247 * ha = virtual adapter state pointer.
4248 *
4252 * Context:
4253 * Kernel context.
4254 */
4255 void
4256 ql_requeue_all_cmds(ql_adapter_state_t *ha)
4257 {
4258 ql_link_t *link;
4259 ql_tgt_t *tq;
4260 ql_lun_t *lq;
4261 ql_srb_t *sp;
4262 uint16_t index;
4263
4264 /* Place all commands in outstanding cmd list on device queue. */
4265 for (index = 1; index < ha->osc_max_cnt; index++) {
4266 INTR_LOCK(ha);
4267 REQUEST_RING_LOCK(ha);
4268 if ((link = ha->pending_cmds.first) != NULL) {
4269 sp = link->base_address;
4270 ql_remove_link(&ha->pending_cmds, &sp->cmd);
4271
4272 REQUEST_RING_UNLOCK(ha);
4273 index = 0;
4274 } else {
4275 REQUEST_RING_UNLOCK(ha);
4276 if ((sp = ha->outstanding_cmds[index]) == NULL ||
4277 sp == QL_ABORTED_SRB(ha)) {
4278 INTR_UNLOCK(ha);
4279 continue;
4280 }
4281 }
4282
4283 /*
4284 * It is not obvious, but the index for commands pulled from the
4285 * pending list is zero, and that entry of the outstanding array is
4286 * never used, so nulling it is harmless.
4287 */
4288
4289 ha->outstanding_cmds[index] = NULL;
4290 sp->handle = 0;
4291 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
4292
4293 INTR_UNLOCK(ha);
4294
4295 /* If command timeout. */
4296 if (sp->flags & SRB_COMMAND_TIMEOUT) {
4297 sp->pkt->pkt_reason = CS_TIMEOUT;
4298 sp->flags &= ~SRB_RETRY;
4299 sp->flags |= SRB_ISP_COMPLETED;
4300
4301 /* Call done routine to handle completion. */
4302 ql_done(&sp->cmd, B_FALSE);
4303 continue;
4304 }
4305
4306 /* Acquire target queue lock. */
4307 lq = sp->lun_queue;
4308 tq = lq->target_queue;
4309
4310 /* Return any tape IO; the exchange was dropped by the chip reset. */
4311 if (tq->flags & TQF_TAPE_DEVICE) {
4312 sp->pkt->pkt_reason = CS_TRANSPORT;
4313 sp->flags &= ~SRB_RETRY;
4314 sp->flags |= SRB_ISP_COMPLETED;
4315
4316 EL(ha, "rtn seq IO, sp=%ph\n", sp);
4317
4318 /* Call done routine to handle completion. */
4319 ql_done(&sp->cmd, B_FALSE);
4320 continue;
4321 }
4322
4323 DEVICE_QUEUE_LOCK(tq);
4324
4325 /* Reset watchdog time. */
4326 sp->wdg_q_time = sp->init_wdg_q_time;
4327
4328 /* Place request back on top of device queue. */
4329 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
4330 SRB_RETRY);
4331
4332 ql_add_link_t(&lq->cmd, &sp->cmd);
4333 sp->flags |= SRB_IN_DEVICE_QUEUE;
4334
4335 /* Release target queue lock. */
4336 DEVICE_QUEUE_UNLOCK(tq);
4337 }
4338 }
4339
4340 /*
4341 * ql_vport_control
4342 * Issue Virtual Port Control command.
4343 *
4344 * Input:
4345 * ha = virtual adapter state pointer.
4346 * cmd = control command.
4347 *
4348 * Returns:
4349 * ql local function return status code.
4350 *
4351 * Context:
4352 * Kernel context.
4353 */
4354 int
4355 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd)
4356 {
4357 ql_mbx_iocb_t *pkt;
4358 uint8_t bit;
4359 int rval;
4360 uint32_t pkt_size;
4361
4362 QL_PRINT_10(ha, "started\n");
4363
4364 if (ha->vp_index != 0) {
4365 pkt_size = sizeof (ql_mbx_iocb_t);
4366 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4367 if (pkt == NULL) {
4368 EL(ha, "failed, kmem_zalloc\n");
4369 return (QL_MEMORY_ALLOC_FAILED);
4370 }
4371
4372 pkt->vpc.entry_type = VP_CONTROL_TYPE;
4373 pkt->vpc.entry_count = 1;
4374 pkt->vpc.command = cmd;
4375 pkt->vpc.vp_count = 1;
4376 pkt->vpc.fcf_index = ha->fcoe_fcf_idx;
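/*
 * Map this virtual port into the IOCB's VP index bitmap: VP index 1
 * corresponds to bit 0 of vp_index[0], VP index 9 to bit 0 of
 * vp_index[1], and so on.
 */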
4377 bit = (uint8_t)(ha->vp_index - 1);
4378 pkt->vpc.vp_index[bit / 8] = (uint8_t)
4379 (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8);
4380
4381 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4382 if (rval == QL_SUCCESS && pkt->vpc.status != 0) {
4383 rval = QL_COMMAND_ERROR;
4384 }
4385
4386 kmem_free(pkt, pkt_size);
4387 } else {
4388 rval = QL_SUCCESS;
4389 }
4390
4391 if (rval != QL_SUCCESS) {
4392 EL(ha, "failed, rval = %xh\n", rval);
4393 } else {
4394 /*EMPTY*/
4395 QL_PRINT_10(ha, "done\n");
4396 }
4397 return (rval);
4398 }
4399
4400 /*
4401 * ql_vport_modify
4402 * Issue a Modify Virtual Port command.
4403 *
4404 * Input:
4405 * ha = virtual adapter state pointer.
4406 * cmd = command.
4407 * opt = option.
4408 *
4409 * Context:
4410 * Interrupt or Kernel context, no mailbox commands allowed.
4411 */
4412 int
4413 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt)
4414 {
4415 ql_mbx_iocb_t *pkt;
4416 int rval;
4417 uint32_t pkt_size;
4418
4419 QL_PRINT_10(ha, "started\n");
4420
4421 if (ha->pha->task_daemon_flags & LOOP_DOWN) {
4422 QL_PRINT_10(ha, "loop_down\n");
4423 return (QL_FUNCTION_FAILED);
4424 }
4425
4426 pkt_size = sizeof (ql_mbx_iocb_t);
4427 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4428 if (pkt == NULL) {
4429 EL(ha, "failed, kmem_zalloc\n");
4430 return (QL_MEMORY_ALLOC_FAILED);
4431 }
4432
4433 pkt->vpm.entry_type = VP_MODIFY_TYPE;
4434 pkt->vpm.entry_count = 1;
4435 pkt->vpm.command = cmd;
4436 pkt->vpm.vp_count = 1;
4437 pkt->vpm.first_vp_index = ha->vp_index;
4438 pkt->vpm.first_options = opt;
4439 pkt->vpm.fcf_index = ha->fcoe_fcf_idx;
4440 bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name,
4441 8);
4442 bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name,
4443 8);
4444
4445 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4446 if (rval == QL_SUCCESS && pkt->vpm.status != 0) {
4447 EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval,
4448 pkt->vpm.status);
4449 rval = QL_COMMAND_ERROR;
4450 }
4451
4452 kmem_free(pkt, pkt_size);
4453
4454 if (rval != QL_SUCCESS) {
4455 EL(ha, "failed, rval = %xh\n", rval);
4456 } else {
4457 /*EMPTY*/
4458 QL_PRINT_10(ha, "done\n");
4459 }
4460 return (rval);
4461 }
4462
4463 /*
4464 * ql_vport_enable
4465 * Enable virtual port.
4466 *
4467 * Input:
4468 * ha = virtual adapter state pointer.
4469 *
4470 * Context:
4471 * Kernel context.
4472 */
4473 int
4474 ql_vport_enable(ql_adapter_state_t *ha)
4475 {
4476 int timer;
4477
4478 QL_PRINT_10(ha, "started\n");
4479
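/*
 * Preserve the port-speed bits in the state word and mark the port
 * offline until the enable completes.
 */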
4480 ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4481 TASK_DAEMON_LOCK(ha);
4482 ha->task_daemon_flags |= LOOP_DOWN;
4483 ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
4484 TASK_DAEMON_UNLOCK(ha);
4485
4486 ADAPTER_STATE_LOCK(ha);
4487 ha->flags |= VP_ENABLED;
4488 ha->flags &= ~VP_ID_NOT_ACQUIRED;
4489 ADAPTER_STATE_UNLOCK(ha);
4490 ha->fcoe_fcf_idx = 0;
4491
4492 if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
4493 VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
4494 QL_PRINT_2(ha, "failed to enable virtual port\n");
4495 return (QL_FUNCTION_FAILED);
4496 }
4497 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
4498 /* Wait for loop to come up. */
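/*
 * The bound of 3000 ticks is roughly 30 seconds assuming the stock
 * 100 Hz clock; bail out early if the firmware reports that it could
 * not acquire a VP ID.
 */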
4499 for (timer = 0; timer < 3000 &&
4500 !(ha->task_daemon_flags & STATE_ONLINE);
4501 timer++) {
4502 if (ha->flags & VP_ID_NOT_ACQUIRED) {
4503 break;
4504 }
4505 delay(1);
4506 }
4507 }
4508
4509 QL_PRINT_10(ha, "done\n");
4510
4511 return (QL_SUCCESS);
4512 }
4513
4514 /*
4515 * ql_vport_create
4516 * Create virtual port context.
4517 *
4518 * Input:
4519 * ha: parent adapter state pointer.
4520 * index: virtual port index number.
4521 *
4522 * Context:
4523 * Kernel context.
4524 */
4525 ql_adapter_state_t *
4526 ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
4527 {
4528 ql_adapter_state_t *vha;
4529
4530 QL_PRINT_10(ha, "started\n");
4531
4532 /* Inherit the parent's data. */
4533 vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);
4534
4535 ADAPTER_STATE_LOCK(ha);
4536 bcopy(ha, vha, sizeof (ql_adapter_state_t));
4537 vha->pi_attrs = NULL;
4538 vha->ub_outcnt = 0;
4539 vha->ub_allocated = 0;
4540 vha->flags = 0;
4541 vha->task_daemon_flags = 0;
4542 ha->vp_next = vha;
4543 vha->pha = ha;
4544 vha->vp_index = index;
4545 ADAPTER_STATE_UNLOCK(ha);
4546
4547 vha->hba.next = NULL;
4548 vha->hba.prev = NULL;
4549 vha->hba.base_address = vha;
4550 vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4551 vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
4552 KM_SLEEP);
4553 vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
4554 KM_SLEEP);
4555
4556 QL_PRINT_10(ha, "done\n");
4557
4558 return (vha);
4559 }
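
/*
 * A minimal sketch of how these virtual-port routines fit together;
 * the caller, error handling, and teardown ordering shown here are
 * assumptions for illustration, not taken from this file:
 *
 *	vha = ql_vport_create(ha, index);
 *	if (ql_vport_enable(vha) != QL_SUCCESS) {
 *		ql_vport_destroy(vha);		(hypothetical error path)
 *	}
 *	...
 *	ql_vport_destroy(vha);			(when the vport is removed)
 */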
4560
4561 /*
4562 * ql_vport_destroy
4563 * Destroys virtual port context.
4564 *
4565 * Input:
4566 * ha = virtual adapter state pointer.
4567 *
4568 * Context:
4569 * Kernel context.
4570 */
4571 void
4572 ql_vport_destroy(ql_adapter_state_t *ha)
4573 {
4574 ql_adapter_state_t *vha;
4575
4576 QL_PRINT_10(ha, "started\n");
4577
4578 /* Remove port from list. */
4579 ADAPTER_STATE_LOCK(ha);
4580 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
4581 if (vha->vp_next == ha) {
4582 vha->vp_next = ha->vp_next;
4583 break;
4584 }
4585 }
4586 ADAPTER_STATE_UNLOCK(ha);
4587
4588 if (ha->ub_array != NULL) {
4589 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
4590 }
4591 if (ha->dev != NULL) {
4592 kmem_free(ha->dev, sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE);
4593 }
4594 QL_PRINT_10(ha, "done\n");
4595
4596 kmem_free(ha, sizeof (ql_adapter_state_t));
4597 }
4598
4599 /*
4600 * ql_mps_reset
4601 * Reset MPS for FCoE functions.
4602 *
4603 * Input:
4604 * ha = virtual adapter state pointer.
4605 *
4606 * Context:
4607 * Kernel context.
4608 */
4609 static void
4610 ql_mps_reset(ql_adapter_state_t *ha)
4611 {
4612 uint32_t data, dctl = 1000;
4613
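/*
 * The loop below appears to take a lock: RISC RAM word 0x7c00 is
 * written to 1 and polled until BIT_0 reads back set (up to about
 * 1000 attempts); it is written back to 0 before this routine returns.
 */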
4614 do {
4615 if (dctl-- == 0 || ql_wrt_risc_ram_word(ha, 0x7c00, 1) !=
4616 QL_SUCCESS) {
4617 return;
4618 }
4619 if (ql_rd_risc_ram_word(ha, 0x7c00, &data) != QL_SUCCESS) {
4620 (void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4621 return;
4622 }
4623 } while (!(data & BIT_0));
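/*
 * Assumption: PCI config offset 0x54 is the PCIe Device Control
 * register, whose bits 7:5 (mask 0xe0) hold the Max_Payload_Size.
 * The code below copies that field into what is presumably the
 * firmware's MPS setting at RISC RAM 0x7A15 whenever the two
 * disagree; note that both branches of the comparison perform the
 * identical update.
 */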
4624
4625 if (ql_rd_risc_ram_word(ha, 0x7A15, &data) == QL_SUCCESS) {
4626 dctl = (uint16_t)ql_pci_config_get16(ha, 0x54);
4627 if ((data & 0xe0) < (dctl & 0xe0)) {
4628 data &= 0xff1f;
4629 data |= dctl & 0xe0;
4630 (void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
4631 } else if ((data & 0xe0) != (dctl & 0xe0)) {
4632 data &= 0xff1f;
4633 data |= dctl & 0xe0;
4634 (void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
4635 }
4636 }
4637 (void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4638 }
4639
4640 /*
4641 * ql_update_dev
4642 * Updates device status on loop reconfigure.
4643 *
4644 * Input:
4645 * ha: adapter state pointer.
4646 * index: list index of device data.
4647 *
4648 * Context:
4649 * Kernel context.
4650 */
4651 static void
4652 ql_update_dev(ql_adapter_state_t *ha, uint32_t index)
4653 {
4654 ql_link_t *link;
4655 ql_tgt_t *tq;
4656 int rval;
4657
4658 QL_PRINT_3(ha, "started\n");
4659
4660 link = ha->dev[index].first;
4661 while (link != NULL) {
4662 tq = link->base_address;
4663 link = link->next;
4664
4665 if (tq->loop_id & PORT_LOST_ID &&
4666 !(tq->flags & (TQF_INITIATOR_DEVICE | TQF_FABRIC_DEVICE))) {
4667
4668 tq->loop_id &= ~PORT_LOST_ID;
4669
4670 if (VALID_DEVICE_ID(ha, tq->loop_id)) {
4671 /* implicit logo due to fw issue */
4672 rval = ql_get_port_database(ha, tq, PDF_NONE);
4673
4674 if (rval == QL_NOT_LOGGED_IN) {
4675 if (tq->master_state ==
4676 PD_STATE_PORT_UNAVAILABLE) {
4677 (void) ql_logout_fabric_port(
4678 ha, tq);
4679 tq->loop_id = PORT_NO_LOOP_ID;
4680 }
4681 } else if (rval == QL_SUCCESS) {
4682 tq->loop_id = PORT_NO_LOOP_ID;
4683 }
4684 }
4685 } else if (ha->topology & QL_NL_PORT &&
4686 tq->flags & TQF_FABRIC_DEVICE) {
4687
4688 tq->loop_id &= ~PORT_LOST_ID;
4689
4690 if (VALID_DEVICE_ID(ha, tq->loop_id)) {
4691 /* implicit logo due to fw issue */
4692 rval = ql_get_port_database(ha, tq, PDF_NONE);
4693
4694 if (rval == QL_NOT_LOGGED_IN) {
4695 if (tq->master_state ==
4696 PD_STATE_PORT_UNAVAILABLE) {
4697 (void) ql_logout_fabric_port(
4698 ha, tq);
4699 /*
4700 * fabric to AL topo change
4701 */
4702 tq->loop_id = PORT_NO_LOOP_ID;
4703 }
4704 } else if (rval == QL_SUCCESS) {
4705 /*
4706 * Normally this is loop ID 7fe.
4707 * Don't issue a LOGO; it causes a
4708 * logout in a single-target AL.
4709 */
4710 tq->loop_id = PORT_NO_LOOP_ID;
4711 }
4712 }
4713 }
4714 }
4715
4716 QL_PRINT_3(ha, "done\n");
4717 }