1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 #pragma ident "Copyright 2015 QLogic Corporation; ql_isr.c"
29
30 /*
31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 *
33 * ***********************************************************************
34 * * **
35 * * NOTICE **
36 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
37 * * ALL RIGHTS RESERVED **
38 * * **
39 * ***********************************************************************
40 *
41 */
42
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_iocb.h>
47 #include <ql_isr.h>
48 #include <ql_init.h>
49 #include <ql_mbx.h>
50 #include <ql_nx.h>
51 #include <ql_xioctl.h>
52 #include <ql_fm.h>
53
54 /*
55 * Local Function Prototypes.
56 */
57 static void ql_clr_risc_intr(ql_adapter_state_t *);
58 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, int, uint32_t,
59 uint64_t *);
60 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint64_t *,
61 uint64_t *);
62 static void ql_async_event(ql_adapter_state_t *, ql_response_q_t *, uint32_t,
63 ql_head_t *, uint64_t *, uint64_t *);
64 static void ql_fast_fcp_post(ql_srb_t *, ql_response_q_t *);
65 static void ql_response_pkt(ql_adapter_state_t *, ql_response_q_t *,
66 ql_head_t *, uint64_t *, uint64_t *);
67 static void ql_error_entry(ql_adapter_state_t *, ql_response_q_t *,
68 response_t *, ql_head_t *, uint64_t *, uint64_t *);
69 static int ql_status_entry(ql_adapter_state_t *, ql_response_q_t *,
70 sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
71 static int ql_24xx_status_entry(ql_adapter_state_t *, ql_response_q_t *,
72 sts_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
73 static int ql_status_error(ql_adapter_state_t *, ql_response_q_t *, ql_srb_t *,
74 sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
75 static void ql_status_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
76 sts_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
77 static void ql_ip_entry(ql_adapter_state_t *, ql_response_q_t *, ip_entry_t *,
78 ql_head_t *, uint64_t *, uint64_t *);
79 static void ql_ip_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
80 ip_rcv_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
81 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
82 ip_rcv_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
83 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
84 ip_rcv_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
85 static void ql_ms_entry(ql_adapter_state_t *, ql_response_q_t *, ms_entry_t *,
86 ql_head_t *, uint64_t *, uint64_t *);
87 static void ql_report_id_entry(ql_adapter_state_t *, ql_response_q_t *,
88 report_id_acq_t *, ql_head_t *, uint64_t *, uint64_t *);
89 static void ql_els_passthru_entry(ql_adapter_state_t *, ql_response_q_t *,
90 els_passthru_entry_rsp_t *, ql_head_t *, uint64_t *, uint64_t *);
91 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *,
92 ql_response_q_t *, uint32_t *, uint32_t *, uint64_t *, uint64_t *);
93 static void ql_signal_abort(ql_adapter_state_t *, uint64_t *);
94
95 /*
96 * ql_disable_intr
97 * Disable interrupts.
98 *
99 * Input:
100 * ha: adapter state pointer.
101 *
102 * Context:
103 * Interrupt or Kernel context, no mailbox commands allowed.
104 */
105 void
106 ql_disable_intr(ql_adapter_state_t *ha)
107 {
108 int i, rval;
109
110 QL_PRINT_10(ha, "started\n");
111
112 if (CFG_IST(ha, CFG_CTRL_82XX)) {
113 ql_8021_disable_intrs(ha);
114 } else {
115 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
116 WRT32_IO_REG(ha, ictrl, 0);
117 (void) RD32_IO_REG(ha, ictrl); /* PCI posting */
118 } else {
119 WRT16_IO_REG(ha, ictrl, 0);
120 (void) RD16_IO_REG(ha, ictrl); /* PCI posting */
121 }
122 }
123 if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
124 for (i = 0; i < ha->intr_cnt; i++) {
125 QL_PRINT_10(ha, "intr_set_mask %d\n", i);
126 if ((rval = ddi_intr_set_mask(ha->htable[i])) !=
127 DDI_SUCCESS) {
128 EL(ha, "intr_set_mask status=%xh\n", rval);
129 }
130 }
131 }
132 ADAPTER_STATE_LOCK(ha);
133 ha->flags &= ~INTERRUPTS_ENABLED;
134 ADAPTER_STATE_UNLOCK(ha);
135
136 QL_PRINT_10(ha, "done\n");
137 }
138
139 /*
140 * ql_enaable_intr
141 * Enable interrupts.
142 *
143 * Input:
144 * ha: adapter state pointer.
145 *
146 * Context:
147 * Interrupt or Kernel context, no mailbox commands allowed.
148 */
149 void
150 ql_enable_intr(ql_adapter_state_t *ha)
151 {
152 int i, rval;
153
154 QL_PRINT_10(ha, "started\n");
155
156 if (CFG_IST(ha, CFG_CTRL_82XX)) {
157 ql_8021_enable_intrs(ha);
158 } else {
159 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
160 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
161 (void) RD32_IO_REG(ha, ictrl); /* PCI posting */
162 } else {
163 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
164 (void) RD16_IO_REG(ha, ictrl); /* PCI posting */
165 }
166 }
167 if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
168 for (i = 0; i < ha->intr_cnt; i++) {
169 QL_PRINT_10(ha, "intr_clr_mask %d\n", i);
170 if ((rval = ddi_intr_clr_mask(ha->htable[i])) !=
171 DDI_SUCCESS) {
172 EL(ha, "intr_clr_mask status=%xh\n", rval);
173 }
174 }
175 }
176 ADAPTER_STATE_LOCK(ha);
177 ha->flags |= INTERRUPTS_ENABLED;
178 ADAPTER_STATE_UNLOCK(ha);
179
180 QL_PRINT_10(ha, "done\n");
181 }
182
183 /*
184 * ql_clr_risc_intr
185 * Clear firmware interrupt.
186 *
187 * Input:
188 * ha: adapter state pointer.
189 *
190 * Context:
191 * Interrupt or Kernel context, no mailbox commands allowed.
192 */
193 static void
194 ql_clr_risc_intr(ql_adapter_state_t *ha)
195 {
196 QL_PRINT_3(ha, "started\n");
197
198 if (CFG_IST(ha, CFG_CTRL_82XX)) {
199 ql_8021_clr_fw_intr(ha);
200 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
201 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
202 RD32_IO_REG(ha, hccr); /* PCI posting. */
203 } else {
204 WRT16_IO_REG(ha, semaphore, 0);
205 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
206 RD16_IO_REG(ha, hccr); /* PCI posting. */
207 }
208
209 QL_PRINT_3(ha, "done\n");
210 }
211
212 /*
213 * ql_isr
214 * Process all INTX intr types.
215 *
216 * Input:
217 * arg1: adapter state pointer.
218 *
219 * Returns:
220 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
221 *
222 * Context:
223 * Interrupt or Kernel context, no mailbox commands allowed.
224 */
225 /* ARGSUSED */
226 uint_t
227 ql_isr(caddr_t arg1)
228 {
229 return (ql_isr_aif(arg1, 0));
230 }
231
232 /*
233 * ql_isr_aif
234 * Process mailbox and I/O command completions.
235 *
236 * Input:
237 * arg: adapter state pointer.
238 * arg2: interrupt vector.
239 *
240 * Returns:
241 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
242 *
243 * Context:
244 * Interrupt or Kernel context, no mailbox commands allowed.
245 */
246 /* ARGSUSED */
247 uint_t
248 ql_isr_aif(caddr_t arg, caddr_t arg2)
249 {
250 uint32_t mbx, stat;
251 ql_adapter_state_t *ha = (void *)arg;
252 uint64_t set_flags = 0, reset_flags = 0;
253 ql_head_t isr_done_q = {NULL, NULL};
254 uint_t rval = DDI_INTR_UNCLAIMED;
255 ql_response_q_t *rsp_q = NULL;
256 int intr, index = (int)((uintptr_t)arg2);
257
258 QL_PRINT_3(ha, "started, index=%d\n", index);
259
260 /* Exit if not attached. */
261 if (ha == NULL || ha->intr_pri == NULL) {
262 EL(ha, "ha=%p, intr_pri=%p not attached\n", (void *)ha,
263 ha != NULL ? ha->intr_pri : NULL);
264 return (DDI_INTR_UNCLAIMED);
265 }
266
267 /* Exit if chip not powered up. */
268 if (ha->power_level != PM_LEVEL_D0) {
269 EL(ha, "power down exit\n");
270 return (DDI_INTR_UNCLAIMED);
271 }
272 QL_PM_LOCK(ha);
273 ha->pm_busy++;
274 QL_PM_UNLOCK(ha);
275
276 /* Acquire interrupt lock. */
277 if (index > ha->rsp_queues_cnt) {
278 intr = index = 0;
279 } else if (index) {
280 intr = index - 1;
281 } else {
282 intr = 0;
283 }
284 INDX_INTR_LOCK(ha, intr);
285
286 if (index && ha->flags & NO_INTR_HANDSHAKE) {
287 QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE, index=%xh\n", index);
288 index--;
289 if (index < ha->rsp_queues_cnt) {
290 rsp_q = ha->rsp_queues[index];
291 }
292 if (rsp_q == NULL) {
293 EL(ha, "unsupported MULTI_Q_RSP_UPDATE, index=%d\n",
294 index);
295 rsp_q = ha->rsp_queues[0];
296 }
297
298 if (ha->flags & QUEUE_SHADOW_PTRS) {
299 (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
300 (off_t)rsp_q->rsp_in_shadow_ofst,
301 SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
302 mbx = ddi_get32(rsp_q->rsp_ring.acc_handle,
303 rsp_q->rsp_in_shadow_ptr);
304 } else {
305 mbx = RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
306 }
307
308 if (mbx != rsp_q->rsp_ring_index) {
309 rsp_q->isp_rsp_index = (uint16_t)mbx;
310 ql_response_pkt(ha, rsp_q, &isr_done_q,
311 &set_flags, &reset_flags);
312 /* PCI posting */
313 (void) RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
314 } else if (ha->flags & INTERRUPTS_ENABLED) {
315 /*EMPTY*/
316 QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbar_rsp_in "
317 "same as before\n");
318 }
319
320 /* Set interrupt claimed status. */
321 rval = DDI_INTR_CLAIMED;
322
323 } else if (CFG_IST(ha, CFG_CTRL_22XX)) {
324 rsp_q = ha->rsp_queues[0];
325 if (RD16_IO_REG(ha, istatus) & RISC_INT) {
326 rval = DDI_INTR_CLAIMED;
327
328 /* Check for mailbox interrupt. */
329 stat = RD16_IO_REG(ha, semaphore);
330 if (stat & BIT_0) {
331 /* Get mailbox data. */
332 mbx = RD16_IO_REG(ha, mailbox_out[0]);
333 if (mbx > 0x3fff && mbx < 0x8000) {
334 ql_mbx_completion(ha, mbx,
335 &set_flags, &reset_flags);
336 } else if (mbx > 0x7fff && mbx < 0xc000) {
337 ql_async_event(ha, rsp_q, mbx,
338 &isr_done_q, &set_flags,
339 &reset_flags);
340 } else {
341 EL(ha, "22XX unknown interrupt type\n");
342 }
343 } else {
344 rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
345 if (rsp_q->isp_rsp_index !=
346 rsp_q->rsp_ring_index) {
347 ql_response_pkt(ha, rsp_q,
348 &isr_done_q, &set_flags,
349 &reset_flags);
350 } else {
351 /*EMPTY*/
352 QL_PRINT_10(ha, "22XX isp_rsp_index "
353 "same as before\n");
354 }
355 }
356 /* Clear RISC interrupt */
357 ql_clr_risc_intr(ha);
358 }
359 } else {
360 if (CFG_IST(ha, CFG_CTRL_82XX)) {
361 ql_8021_clr_hw_intr(ha);
362 }
363
364 if (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) == 0) {
365 QL_PRINT_10(ha, "done, index=%d, no interrupt "
366 "stat=%xh\n", index, stat);
367 rval = DDI_INTR_UNCLAIMED;
368 } else if (ha->ql_dump_state & QL_DUMPING) {
369 EL(ha, "fw_dump, index=%d, active stat=%xh\n",
370 index, stat);
371 rval = DDI_INTR_CLAIMED;
372 } else if (CFG_IST(ha, CFG_CTRL_82XX) &&
373 RD32_IO_REG(ha, nx_risc_int) == 0) {
374 QL_PRINT_10(ha, "done, index=%d, no nx_risc_int "
375 "stat=%xh\n", index, stat);
376 rval = DDI_INTR_UNCLAIMED;
377 } else {
378 rval = DDI_INTR_CLAIMED;
379 QL_PRINT_3(ha, "index=%d, interrupt stat=%xh\n",
380 index, stat);
381
382 /* Capture FW defined interrupt info */
383 mbx = MSW(stat);
384
385 if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
386 != DDI_FM_OK) {
387 qlc_fm_report_err_impact(ha,
388 QL_FM_EREPORT_ACC_HANDLE_CHECK);
389 }
390
391 switch (stat & 0x1ff) {
392 case ROM_MBX_SUCCESS:
393 case ROM_MBX_ERR:
394 ql_mbx_completion(ha, mbx, &set_flags,
395 &reset_flags);
396 break;
397
398 case MBX_SUCCESS:
399 case MBX_ERR:
400 ql_mbx_completion(ha, mbx, &set_flags,
401 &reset_flags);
402 break;
403
404 case ASYNC_EVENT:
405 ql_async_event(ha, ha->rsp_queues[0],
406 (uint32_t)mbx, &isr_done_q,
407 &set_flags, &reset_flags);
408 break;
409
410 case MULTI_Q_RSP_UPDATE:
411 QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbx=%xh\n",
412 mbx);
413 if (mbx < ha->rsp_queues_cnt) {
414 rsp_q = ha->rsp_queues[mbx];
415 }
416 if (rsp_q == NULL) {
417 EL(ha, "unsupported MULTI_Q_RSP_UPDATE"
418 " mbx=%d\n", mbx);
419 rsp_q = ha->rsp_queues[0];
420 }
421 if (ha->flags & QUEUE_SHADOW_PTRS) {
422 (void) ddi_dma_sync(
423 rsp_q->rsp_ring.dma_handle,
424 (off_t)rsp_q->rsp_in_shadow_ofst,
425 SHADOW_ENTRY_SIZE,
426 DDI_DMA_SYNC_FORCPU);
427 mbx = ddi_get32(
428 rsp_q->rsp_ring.acc_handle,
429 rsp_q->rsp_in_shadow_ptr);
430 } else {
431 mbx = RD32_MBAR_REG(ha,
432 rsp_q->mbar_rsp_in);
433 }
434 /* FALLTHRU */
435
436 case RESP_UPDATE:
437 /* Clear RISC interrupt */
438 ql_clr_risc_intr(ha);
439
440 if (rsp_q == NULL) {
441 rsp_q = ha->rsp_queues[0];
442 }
443 if (mbx != rsp_q->rsp_ring_index) {
444 rsp_q->isp_rsp_index = (uint16_t)mbx;
445 ql_response_pkt(ha, rsp_q, &isr_done_q,
446 &set_flags, &reset_flags);
447 } else {
448 /*EMPTY*/
449 QL_PRINT_3(ha, "response "
450 "ring index same as before\n");
451 }
452 break;
453
454 case SCSI_FAST_POST_16:
455 stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
456 ql_async_event(ha, ha->rsp_queues[0],
457 stat, &isr_done_q, &set_flags,
458 &reset_flags);
459 break;
460
461 case SCSI_FAST_POST_32:
462 stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
463 ql_async_event(ha, ha->rsp_queues[0],
464 stat, &isr_done_q, &set_flags,
465 &reset_flags);
466 break;
467
468 case CTIO_FAST_POST:
469 stat = (stat & 0xffff0000) |
470 MBA_CTIO_COMPLETION;
471 ql_async_event(ha, ha->rsp_queues[0],
472 stat, &isr_done_q, &set_flags,
473 &reset_flags);
474 break;
475
476 case IP_FAST_POST_XMT:
477 stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
478 ql_async_event(ha, ha->rsp_queues[0],
479 stat, &isr_done_q, &set_flags,
480 &reset_flags);
481 break;
482
483 case IP_FAST_POST_RCV:
484 stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
485 ql_async_event(ha, ha->rsp_queues[0],
486 stat, &isr_done_q, &set_flags,
487 &reset_flags);
488 break;
489
490 case IP_FAST_POST_BRD:
491 stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
492 ql_async_event(ha, ha->rsp_queues[0],
493 stat, &isr_done_q, &set_flags,
494 &reset_flags);
495 break;
496
497 case IP_FAST_POST_RCV_ALN:
498 stat = (stat & 0xffff0000) |
499 MBA_IP_HDR_DATA_SPLIT;
500 ql_async_event(ha, ha->rsp_queues[0],
501 stat, &isr_done_q, &set_flags,
502 &reset_flags);
503 break;
504
505 case ATIO_UPDATE:
506 EL(ha, "unsupported ATIO queue update"
507 " interrupt, status=%xh\n", stat);
508 break;
509
510 case ATIO_RESP_UPDATE:
511 EL(ha, "unsupported ATIO response queue "
512 "update interrupt, status=%xh\n", stat);
513 break;
514
515 default:
516 ql_handle_uncommon_risc_intr(ha, intr, stat,
517 &set_flags);
518 break;
519 }
520 }
521
522 /* Clear RISC interrupt */
523 if (rval == DDI_INTR_CLAIMED && rsp_q == NULL) {
524 ql_clr_risc_intr(ha);
525 }
526
527 /* A0 chip delay */
528 if (CFG_IST(ha, CFG_CTRL_83XX) && ha->rev_id == 1 &&
529 ha->iflags & (IFLG_INTR_LEGACY | IFLG_INTR_FIXED)) {
530 drv_usecwait(4);
531 }
532 }
533
534 /* Process claimed interrupts during polls. */
535 if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
536 ha->intr_claimed = B_FALSE;
537 rval = DDI_INTR_CLAIMED;
538 }
539
540 /* Release interrupt lock. */
541 INDX_INTR_UNLOCK(ha, intr);
542
543 if (set_flags || reset_flags) {
544 ql_awaken_task_daemon(ha, NULL, set_flags, reset_flags);
545 }
546
547 if (isr_done_q.first != NULL) {
548 ql_done(isr_done_q.first, B_FALSE);
549 }
550
551 QL_PM_LOCK(ha);
552 if (ha->pm_busy) {
553 ha->pm_busy--;
554 }
555 QL_PM_UNLOCK(ha);
556
557 if (rval == DDI_INTR_CLAIMED) {
558 QL_PRINT_3(ha, "done\n");
559 ha->idle_timer = 0;
560 ha->xioctl->TotalInterrupts++;
561 } else {
562 /*EMPTY*/
563 QL_PRINT_10(ha, "interrupt not claimed\n");
564 }
565
566 return (rval);
567 }
568
569 /*
570 * ql_handle_uncommon_risc_intr
571 * Handle an uncommon RISC interrupt.
572 *
573 * Input:
574 * ha: adapter state pointer.
575 * intr: interrupt index.
576 * stat: interrupt status
577 * set_flags: task daemon flags to set.
578 *
579 * Context:
580 * Interrupt or Kernel context, no mailbox commands allowed.
581 */
582 static void
583 ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, int intr, uint32_t stat,
584 uint64_t *set_flags)
585 {
586 uint16_t hccr_reg;
587
588 hccr_reg = RD16_IO_REG(ha, hccr);
589
590 if (stat & RH_RISC_PAUSED ||
591 (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {
592
593 ADAPTER_STATE_LOCK(ha);
594 ha->flags |= PARITY_ERROR;
595 ADAPTER_STATE_UNLOCK(ha);
596
597 if (ha->parity_pause_errors == 0 ||
598 ha->parity_hccr_err != hccr_reg ||
599 ha->parity_stat_err != stat) {
600 cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
601 "Pause Error - hccr=%xh, stat=%xh, count=%d",
602 ha->instance, hccr_reg, stat,
603 ha->parity_pause_errors);
604 ha->parity_hccr_err = hccr_reg;
605 ha->parity_stat_err = stat;
606 }
607
608 EL(ha, "parity/pause error, isp_abort_needed\n");
609
610 INDX_INTR_UNLOCK(ha, intr);
611 if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
612 ql_reset_chip(ha);
613 }
614 INDX_INTR_LOCK(ha, intr);
615
616 if (ha->parity_pause_errors == 0) {
617 ha->log_parity_pause = B_TRUE;
618 }
619
620 if (ha->parity_pause_errors < 0xffffffff) {
621 ha->parity_pause_errors++;
622 }
623
624 *set_flags |= ISP_ABORT_NEEDED;
625
626 /* Disable ISP interrupts. */
627 ql_disable_intr(ha);
628 } else {
629 EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
630 stat, hccr_reg);
631 }
632 }
633
634 /*
635 * ql_mbx_completion
636 * Processes mailbox completions.
637 *
638 * Input:
639 * ha: adapter state pointer.
640 * mb0: Mailbox 0 contents.
641 * set_flags: task daemon flags to set.
642 * reset_flags: task daemon flags to reset.
643 *
644 * Context:
645 * Interrupt context.
646 */
647 /* ARGSUSED */
648 static void
649 ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint64_t *set_flags,
650 uint64_t *reset_flags)
651 {
652 uint32_t index;
653 uint16_t cnt;
654
655 QL_PRINT_3(ha, "started\n");
656
657 /* Load return mailbox registers. */
658 MBX_REGISTER_LOCK(ha);
659
660 if (ha->mcp != NULL) {
661 ha->mcp->mb[0] = mb0;
662 index = ha->mcp->in_mb & ~MBX_0;
663
664 for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
665 index >>= 1;
666 if (index & MBX_0) {
667 ha->mcp->mb[cnt] = RD16_IO_REG(ha,
668 mailbox_out[cnt]);
669 }
670 }
671
672 } else {
673 EL(ha, "mcp == NULL\n");
674 }
675
676 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
677 if (ha->flags & INTERRUPTS_ENABLED) {
678 cv_broadcast(&ha->cv_mbx_intr);
679 }
680
681 MBX_REGISTER_UNLOCK(ha);
682
683 QL_PRINT_3(ha, "done\n");
684 }
685
686 /*
687 * ql_async_event
688 * Processes asynchronous events.
689 *
690 * Input:
691 * ha: adapter state pointer.
692 * rsp_q: response queue structure pointer.
693 * mbx: Mailbox 0 register.
694 * done_q: head pointer to done queue.
695 * set_flags: task daemon flags to set.
696 * reset_flags: task daemon flags to reset.
697 *
698 * Context:
699 * Interrupt or Kernel context, no mailbox commands allowed.
700 */
701 static void
702 ql_async_event(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, uint32_t mbx,
703 ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
704 {
705 uint32_t index, handles[5];
706 uint16_t cnt, handle_cnt, mb[MAX_MBOX_COUNT];
707 ql_srb_t *sp;
708 port_id_t s_id;
709 ql_tgt_t *tq;
710 ql_adapter_state_t *vha;
711
712 QL_PRINT_3(ha, "started\n");
713
714 /* Setup to process fast completion. */
715 mb[0] = LSW(mbx);
716 switch (mb[0]) {
717 case MBA_SCSI_COMPLETION:
718 handles[0] = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
719 RD16_IO_REG(ha, mailbox_out[2]));
720 handle_cnt = 1;
721 break;
722
723 case MBA_CMPLT_1_16BIT:
724 handles[0] = MSW(mbx);
725 handle_cnt = 1;
726 mb[0] = MBA_SCSI_COMPLETION;
727 break;
728
729 case MBA_CMPLT_2_16BIT:
730 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
731 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
732 handle_cnt = 2;
733 mb[0] = MBA_SCSI_COMPLETION;
734 break;
735
736 case MBA_CMPLT_3_16BIT:
737 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
738 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
739 handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
740 handle_cnt = 3;
741 mb[0] = MBA_SCSI_COMPLETION;
742 break;
743
744 case MBA_CMPLT_4_16BIT:
745 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
746 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
747 handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
748 handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
749 handle_cnt = 4;
750 mb[0] = MBA_SCSI_COMPLETION;
751 break;
752
753 case MBA_CMPLT_5_16BIT:
754 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
755 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
756 handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
757 handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
758 handles[4] = (uint32_t)RD16_IO_REG(ha, mailbox_out[7]);
759 handle_cnt = 5;
760 mb[0] = MBA_SCSI_COMPLETION;
761 break;
762
763 case MBA_CMPLT_1_32BIT:
764 handles[0] = SHORT_TO_LONG(MSW(mbx),
765 RD16_IO_REG(ha, mailbox_out[2]));
766 handle_cnt = 1;
767 mb[0] = MBA_SCSI_COMPLETION;
768 break;
769
770 case MBA_CMPLT_2_32BIT:
771 handles[0] = SHORT_TO_LONG(
772 RD16_IO_REG(ha, mailbox_out[1]),
773 RD16_IO_REG(ha, mailbox_out[2]));
774 handles[1] = SHORT_TO_LONG(
775 RD16_IO_REG(ha, mailbox_out[6]),
776 RD16_IO_REG(ha, mailbox_out[7]));
777 handle_cnt = 2;
778 mb[0] = MBA_SCSI_COMPLETION;
779 break;
780
781 case MBA_CTIO_COMPLETION:
782 case MBA_IP_COMPLETION:
783 handles[0] = CFG_IST(ha, CFG_CTRL_22XX) ? SHORT_TO_LONG(
784 RD16_IO_REG(ha, mailbox_out[1]),
785 RD16_IO_REG(ha, mailbox_out[2])) :
786 SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2]));
787 handle_cnt = 1;
788 mb[0] = MBA_SCSI_COMPLETION;
789 break;
790
791 default:
792 break;
793 }
794
795 /* Handle asynchronous event */
796 switch (mb[0]) {
797 case MBA_SCSI_COMPLETION:
798 QL_PRINT_5(ha, "Fast post completion\n");
799
800 if ((ha->flags & ONLINE) == 0) {
801 break;
802 }
803
804 for (cnt = 0; cnt < handle_cnt; cnt++) {
805 QL_PRINT_5(ha, "Fast post completion, handle=%xh\n",
806 handles[cnt]);
807
808 /* Get handle. */
809 index = handles[cnt] & OSC_INDEX_MASK;
810
811 /* Validate handle. */
812 sp = index < ha->osc_max_cnt ?
813 ha->outstanding_cmds[index] : NULL;
814
815 if (sp == QL_ABORTED_SRB(ha)) {
816 EL(ha, "QL_ABORTED_SRB handle=%xh\n",
817 handles[cnt]);
818 ha->outstanding_cmds[index] = NULL;
819 continue;
820 }
821 if (sp != NULL && sp->handle == handles[cnt]) {
822 ha->outstanding_cmds[index] = NULL;
823 sp->handle = 0;
824 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
825
826 /* Set completed status. */
827 sp->flags |= SRB_ISP_COMPLETED;
828
829 /* Set completion status */
830 sp->pkt->pkt_reason = CS_COMPLETE;
831
832 if (!(sp->flags & SRB_FCP_CMD_PKT)) {
833 /* Place block on done queue */
834 ql_add_link_b(done_q, &sp->cmd);
835 } else {
836 ql_fast_fcp_post(sp, rsp_q);
837 }
838 } else if (handles[cnt] != QL_FCA_BRAND) {
839 if (sp == NULL) {
840 EL(ha, "%xh unknown IOCB handle=%xh\n",
841 mb[0], handles[cnt]);
842 } else {
843 EL(ha, "%xh mismatch IOCB handle "
844 "pkt=%xh, sp=%xh\n", mb[0],
845 handles[cnt], sp->handle);
846 }
847
848 EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, "
849 "mbx3=%xh, mbx6=%xh, mbx7=%xh\n", mb[0],
850 RD16_IO_REG(ha, mailbox_out[1]),
851 RD16_IO_REG(ha, mailbox_out[2]),
852 RD16_IO_REG(ha, mailbox_out[3]),
853 RD16_IO_REG(ha, mailbox_out[6]),
854 RD16_IO_REG(ha, mailbox_out[7]));
855
856 ADAPTER_STATE_LOCK(ha);
857 ha->flags |= FW_DUMP_NEEDED;
858 ADAPTER_STATE_UNLOCK(ha);
859
860 if (!(ha->task_daemon_flags &
861 ISP_ABORT_NEEDED)) {
862 EL(ha, "%xh ISP Invalid handle, "
863 "isp_abort_needed\n", mb[0]);
864 *set_flags |= ISP_ABORT_NEEDED;
865 }
866 }
867 }
868 break;
869
870 case MBA_RESET: /* Reset */
871 EL(ha, "%xh Reset received\n", mb[0]);
872 *set_flags |= MARKER_NEEDED;
873 break;
874
875 case MBA_SYSTEM_ERR: /* System Error */
876 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
877 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
878 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
879 mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
880
881 EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
882 "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
883 "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
884 "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
885 RD16_IO_REG(ha, mailbox_out[4]),
886 RD16_IO_REG(ha, mailbox_out[5]),
887 RD16_IO_REG(ha, mailbox_out[6]), mb[7],
888 RD16_IO_REG(ha, mailbox_out[8]),
889 RD16_IO_REG(ha, mailbox_out[9]),
890 RD16_IO_REG(ha, mailbox_out[10]),
891 RD16_IO_REG(ha, mailbox_out[11]),
892 RD16_IO_REG(ha, mailbox_out[12]));
893
894 EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
895 "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
896 "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
897 mb[0], RD16_IO_REG(ha, mailbox_out[13]),
898 RD16_IO_REG(ha, mailbox_out[14]),
899 RD16_IO_REG(ha, mailbox_out[15]),
900 RD16_IO_REG(ha, mailbox_out[16]),
901 RD16_IO_REG(ha, mailbox_out[17]),
902 RD16_IO_REG(ha, mailbox_out[18]),
903 RD16_IO_REG(ha, mailbox_out[19]),
904 RD16_IO_REG(ha, mailbox_out[20]),
905 RD16_IO_REG(ha, mailbox_out[21]),
906 RD16_IO_REG(ha, mailbox_out[22]),
907 RD16_IO_REG(ha, mailbox_out[23]));
908
909 if (ha->reg_off->mbox_cnt > 24) {
910 EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
911 "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
912 "mbx30=%xh, mbx31=%xh\n", mb[0],
913 RD16_IO_REG(ha, mailbox_out[24]),
914 RD16_IO_REG(ha, mailbox_out[25]),
915 RD16_IO_REG(ha, mailbox_out[26]),
916 RD16_IO_REG(ha, mailbox_out[27]),
917 RD16_IO_REG(ha, mailbox_out[28]),
918 RD16_IO_REG(ha, mailbox_out[29]),
919 RD16_IO_REG(ha, mailbox_out[30]),
920 RD16_IO_REG(ha, mailbox_out[31]));
921 }
922
923 ADAPTER_STATE_LOCK(ha);
924 ha->flags |= FW_DUMP_NEEDED;
925 ADAPTER_STATE_UNLOCK(ha);
926
927 /* Signal task daemon to store error log. */
928 if (ha->errlog[0] == 0) {
929 ha->errlog[3] = mb[3];
930 ha->errlog[2] = mb[2];
931 ha->errlog[1] = mb[1];
932 ha->errlog[0] = FLASH_ERRLOG_AEN_8002;
933 }
934
935 if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
936 ADAPTER_STATE_LOCK(ha);
937 ha->flags |= MPI_RESET_NEEDED;
938 ADAPTER_STATE_UNLOCK(ha);
939 }
940
941 *set_flags |= ISP_ABORT_NEEDED;
942 ha->xioctl->ControllerErrorCount++;
943 break;
944
945 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
946 EL(ha, "%xh Request Transfer Error received, "
947 "isp_abort_needed\n", mb[0]);
948
949 /* Signal task daemon to store error log. */
950 if (ha->errlog[0] == 0) {
951 ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
952 ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
953 ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
954 ha->errlog[0] = FLASH_ERRLOG_AEN_8003;
955 }
956
957 *set_flags |= ISP_ABORT_NEEDED;
958 ha->xioctl->ControllerErrorCount++;
959
960 (void) qlc_fm_report_err_impact(ha,
961 QL_FM_EREPORT_MBA_REQ_TRANSFER_ERR);
962
963 break;
964
965 case MBA_RSP_TRANSFER_ERR: /* Response Xfer Err */
966 EL(ha, "%xh Response Transfer Error received,"
967 " isp_abort_needed\n", mb[0]);
968
969 /* Signal task daemon to store error log. */
970 if (ha->errlog[0] == 0) {
971 ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
972 ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
973 ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
974 ha->errlog[0] = FLASH_ERRLOG_AEN_8004;
975 }
976
977 *set_flags |= ISP_ABORT_NEEDED;
978 ha->xioctl->ControllerErrorCount++;
979
980 (void) qlc_fm_report_err_impact(ha,
981 QL_FM_EREPORT_MBA_RSP_TRANSFER_ERR);
982
983 break;
984
985 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
986 EL(ha, "%xh Request Queue Wake-up "
987 "received, mbx1=%xh\n", mb[0],
988 RD16_IO_REG(ha, mailbox_out[1]));
989 break;
990
991 case MBA_MENLO_ALERT: /* Menlo Alert Notification */
992 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
993 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
994 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
995
996 EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
997 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
998
999 switch (mb[1]) {
1000 case MLA_LOGIN_OPERATIONAL_FW:
1001 ADAPTER_STATE_LOCK(ha);
1002 ha->flags |= MENLO_LOGIN_OPERATIONAL;
1003 ADAPTER_STATE_UNLOCK(ha);
1004 break;
1005 case MLA_PANIC_RECOVERY:
1006 case MLA_LOGIN_DIAGNOSTIC_FW:
1007 case MLA_LOGIN_GOLDEN_FW:
1008 case MLA_REJECT_RESPONSE:
1009 default:
1010 break;
1011 }
1012 break;
1013
1014 case MBA_LIP_F8: /* Received a LIP F8. */
1015 case MBA_LIP_RESET: /* LIP reset occurred. */
1016 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
1017 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1018 EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
1019 "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1020 RD16_IO_REG(ha, mailbox_out[2]));
1021 } else {
1022 EL(ha, "%xh LIP received\n", mb[0]);
1023 }
1024
1025 ADAPTER_STATE_LOCK(ha);
1026 ha->flags &= ~POINT_TO_POINT;
1027 ADAPTER_STATE_UNLOCK(ha);
1028
1029 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1030 *set_flags |= LOOP_DOWN;
1031 }
1032 ql_port_state(ha, FC_STATE_OFFLINE,
1033 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1034
1035 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1036 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1037 }
1038
1039 ha->adapter_stats->lip_count++;
1040
1041 /* Update AEN queue. */
1042 ha->xioctl->TotalLipResets++;
1043 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1044 ql_enqueue_aen(ha, mb[0], NULL);
1045 }
1046 break;
1047
1048 case MBA_LOOP_UP:
1049 if (!CFG_IST(ha, CFG_CTRL_22XX)) {
1050 ha->iidma_rate = RD16_IO_REG(ha, mailbox_out[1]);
1051 if (ha->iidma_rate == IIDMA_RATE_1GB) {
1052 ha->state = FC_PORT_STATE_MASK(
1053 ha->state) | FC_STATE_1GBIT_SPEED;
1054 index = 1;
1055 } else if (ha->iidma_rate == IIDMA_RATE_2GB) {
1056 ha->state = FC_PORT_STATE_MASK(
1057 ha->state) | FC_STATE_2GBIT_SPEED;
1058 index = 2;
1059 } else if (ha->iidma_rate == IIDMA_RATE_4GB) {
1060 ha->state = FC_PORT_STATE_MASK(
1061 ha->state) | FC_STATE_4GBIT_SPEED;
1062 index = 4;
1063 } else if (ha->iidma_rate == IIDMA_RATE_8GB) {
1064 ha->state = FC_PORT_STATE_MASK(
1065 ha->state) | FC_STATE_8GBIT_SPEED;
1066 index = 8;
1067 } else if (ha->iidma_rate == IIDMA_RATE_10GB) {
1068 ha->state = FC_PORT_STATE_MASK(
1069 ha->state) | FC_STATE_10GBIT_SPEED;
1070 index = 10;
1071 } else if (ha->iidma_rate == IIDMA_RATE_16GB) {
1072 ha->state = FC_PORT_STATE_MASK(
1073 ha->state) | FC_STATE_16GBIT_SPEED;
1074 index = 16;
1075 } else if (ha->iidma_rate == IIDMA_RATE_32GB) {
1076 ha->state = FC_PORT_STATE_MASK(
1077 ha->state) | FC_STATE_32GBIT_SPEED;
1078 index = 32;
1079 } else {
1080 ha->state = FC_PORT_STATE_MASK(
1081 ha->state);
1082 index = 0;
1083 }
1084 } else {
1085 ha->iidma_rate = IIDMA_RATE_1GB;
1086 ha->state = FC_PORT_STATE_MASK(ha->state) |
1087 FC_STATE_FULL_SPEED;
1088 index = 1;
1089 }
1090
1091 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1092 vha->state = FC_PORT_STATE_MASK(vha->state) |
1093 FC_PORT_SPEED_MASK(ha->state);
1094 }
1095 EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
1096
1097 /* Update AEN queue. */
1098 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1099 ql_enqueue_aen(ha, mb[0], NULL);
1100 }
1101 break;
1102
1103 case MBA_LOOP_DOWN:
1104 EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1105 "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1106 RD16_IO_REG(ha, mailbox_out[2]),
1107 RD16_IO_REG(ha, mailbox_out[3]),
1108 RD16_IO_REG(ha, mailbox_out[4]));
1109
1110 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1111 *set_flags |= LOOP_DOWN;
1112 }
1113 ql_port_state(ha, FC_STATE_OFFLINE,
1114 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1115
1116 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1117 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1118 }
1119
1120 if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1121 ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]);
1122 }
1123
1124 /* Update AEN queue. */
1125 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1126 ql_enqueue_aen(ha, mb[0], NULL);
1127 }
1128 break;
1129
1130 case MBA_PORT_UPDATE:
1131 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1132 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1133 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1134 RD16_IO_REG(ha, mailbox_out[3]) : 0);
1135
1136 /* Locate port state structure. */
1137 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1138 if (vha->vp_index == LSB(mb[3])) {
1139 break;
1140 }
1141 }
1142 if (vha == NULL) {
1143 break;
1144 }
1145
1146 if (mb[1] == 0xffff &&
1147 mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a ||
1148 MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d ||
1149 MSB(mb[3]) == 0x1e)) {
1150 EL(ha, "%xh Port Database Update, Loop down "
1151 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1152 mb[0], mb[1], mb[2], mb[3]);
1153 /*
1154 * received FLOGI reject
1155 * received FLOGO
1156 * FCF configuration changed
1157 * FIP Clear Virtual Link received
1158 * FCF timeout
1159 */
1160 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1161 *set_flags |= LOOP_DOWN;
1162 }
1163 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE |
1164 COMMAND_WAIT_NEEDED | LOOP_DOWN);
1165 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1166 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1167 }
1168 /*
1169 * In N port 2 N port topology the FW provides a port
1170 * database entry at loop_id 0x7fe which we use to
1171 * acquire the Ports WWPN.
1172 */
1173 } else if ((mb[1] != 0x7fe) &&
1174 ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1175 (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
1176 (mb[2] != 6 || mb[3] != 0))))) {
1177 EL(ha, "%xh Port Database Update, Login/Logout "
1178 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1179 mb[0], mb[1], mb[2], mb[3]);
1180 } else {
1181 EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1182 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1183 mb[3]);
1184 *set_flags |= LOOP_RESYNC_NEEDED;
1185 *set_flags &= ~LOOP_DOWN;
1186 *reset_flags |= LOOP_DOWN;
1187 *reset_flags &= ~LOOP_RESYNC_NEEDED;
1188 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1189 TASK_DAEMON_LOCK(ha);
1190 vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1191 vha->task_daemon_flags &= ~LOOP_DOWN;
1192 TASK_DAEMON_UNLOCK(ha);
1193 ADAPTER_STATE_LOCK(ha);
1194 vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1195 ADAPTER_STATE_UNLOCK(ha);
1196 }
1197
1198 /* Update AEN queue. */
1199 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1200 ql_enqueue_aen(ha, mb[0], NULL);
1201 }
1202 break;
1203
1204 case MBA_RSCN_UPDATE:
1205 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1206 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1207 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1208 RD16_IO_REG(ha, mailbox_out[3]) : 0);
1209
1210 /* Locate port state structure. */
1211 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1212 if (vha->vp_index == LSB(mb[3])) {
1213 break;
1214 }
1215 }
1216
1217 if (vha == NULL) {
1218 break;
1219 }
1220
1221 if (LSB(mb[1]) == vha->d_id.b.domain &&
1222 MSB(mb[2]) == vha->d_id.b.area &&
1223 LSB(mb[2]) == vha->d_id.b.al_pa) {
1224 EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1225 "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1226 } else {
1227 EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1228 "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1229 if (FC_PORT_STATE_MASK(vha->state) !=
1230 FC_STATE_OFFLINE) {
1231 ql_rcv_rscn_els(vha, &mb[0], done_q);
1232 TASK_DAEMON_LOCK(ha);
1233 vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1234 TASK_DAEMON_UNLOCK(ha);
1235 *set_flags |= RSCN_UPDATE_NEEDED;
1236 }
1237 }
1238
1239 /* Update AEN queue. */
1240 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1241 ql_enqueue_aen(ha, mb[0], NULL);
1242 }
1243 break;
1244
1245 case MBA_LIP_ERROR: /* Loop initialization errors. */
1246 EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1247 RD16_IO_REG(ha, mailbox_out[1]));
1248 break;
1249
1250 case MBA_IP_RECEIVE:
1251 case MBA_IP_BROADCAST:
1252 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1253 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1254 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1255
1256 EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1257 "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1258
1259 /* Locate device queue. */
1260 s_id.b.al_pa = LSB(mb[2]);
1261 s_id.b.area = MSB(mb[2]);
1262 s_id.b.domain = LSB(mb[1]);
1263 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1264 EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1265 break;
1266 }
1267
1268 cnt = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1269 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1270 ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1271 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1272 ha->ip_init_ctrl_blk.cb.buf_size[1]));
1273
1274 tq->ub_sequence_length = mb[3];
1275 tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1276 if (mb[3] % cnt) {
1277 tq->ub_total_seg_cnt++;
1278 }
1279 cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1280
1281 for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1282 index++) {
1283 mb[index] = RD16_IO_REG(ha, mailbox_out[index]);
1284 }
1285
1286 tq->ub_seq_id = ++ha->ub_seq_id;
1287 tq->ub_seq_cnt = 0;
1288 tq->ub_frame_ro = 0;
1289 tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1290 (CFG_IST(ha, CFG_CTRL_24XX) ? BROADCAST_24XX_HDL :
1291 IP_BROADCAST_LOOP_ID) : tq->loop_id);
1292 ha->rcv_dev_q = tq;
1293
1294 for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1295 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1296 if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1297 QL_SUCCESS) {
1298 EL(ha, "ql_ub_frame_hdr failed, "
1299 "isp_abort_needed\n");
1300 *set_flags |= ISP_ABORT_NEEDED;
1301 break;
1302 }
1303 }
1304 break;
1305
1306 case MBA_IP_LOW_WATER_MARK:
1307 case MBA_IP_RCV_BUFFER_EMPTY:
1308 EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1309 mb[0]);
1310 *set_flags |= NEED_UNSOLICITED_BUFFERS;
1311 break;
1312
1313 case MBA_IP_HDR_DATA_SPLIT:
1314 EL(ha, "%xh IP HDR data split received\n", mb[0]);
1315 break;
1316
1317 case MBA_ERROR_LOGGING_DISABLED:
1318 EL(ha, "%xh error logging disabled received, "
1319 "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1320 break;
1321
1322 case MBA_POINT_TO_POINT:
1323 /* case MBA_DCBX_COMPLETED: */
1324 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1325 EL(ha, "%xh DCBX completed received\n", mb[0]);
1326 } else {
1327 EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1328 }
1329 ADAPTER_STATE_LOCK(ha);
1330 ha->flags |= POINT_TO_POINT;
1331 ADAPTER_STATE_UNLOCK(ha);
1332 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1333 *set_flags |= LOOP_DOWN;
1334 }
1335 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1336 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1337 }
1338 ql_port_state(ha, FC_STATE_OFFLINE,
1339 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1340 break;
1341
1342 case MBA_FCF_CONFIG_ERROR:
1343 EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1344 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1345 break;
1346
1347 case MBA_DCBX_PARAM_CHANGED:
1348 EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1349 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1350 break;
1351
1352 case MBA_CHG_IN_CONNECTION:
1353 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1354 if (mb[1] == 2) {
1355 EL(ha, "%xh Change In Connection received, "
1356 "mbx1=%xh\n", mb[0], mb[1]);
1357 ADAPTER_STATE_LOCK(ha);
1358 ha->flags &= ~POINT_TO_POINT;
1359 ADAPTER_STATE_UNLOCK(ha);
1360 if (ha->topology & QL_N_PORT) {
1361 ha->topology = (uint8_t)(ha->topology &
1362 ~QL_N_PORT);
1363 ha->topology = (uint8_t)(ha->topology |
1364 QL_NL_PORT);
1365 }
1366 } else {
1367 EL(ha, "%xh Change In Connection received, "
1368 "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1369 *set_flags |= ISP_ABORT_NEEDED;
1370 }
1371 break;
1372
1373 case MBA_ZIO_UPDATE:
1374 EL(ha, "%xh ZIO response received\n", mb[0]);
1375
1376 rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1377 ql_response_pkt(ha, rsp_q, done_q, set_flags, reset_flags);
1378 break;
1379
1380 case MBA_PORT_BYPASS_CHANGED:
1381 EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1382 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1383 /*
1384 * Event generated when there is a transition on
1385 * port bypass of crystal+.
1386 * Mailbox 1: Bit 0 - External.
1387 * Bit 2 - Internal.
1388 * When the bit is 0, the port is bypassed.
1389 *
1390 * For now we will generate a LIP for all cases.
1391 */
1392 *set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1393 break;
1394
1395 case MBA_RECEIVE_ERROR:
1396 EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1397 mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1398 RD16_IO_REG(ha, mailbox_out[2]));
1399 break;
1400
1401 case MBA_LS_RJT_SENT:
1402 EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1403 RD16_IO_REG(ha, mailbox_out[1]));
1404 break;
1405
1406 case MBA_FW_RESTART_COMP:
1407 EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1408 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1409 break;
1410
1411 /*
1412 * MBA_IDC_COMPLETE & MBA_IDC_NOTIFICATION: We won't get another
1413 * IDC async event until we ACK the current one.
1414 */
1415 case MBA_IDC_COMPLETE:
1416 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1417 EL(ha, "%xh MBA_IDC_COMPLETE received, mbx2=%xh\n", mb[0],
1418 mb[2]);
1419 switch (mb[2]) {
1420 case IDC_OPC_FLASH_ACC:
1421 case IDC_OPC_RESTART_MPI:
1422 case IDC_OPC_PORT_RESET_MBC:
1423 case IDC_OPC_SET_PORT_CONFIG_MBC:
1424 ADAPTER_STATE_LOCK(ha);
1425 ha->flags |= IDC_RESTART_NEEDED;
1426 ADAPTER_STATE_UNLOCK(ha);
1427 break;
1428 default:
1429 EL(ha, "unknown IDC completion opcode=%xh\n", mb[2]);
1430 break;
1431 }
1432 break;
1433
1434 case MBA_IDC_NOTIFICATION:
1435 for (cnt = 1; cnt < 8; cnt++) {
1436 ha->idc_mb[cnt] = RD16_IO_REG(ha, mailbox_out[cnt]);
1437 }
1438 EL(ha, "%xh MBA_IDC_REQ_NOTIFICATION received, mbx1=%xh, "
1439 "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh, "
1440 "mbx7=%xh\n", mb[0], ha->idc_mb[1], ha->idc_mb[2],
1441 ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5], ha->idc_mb[6],
1442 ha->idc_mb[7]);
1443
1444 ADAPTER_STATE_LOCK(ha);
1445 switch (ha->idc_mb[2]) {
1446 case IDC_OPC_DRV_START:
1447 ha->flags |= IDC_RESTART_NEEDED;
1448 break;
1449 case IDC_OPC_FLASH_ACC:
1450 case IDC_OPC_RESTART_MPI:
1451 case IDC_OPC_PORT_RESET_MBC:
1452 case IDC_OPC_SET_PORT_CONFIG_MBC:
1453 ha->flags |= IDC_STALL_NEEDED;
1454 break;
1455 default:
1456 EL(ha, "unknown IDC request opcode=%xh\n",
1457 ha->idc_mb[2]);
1458 break;
1459 }
1460 /*
1461 * If there is a timeout value associated with this IDC
1462 * notification then there is an implied requirement
1463 * that we return an ACK.
1464 */
1465 if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
1466 ha->flags |= IDC_ACK_NEEDED;
1467 }
1468 ADAPTER_STATE_UNLOCK(ha);
1469
1470 ql_awaken_task_daemon(ha, NULL, 0, 0);
1471 break;
1472
1473 case MBA_IDC_TIME_EXTENDED:
1474 EL(ha, "%xh MBA_IDC_TIME_EXTENDED received, mbx2=%xh\n",
1475 mb[0], RD16_IO_REG(ha, mailbox_out[2]));
1476 break;
1477
1478 default:
1479 EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1480 "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1481 RD16_IO_REG(ha, mailbox_out[2]),
1482 RD16_IO_REG(ha, mailbox_out[3]));
1483 break;
1484 }
1485
1486 QL_PRINT_3(ha, "done\n");
1487 }
1488
1489 /*
1490 * ql_fast_fcp_post
1491 * Fast path for good SCSI I/O completion.
1492 *
1493 * Input:
1494 * sp: SRB pointer.
1495 * rsp_q: response queue structure pointer.
1496 *
1497 * Context:
1498 * Interrupt or Kernel context, no mailbox commands allowed.
1499 */
static void
ql_fast_fcp_post(ql_srb_t *sp, ql_response_q_t *rsp_q)
{
	ql_adapter_state_t *ha = sp->ha;
	ql_lun_t *lq = sp->lun_queue;
	ql_tgt_t *tq = lq->target_queue;

	QL_PRINT_3(ha, "started\n");

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device. */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
			 */
			lq->flags &= ~LQF_UNTAGGED_PENDING;
		}

		/* Decrement per-LUN outstanding command count. */
		if (lq->lun_outcnt != 0) {
			lq->lun_outcnt--;
		}
	}

	/* Reset port down retry count on good completion. */
	tq->port_down_retry_count = ha->port_down_retry_count;
	tq->qfull_retry_count = ha->qfull_retry_count;
	ha->pha->timeout_cnt = 0;

	/* Remove command from watchdog queue. */
	if (sp->flags & SRB_WATCHDOG_ENABLED) {
		ql_remove_link(&tq->wdg, &sp->wdg);
		sp->flags &= ~SRB_WATCHDOG_ENABLED;
	}

	if (lq->cmd.first != NULL) {
		/*
		 * More commands are queued on this LUN; ql_next() issues
		 * them.  NOTE(review): the device queue lock is not released
		 * on this path here, so ql_next() presumably drops it —
		 * confirm against its definition.
		 */
		ql_next(ha, lq);
	} else {
		/* Release LU queue specific lock. */
		DEVICE_QUEUE_UNLOCK(tq);
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
		}
	}

	/* Sync buffers if required. */
	if (sp->flags & SRB_MS_PKT) {
		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Map ISP completion codes. */
	sp->pkt->pkt_expln = FC_EXPLN_NONE;
	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
	sp->pkt->pkt_state = FC_PKT_SUCCESS;

	(void) qlc_fm_check_pkt_dma_handle(ha, sp);

	/*
	 * Now call the pkt completion callback.  Polled commands are reaped
	 * by their originator; with a single completion thread and hardware
	 * interrupts (not polling), call the callback directly with the
	 * index interrupt lock temporarily dropped; otherwise hand the SRB
	 * to ql_io_comp().
	 */
	if (sp->flags & SRB_POLL) {
		sp->flags &= ~SRB_POLL;
	} else if (ha->completion_thds == 1 && sp->pkt->pkt_comp &&
	    !(ha->flags & POLL_INTR)) {
		INDX_INTR_UNLOCK(ha, rsp_q->rsp_q_number);
		(*sp->pkt->pkt_comp)(sp->pkt);
		INDX_INTR_LOCK(ha, rsp_q->rsp_q_number);
	} else {
		ql_io_comp(sp);
	}

	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
	    != DDI_FM_OK) {
		qlc_fm_report_err_impact(ha,
		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
	}

	QL_PRINT_3(ha, "done\n");
}
1586
1587 /*
1588 * ql_response_pkt
1589 * Processes response entry.
1590 *
1591 * Input:
1592 * ha: adapter state pointer.
1593 * rsp_q: response queue structure pointer.
1594 * done_q: head pointer to done queue.
1595 * set_flags: task daemon flags to set.
1596 * reset_flags: task daemon flags to reset.
1597 *
1598 * Context:
1599 * Interrupt or Kernel context, no mailbox commands allowed.
1600 */
1601 static void
1602 ql_response_pkt(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
1603 ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
1604 {
1605 response_t *pkt;
1606 uint32_t dma_sync_size_1 = 0;
1607 uint32_t dma_sync_size_2 = 0;
1608 int status = 0;
1609
1610 QL_PRINT_3(ha, "started\n");
1611
1612 if (rsp_q->isp_rsp_index >= rsp_q->rsp_entry_cnt) {
1613 EL(ha, "index error = %xh, isp_abort_needed",
1614 rsp_q->isp_rsp_index);
1615 *set_flags |= ISP_ABORT_NEEDED;
1616 return;
1617 }
1618
1619 if ((ha->flags & ONLINE) == 0) {
1620 QL_PRINT_10(ha, "not onlne, done\n");
1621 return;
1622 }
1623
1624 /* Calculate size of response queue entries to sync. */
1625 if (rsp_q->isp_rsp_index > rsp_q->rsp_ring_index) {
1626 dma_sync_size_1 = (uint32_t)
1627 ((uint32_t)(rsp_q->isp_rsp_index - rsp_q->rsp_ring_index) *
1628 RESPONSE_ENTRY_SIZE);
1629 } else if (rsp_q->isp_rsp_index == 0) {
1630 dma_sync_size_1 = (uint32_t)
1631 ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
1632 RESPONSE_ENTRY_SIZE);
1633 } else {
1634 /* Responses wrap around the Q */
1635 dma_sync_size_1 = (uint32_t)
1636 ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
1637 RESPONSE_ENTRY_SIZE);
1638 dma_sync_size_2 = (uint32_t)
1639 (rsp_q->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1640 }
1641
1642 /* Sync DMA buffer. */
1643 (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
1644 (off_t)(rsp_q->rsp_ring_index * RESPONSE_ENTRY_SIZE),
1645 dma_sync_size_1, DDI_DMA_SYNC_FORCPU);
1646 if (dma_sync_size_2) {
1647 (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle, 0,
1648 dma_sync_size_2, DDI_DMA_SYNC_FORCPU);
1649 }
1650
1651 if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1652 != DDI_FM_OK) {
1653 qlc_fm_report_err_impact(ha,
1654 QL_FM_EREPORT_ACC_HANDLE_CHECK);
1655 }
1656
1657 while (rsp_q->rsp_ring_index != rsp_q->isp_rsp_index) {
1658 pkt = rsp_q->rsp_ring_ptr;
1659
1660 QL_PRINT_5(ha, "ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1661 rsp_q->rsp_ring_index, rsp_q->isp_rsp_index);
1662 QL_DUMP_5((uint8_t *)rsp_q->rsp_ring_ptr, 8,
1663 RESPONSE_ENTRY_SIZE);
1664
1665 /* Adjust ring index. */
1666 rsp_q->rsp_ring_index++;
1667 if (rsp_q->rsp_ring_index == rsp_q->rsp_entry_cnt) {
1668 rsp_q->rsp_ring_index = 0;
1669 rsp_q->rsp_ring_ptr = rsp_q->rsp_ring.bp;
1670 } else {
1671 rsp_q->rsp_ring_ptr++;
1672 }
1673
1674 /* Process packet. */
1675 if (rsp_q->status_srb != NULL &&
1676 pkt->entry_type != STATUS_CONT_TYPE) {
1677 ql_add_link_b(done_q, &rsp_q->status_srb->cmd);
1678 rsp_q->status_srb = NULL;
1679 }
1680
1681 pkt->entry_status = (uint8_t)
1682 (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1683 pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1684
1685 if (pkt->entry_status != 0 ||
1686 pkt->entry_type == ABORTED_ENTRY_TYPE) {
1687 ql_error_entry(ha, rsp_q,
1688 pkt, done_q,
1689 set_flags, reset_flags);
1690 } else {
1691 switch (pkt->entry_type) {
1692 case STATUS_TYPE:
1693 status |= CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1694 ql_24xx_status_entry(ha, rsp_q,
1695 (sts_24xx_entry_t *)pkt, done_q,
1696 set_flags, reset_flags) :
1697 ql_status_entry(ha, rsp_q,
1698 (sts_entry_t *)pkt,
1699 done_q, set_flags, reset_flags);
1700 break;
1701 case STATUS_CONT_TYPE:
1702 ql_status_cont_entry(ha, rsp_q,
1703 (sts_cont_entry_t *)pkt, done_q,
1704 set_flags, reset_flags);
1705 break;
1706 case IP_TYPE:
1707 case IP_A64_TYPE:
1708 case IP_CMD_TYPE:
1709 ql_ip_entry(ha, rsp_q,
1710 (ip_entry_t *)pkt, done_q,
1711 set_flags, reset_flags);
1712 break;
1713 case IP_RECEIVE_TYPE:
1714 ql_ip_rcv_entry(ha, rsp_q,
1715 (ip_rcv_entry_t *)pkt, done_q,
1716 set_flags, reset_flags);
1717 break;
1718 case IP_RECEIVE_CONT_TYPE:
1719 ql_ip_rcv_cont_entry(ha, rsp_q,
1720 (ip_rcv_cont_entry_t *)pkt, done_q,
1721 set_flags, reset_flags);
1722 break;
1723 case IP_24XX_RECEIVE_TYPE:
1724 ql_ip_24xx_rcv_entry(ha, rsp_q,
1725 (ip_rcv_24xx_entry_t *)pkt, done_q,
1726 set_flags, reset_flags);
1727 break;
1728 case MS_TYPE:
1729 ql_ms_entry(ha, rsp_q,
1730 (ms_entry_t *)pkt, done_q,
1731 set_flags, reset_flags);
1732 break;
1733 case REPORT_ID_TYPE:
1734 ql_report_id_entry(ha, rsp_q,
1735 (report_id_acq_t *)pkt, done_q,
1736 set_flags, reset_flags);
1737 break;
1738 case ELS_PASSTHRU_TYPE:
1739 ql_els_passthru_entry(ha, rsp_q,
1740 (els_passthru_entry_rsp_t *)pkt, done_q,
1741 set_flags, reset_flags);
1742 break;
1743 case IP_BUF_POOL_TYPE:
1744 case MARKER_TYPE:
1745 case VP_MODIFY_TYPE:
1746 case VP_CONTROL_TYPE:
1747 break;
1748 default:
1749 EL(ha, "Unknown IOCB entry type=%xh\n",
1750 pkt->entry_type);
1751 break;
1752 }
1753 }
1754 }
1755
1756 /* Inform RISC of processed responses. */
1757
1758 if (ha->flags & MULTI_QUEUE) {
1759 WR32_MBAR_REG(ha, rsp_q->mbar_rsp_out, rsp_q->rsp_ring_index);
1760 } else {
1761 WRT16_IO_REG(ha, resp_out, rsp_q->rsp_ring_index);
1762 }
1763
1764 if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1765 != DDI_FM_OK) {
1766 qlc_fm_report_err_impact(ha,
1767 QL_FM_EREPORT_ACC_HANDLE_CHECK);
1768 }
1769
1770 /* RESET packet received delay for possible async event. */
1771 if (status & BIT_0) {
1772 drv_usecwait(500000);
1773 }
1774
1775 QL_PRINT_3(ha, "done\n");
1776 }
1777
1778 /*
1779 * ql_error_entry
1780 * Processes error entry.
1781 *
1782 * Input:
1783 * ha: adapter state pointer.
1784 * rsp_q: response queue structure pointer.
1785 * pkt: entry pointer.
1786 * done_q: head pointer to done queue.
1787 * set_flags: task daemon flags to set.
1788 * reset_flags: task daemon flags to reset.
1789 *
1790 * Context:
1791 * Interrupt or Kernel context, no mailbox commands allowed.
1792 */
1793 /* ARGSUSED */
static void
ql_error_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, response_t *pkt,
    ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
{
	ql_srb_t *sp = NULL;
	uint32_t index, resp_identifier;

	/*
	 * Aborted entries carry no error-status bits to classify; just
	 * release the outstanding-command slot when it holds the
	 * aborted-SRB sentinel, log, and return.
	 */
	if (pkt->entry_type == ABORTED_ENTRY_TYPE) {
		resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
		    &pkt->handle);
		index = resp_identifier & OSC_INDEX_MASK;
		if (index < ha->osc_max_cnt) {
			if (ha->outstanding_cmds[index] ==
			    QL_ABORTED_SRB(ha)) {
				EL(ha, "Aborted command sp=QL_ABORTED_SRB, "
				    "handle=%xh\n", resp_identifier);
				ha->outstanding_cmds[index] = NULL;
			} else {
				EL(ha, "Aborted command sp=%ph, handle=%xh\n",
				    (void *) ha->outstanding_cmds[index],
				    resp_identifier);
			}
		} else {
			EL(ha, "Aborted command handle=%xh, out of range "
			    "index=%xh\n", resp_identifier, index);
		}
		return;
	}

	QL_PRINT_2(ha, "started, packet:\n");
	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);

	/* Log the first (highest-priority) error bit that is set. */
	if (pkt->entry_status & BIT_6) {
		EL(ha, "Request Queue DMA error\n");
	} else if (pkt->entry_status & BIT_5) {
		EL(ha, "Invalid Entry Order\n");
	} else if (pkt->entry_status & BIT_4) {
		EL(ha, "Invalid Entry Count\n");
	} else if (pkt->entry_status & BIT_3) {
		EL(ha, "Invalid Entry Parameter\n");
	} else if (pkt->entry_status & BIT_2) {
		EL(ha, "Invalid Entry Type\n");
	} else if (pkt->entry_status & BIT_1) {
		EL(ha, "Busy\n");
	} else {
		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
	}

	/*
	 * Validate the response entry handle.  The low bits of the handle
	 * index into ha->outstanding_cmds[]; the full handle must match the
	 * SRB's recorded handle, otherwise signal an ISP abort.
	 */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Slot empty: retry with DMA re-sync (race with FW). */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		/* Map the entry-status bits to a packet completion reason. */
		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
			/* Bad payload or header, set error status. */
			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
			sp->pkt->pkt_reason = CS_QUEUE_FULL;
		} else {
			/* Set error status. */
			sp->pkt->pkt_reason = CS_UNKNOWN;
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(ha, "done\n");
}
1898
1899 /*
1900 * ql_status_entry
1901 * Processes received ISP2200-2300 status entry.
1902 *
1903 * Input:
1904 * ha: adapter state pointer.
1905 * rsp_q: response queue structure pointer.
1906 * pkt: entry pointer.
1907 * done_q: done queue pointer.
1908 * set_flags: task daemon flags to set.
1909 * reset_flags: task daemon flags to reset.
1910 *
1911 * Returns:
1912 * BIT_0 = CS_RESET status received.
1913 *
1914 * Context:
1915 * Interrupt or Kernel context, no mailbox commands allowed.
1916 */
1917 /* ARGSUSED */
static int
ql_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
    sts_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	ql_srb_t *sp = NULL;
	uint32_t index, resp_identifier;
	uint16_t comp_status;
	int rval = 0;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Validate the response entry handle: the low bits index into
	 * ha->outstanding_cmds[] and the full handle must match the SRB's
	 * recorded handle; any mismatch signals an ISP abort.
	 */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Slot empty: retry with DMA re-sync (race with FW). */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
		    &pkt->comp_status);

		/*
		 * We dont care about SCSI QFULLs.
		 */
		if (comp_status == CS_QUEUE_FULL) {
			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
			    sp->lun_queue->target_queue->d_id.b24,
			    sp->lun_queue->lun_no);
			comp_status = CS_COMPLETE;
		}

		/*
		 * 2300 firmware marks completion status as data underrun
		 * for scsi qfulls. Make it transport complete.
		 */
		if (CFG_IST(ha, CFG_CTRL_2363) &&
		    comp_status == CS_DATA_UNDERRUN &&
		    pkt->scsi_status_l != STATUS_GOOD) {
			comp_status = CS_COMPLETE;
		}

		/*
		 * Workaround T3 issue where we do not get any data xferred
		 * but get back a good status.  A write that moved no data
		 * yet reports GOOD/COMPLETE with no residual is forced to
		 * CS_ABORTED so it gets retried.
		 */
		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
		    comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
		    pkt->residual_length == 0 &&
		    sp->fcp &&
		    sp->fcp->fcp_data_len != 0 &&
		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
		    SF_DATA_OUT) {
			comp_status = CS_ABORTED;
		}

		if (sp->flags & SRB_MS_PKT) {
			/*
			 * Ideally it should never be true. But there
			 * is a bug in FW which upon receiving invalid
			 * parameters in MS IOCB returns it as
			 * status entry and not as ms entry type.
			 */
			ql_ms_entry(ha, rsp_q, (ms_entry_t *)pkt, done_q,
			    set_flags, reset_flags);
			QL_PRINT_3(ha, "ql_ms_entry done\n");
			return (0);
		}

		/*
		 * Fast path to good SCSI I/O completion
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;
			sp->pkt->pkt_reason = comp_status;
			ql_fast_fcp_post(sp, rsp_q);
			QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
			return (0);
		}
		/* Anything else goes through the full error path. */
		rval = ql_status_error(ha, rsp_q, sp, pkt, done_q, set_flags,
		    reset_flags);
	}
	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2038
2039 /*
2040 * ql_24xx_status_entry
2041 * Processes received ISP24xx status entry.
2042 *
2043 * Input:
2044 * ha: adapter state pointer.
2045 * rsp_q: response queue structure pointer.
2046 * pkt: entry pointer.
2047 * done_q: done queue pointer.
2048 * set_flags: task daemon flags to set.
2049 * reset_flags: task daemon flags to reset.
2050 *
2051 * Returns:
2052 * BIT_0 = CS_RESET status received.
2053 *
2054 * Context:
2055 * Interrupt or Kernel context, no mailbox commands allowed.
2056 */
2057 /* ARGSUSED */
static int
ql_24xx_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
    sts_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	ql_srb_t *sp = NULL;
	uint16_t comp_status;
	uint32_t index, resp_identifier;
	int rval = 0;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Validate the response entry handle: the low bits index into
	 * ha->outstanding_cmds[] and the full handle must match the SRB's
	 * recorded handle; any mismatch signals an ISP abort.
	 */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Slot empty: retry with DMA re-sync (race with FW). */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
		    &pkt->comp_status);

		/* We dont care about SCSI QFULLs. */
		if (comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
			    sp->lun_queue->target_queue->d_id.b24,
			    sp->lun_queue->lun_no);
			comp_status = CS_COMPLETE;
		}

		/*
		 * Firmware may mark completion status as data underrun
		 * for scsi qfulls. Make it transport complete.
		 * (Same workaround as in ql_status_entry; the original
		 * comment referenced 2300 firmware.)
		 */
		if (comp_status == CS_DATA_UNDERRUN &&
		    pkt->scsi_status_l != STATUS_GOOD) {
			comp_status = CS_COMPLETE;
		}

		/*
		 * Workaround T3 issue where we do not get any data xferred
		 * but get back a good status.  A write with a residual yet
		 * GOOD/COMPLETE status is forced to CS_ABORTED so it gets
		 * retried.
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
		    pkt->residual_length != 0 &&
		    sp->fcp &&
		    sp->fcp->fcp_data_len != 0 &&
		    sp->fcp->fcp_cntl.cntl_write_data) {
			comp_status = CS_ABORTED;
		}

		/*
		 * Fast path to good SCSI I/O completion
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;
			sp->pkt->pkt_reason = comp_status;
			ql_fast_fcp_post(sp, rsp_q);
			QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
			return (0);
		}
		/* Anything else goes through the full error path. */
		rval = ql_status_error(ha, rsp_q, sp, (sts_entry_t *)pkt,
		    done_q, set_flags, reset_flags);
	}
	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2160
2161 /*
2162 * ql_verify_preprocessed_cmd
2163 * Handles preprocessed cmds..
2164 *
2165 * Input:
2166 * ha: adapter state pointer.
2167 * rsp_q: response queue structure pointer.
2168 * pkt_handle: handle pointer.
2169 * resp_identifier: resp_identifier pointer.
2170 * set_flags: task daemon flags to set.
2171 * reset_flags: task daemon flags to reset.
2172 *
2173 * Returns:
2174 * srb pointer or NULL
2175 *
2176 * Context:
2177 * Interrupt or Kernel context, no mailbox commands allowed.
2178 */
2179 /* ARGSUSED */
2180 ql_srb_t *
2181 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2182 uint32_t *pkt_handle, uint32_t *resp_identifier, uint64_t *set_flags,
2183 uint64_t *reset_flags)
2184 {
2185 ql_srb_t *sp = NULL;
2186 uint32_t index;
2187 uint32_t get_handle = 10;
2188
2189 while (get_handle) {
2190 /* Get handle. */
2191 *resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
2192 pkt_handle);
2193 index = *resp_identifier & OSC_INDEX_MASK;
2194 /* Validate handle. */
2195 if (index < ha->osc_max_cnt) {
2196 sp = ha->outstanding_cmds[index];
2197 }
2198
2199 if (sp != NULL) {
2200 EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
2201 *resp_identifier, get_handle, index);
2202 break;
2203 } else {
2204 get_handle -= 1;
2205 drv_usecwait(10000);
2206 if (get_handle == 1 && rsp_q->rsp_ring.dma_handle) {
2207 /* Last chance, Sync whole DMA buffer. */
2208 (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
2209 0, 0, DDI_DMA_SYNC_FORCPU);
2210 EL(ha, "last chance DMA sync, index=%xh\n",
2211 index);
2212 }
2213 }
2214 }
2215 QL_PRINT_3(ha, "done\n");
2216
2217 return (sp);
2218 }
2219
2220
2221 /*
2222 * ql_status_error
2223 * Processes received ISP status entry error.
2224 *
2225 * Input:
2226 * ha: adapter state pointer.
2227 * rsp_q: response queue structure pointer.
2228 * sp: SRB pointer.
2229 * pkt: entry pointer.
2230 * done_q: done queue pointer.
2231 * set_flags: task daemon flags to set.
2232 * reset_flags: task daemon flags to reset.
2233 *
2234 * Returns:
2235 * BIT_0 = CS_RESET status received.
2236 *
2237 * Context:
2238 * Interrupt or Kernel context, no mailbox commands allowed.
2239 */
/* ARGSUSED */
static int
ql_status_error(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ql_srb_t *sp,
    sts_entry_t *pkt23, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	uint32_t		sense_sz = 0;
	uint32_t		cnt;
	ql_tgt_t		*tq;
	fcp_rsp_t		*fcpr;
	struct fcp_rsp_info	*rsp;
	int			rval = 0;

	/*
	 * Format-independent view of the status entry, filled from either
	 * the 2xxx (pkt23) or 24xx+ (pkt24) IOCB layout so the common
	 * processing below need not care which firmware type produced it.
	 */
	struct {
		uint8_t		*rsp_info;
		uint8_t		*req_sense_data;
		uint32_t	residual_length;
		uint32_t	fcp_residual_length;
		uint32_t	rsp_info_length;
		uint32_t	req_sense_length;
		uint16_t	comp_status;
		uint8_t		state_flags_l;
		uint8_t		state_flags_h;
		uint8_t		scsi_status_l;
		uint8_t		scsi_status_h;
	} sts;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;

		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(
		    rsp_q->rsp_ring.acc_handle, &pkt24->comp_status);
		sts.scsi_status_l = pkt24->scsi_status_l;
		sts.scsi_status_h = pkt24->scsi_status_h;

		/* Setup firmware residuals. */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(rsp_q->rsp_ring.acc_handle,
		    (uint32_t *)&pkt24->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    ddi_get32(rsp_q->rsp_ring.acc_handle,
		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;

		/*
		 * NOTE(review): fcp_rsp_residual_count is read directly
		 * below (no ddi_get32()) while residual_length above was
		 * fetched through the access handle; this assumes host and
		 * ring byte order match — confirm on big-endian hosts.
		 *
		 * If the firmware and FCP residuals disagree, distrust the
		 * FCP_RESID_UNDER indication.
		 */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {

			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
			    sts.residual_length,
			    pkt24->fcp_rsp_residual_count);
			sts.scsi_status_h = (uint8_t)
			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
		}

		/* Setup state flags. */
		sts.state_flags_l = pkt24->state_flags_l;
		sts.state_flags_h = pkt24->state_flags_h;

		/*
		 * Synthesize state flags; data is treated as transferred
		 * unless the command underran its entire transfer length.
		 */
		if (sp->fcp->fcp_data_len &&
		    (sts.comp_status != CS_DATA_UNDERRUN ||
		    sts.residual_length != sp->fcp->fcp_data_len)) {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_XFERRED_DATA | SF_GOT_STATUS);
		} else {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_GOT_STATUS);
		}
		if (sp->fcp->fcp_cntl.cntl_write_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_OUT);
		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_IN);
		}
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_HEAD_OF_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_ORDERED_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_SIMPLE_Q);
		}

		/* Setup FCP response info. */
		sts.rsp_info = &pkt24->rsp_sense_data[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get32(
			    rsp_q->rsp_ring.acc_handle,
			    (uint32_t *)&pkt24->fcp_rsp_data_length);
			/* Clamp to the structure the caller will parse. */
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
			/* 24xx response data is swapped per 32-bit word. */
			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
				ql_chg_endian(sts.rsp_info + cnt, 4);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data. */
		sts.req_sense_data =
		    &pkt24->rsp_sense_data[sts.rsp_info_length];
		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
			sts.req_sense_length =
			    ddi_get32(rsp_q->rsp_ring.acc_handle,
			    (uint32_t *)&pkt24->fcp_sense_length);
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_ARQ_DONE);
			/* Byte-swap only what fits within this entry. */
			sense_sz = (uint32_t)
			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
			    (uintptr_t)sts.req_sense_data);
			for (cnt = 0; cnt < sense_sz; cnt += 4) {
				ql_chg_endian(sts.req_sense_data + cnt, 4);
			}
		} else {
			sts.req_sense_length = 0;
		}
	} else {
		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(
		    rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
		sts.scsi_status_l = pkt23->scsi_status_l;
		sts.scsi_status_h = pkt23->scsi_status_h;

		/* Setup firmware residuals. */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(rsp_q->rsp_ring.acc_handle,
		    (uint32_t *)&pkt23->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    sts.residual_length : 0;

		/* Setup state flags. */
		sts.state_flags_l = pkt23->state_flags_l;
		sts.state_flags_h = pkt23->state_flags_h;

		/* Setup FCP response info. */
		sts.rsp_info = &pkt23->rsp_info[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get16(
			    rsp_q->rsp_ring.acc_handle,
			    (uint16_t *)&pkt23->rsp_info_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data. */
		sts.req_sense_data = &pkt23->req_sense_data[0];
		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
		    ddi_get16(rsp_q->rsp_ring.acc_handle,
		    (uint16_t *)&pkt23->req_sense_length) : 0;
	}

	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);

	/* Build the FCP response image the transport layer expects. */
	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
	    sizeof (fcp_rsp_t));

	tq = sp->lun_queue->target_queue;

	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
		fcpr->fcp_u.fcp_status.sense_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_OVER) {
		fcpr->fcp_u.fcp_status.resid_over = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_UNDER) {
		fcpr->fcp_u.fcp_status.resid_under = 1;
	}
	fcpr->fcp_u.fcp_status.reserved_1 = 0;

	/* Set ISP completion status */
	sp->pkt->pkt_reason = sts.comp_status;

	/* Update statistics. */
	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {

		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
		if (sense_sz > sts.rsp_info_length) {
			sense_sz = sts.rsp_info_length;
		}

		/* copy response information data. */
		if (sense_sz) {
			ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
			    (uint8_t *)rsp, sts.rsp_info, sense_sz,
			    DDI_DEV_AUTOINCR);
		}
		fcpr->fcp_response_len = sense_sz;

		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
		    fcpr->fcp_response_len);

		/* Byte 3 of the FCP rsp_info is the rsp_code. */
		switch (*(sts.rsp_info + 3)) {
		case FCP_NO_FAILURE:
			break;
		case FCP_DL_LEN_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].dl_len_mismatches++;
			break;
		case FCP_CMND_INVALID:
			break;
		case FCP_DATA_RO_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].data_ro_mismatches++;
			break;
		case FCP_TASK_MGMT_NOT_SUPPTD:
			break;
		case FCP_TASK_MGMT_FAILED:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].task_mgmt_failures++;
			break;
		default:
			break;
		}
	} else {
		/*
		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
		 * sts.scsi_status_h, sp->pkt->pkt_rsplen);
		 */
		fcpr->fcp_response_len = 0;
	}

	/* Set reset status received. */
	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
		*set_flags |= MARKER_NEEDED;
		rval |= BIT_0;
	}

	/*
	 * Classify the failure: each branch below either marks the srb
	 * for retry or finalizes the FCP response.  Tape devices are
	 * never retried here (preserves command ordering).
	 */
	if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
	    ha->task_daemon_flags & LOOP_DOWN) {
		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
		    tq->d_id.b24, sp->lun_queue->lun_no);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    tq->port_down_retry_count != 0 &&
	    (sts.comp_status == CS_INCOMPLETE ||
	    sts.comp_status == CS_PORT_UNAVAILABLE ||
	    sts.comp_status == CS_PORT_LOGGED_OUT ||
	    sts.comp_status == CS_PORT_CONFIG_CHG ||
	    sts.comp_status == CS_PORT_BUSY)) {
		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
		    tq->port_down_retry_count);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;

		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags |= TQF_QUEUE_SUSPENDED;

			/* Decrement port down count. */
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				tq->port_down_retry_count--;
			}

			DEVICE_QUEUE_UNLOCK(tq);

			/* Explicit logout for ports reported gone. */
			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
			    == 0 &&
			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
				sp->ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(sp->ha, tq, done_q);
			}

			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer == 0) {
				/* No delay configured: retry right away. */
				if ((ha->port_retry_timer =
				    ha->port_down_retry_delay) == 0) {
					*set_flags |=
					    PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (sts.comp_status == CS_RESET ||
	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
		if (sts.comp_status == CS_RESET) {
			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		} else if (sts.comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
			    tq->qfull_retry_count);
			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->qfull_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->qfull_retry_delay) ==
					    0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}
		} else {
			/* Unsolicited abort: retry and resync the loop. */
			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);

			if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) && LOOP_READY(ha)) {
				*set_flags |= MARKER_NEEDED;
				rval |= BIT_0;
			}
		}

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else {
		/* No retry: finish building the FCP response. */
		fcpr->fcp_resid =
		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
		    sp->fcp->fcp_data_len : sts.fcp_residual_length;

		/* Underrun reported without a matching FCP indication. */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {

			if (sts.scsi_status_l == STATUS_CHECK) {
				sp->pkt->pkt_reason = CS_COMPLETE;
			} else {
				EL(ha, "transport error - "
				    "underrun & invalid resid\n");
				EL(ha, "ssh=%xh, ssl=%xh\n",
				    sts.scsi_status_h, sts.scsi_status_l);
				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
			}
		}

		/* Ignore firmware underrun error. */
		if (sts.comp_status == CS_DATA_UNDERRUN &&
		    (sts.scsi_status_h & FCP_RESID_UNDER ||
		    (sts.scsi_status_l != STATUS_CHECK &&
		    sts.scsi_status_l != STATUS_GOOD))) {
			sp->pkt->pkt_reason = CS_COMPLETE;
		}

		if (sp->pkt->pkt_reason != CS_COMPLETE) {
			ha->xioctl->DeviceErrorCount++;
			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh,"
			    " pkt_reason=%xh, spf=%xh, sp=%ph\n",
			    sts.comp_status, tq->d_id.b24,
			    sp->lun_queue->lun_no, sp->pkt->pkt_reason,
			    sp->flags, sp);
		}

		/* Set target request sense data. */
		if (sts.scsi_status_l == STATUS_CHECK) {
			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {

				/*
				 * Byte 2 of the sense data is the sense
				 * key; no-sense and unit-attention are not
				 * counted as device errors.
				 */
				if (sp->pkt->pkt_reason == CS_COMPLETE &&
				    sts.req_sense_data[2] != KEY_NO_SENSE &&
				    sts.req_sense_data[2] !=
				    KEY_UNIT_ATTENTION) {
					ha->xioctl->DeviceErrorCount++;
				}

				sense_sz = sts.req_sense_length;

				/* Insure data does not exceed buf. */
				if (sp->pkt->pkt_rsplen <=
				    (uint32_t)sizeof (fcp_rsp_t) +
				    fcpr->fcp_response_len) {
					sp->request_sense_length = 0;
				} else {
					sp->request_sense_length = (uint32_t)
					    (sp->pkt->pkt_rsplen -
					    sizeof (fcp_rsp_t) -
					    fcpr->fcp_response_len);
				}

				if (sense_sz <
				    sp->request_sense_length) {
					sp->request_sense_length =
					    sense_sz;
				}

				sp->request_sense_ptr = (caddr_t)rsp;

				/*
				 * Only the sense bytes that fit inside
				 * this entry are copied here; any
				 * remainder arrives via status
				 * continuation entries (see
				 * ql_status_cont_entry()).
				 */
				sense_sz = (uint32_t)
				    (((uintptr_t)pkt23 +
				    sizeof (sts_entry_t)) -
				    (uintptr_t)sts.req_sense_data);
				if (sp->request_sense_length <
				    sense_sz) {
					sense_sz =
					    sp->request_sense_length;
				}

				fcpr->fcp_sense_len = sense_sz;

				/* Move sense data. */
				ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
				    (uint8_t *)sp->request_sense_ptr,
				    sts.req_sense_data,
				    (size_t)sense_sz,
				    DDI_DEV_AUTOINCR);

				sp->request_sense_ptr += sense_sz;
				sp->request_sense_length -= sense_sz;
				/* More sense pending: park srb for cont. */
				if (sp->request_sense_length != 0 &&
				    !(CFG_IST(ha, CFG_CTRL_82XX))) {
					rsp_q->status_srb = sp;
				}
			}

			if (sense_sz != 0) {
				EL(sp->ha, "check condition sense data, "
				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
				    sp->lun_queue->lun_no,
				    sts.req_sense_data[0],
				    sts.req_sense_data[1],
				    sts.req_sense_data[2],
				    sts.req_sense_data[3],
				    sts.req_sense_data[4],
				    sts.req_sense_data[5],
				    sts.req_sense_data[6],
				    sts.req_sense_data[7],
				    sts.req_sense_data[8],
				    sts.req_sense_data[9],
				    sts.req_sense_data[10],
				    sts.req_sense_data[11],
				    sts.req_sense_data[12],
				    sts.req_sense_data[13],
				    sts.req_sense_data[14],
				    sts.req_sense_data[15],
				    sts.req_sense_data[16],
				    sts.req_sense_data[17]);
			} else {
				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
			}
		}
	}

	/* Set completed status. */
	sp->flags |= SRB_ISP_COMPLETED;

	/*
	 * Defer queuing while continuation entries are still pending;
	 * ql_status_cont_entry() queues the command once all remaining
	 * sense data has been collected.
	 */
	if (rsp_q->status_srb == NULL) {
		ql_add_link_b(done_q, &sp->cmd);
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2729
2730 /*
2731 * ql_status_cont_entry
2732 * Processes status continuation entry.
2733 *
2734 * Input:
2735 * ha: adapter state pointer.
2736 * rsp_q: response queue structure pointer.
2737 * pkt: entry pointer.
2738 * done_q: done queue pointer.
2739 * set_flags: task daemon flags to set.
2740 * reset_flags: task daemon flags to reset.
2741 *
2742 * Context:
2743 * Interrupt or Kernel context, no mailbox commands allowed.
2744 */
2745 /* ARGSUSED */
2746 static void
2747 ql_status_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2748 sts_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2749 uint64_t *reset_flags)
2750 {
2751 uint32_t sense_sz, index;
2752 ql_srb_t *sp = rsp_q->status_srb;
2753
2754 QL_PRINT_3(ha, "started\n");
2755
2756 if (sp != NULL && sp->request_sense_length) {
2757 if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2758 sense_sz = sizeof (pkt->req_sense_data);
2759 } else {
2760 sense_sz = sp->request_sense_length;
2761 }
2762
2763 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2764 for (index = 0; index < sense_sz; index += 4) {
2765 ql_chg_endian((uint8_t *)
2766 &pkt->req_sense_data[0] + index, 4);
2767 }
2768 }
2769
2770 /* Move sense data. */
2771 ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2772 (uint8_t *)sp->request_sense_ptr,
2773 (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2774 DDI_DEV_AUTOINCR);
2775
2776 sp->request_sense_ptr += sense_sz;
2777 sp->request_sense_length -= sense_sz;
2778
2779 /* Place command on done queue. */
2780 if (sp->request_sense_length == 0) {
2781 ql_add_link_b(done_q, &sp->cmd);
2782 rsp_q->status_srb = NULL;
2783 }
2784 }
2785
2786 QL_PRINT_3(ha, "done\n");
2787 }
2788
2789 /*
2790 * ql_ip_entry
2791 * Processes received ISP IP entry.
2792 *
2793 * Input:
2794 * ha: adapter state pointer.
2795 * rsp_q: response queue structure pointer.
2796 * pkt: entry pointer.
2797 * done_q: done queue pointer.
2798 * set_flags: task daemon flags to set.
2799 * reset_flags: task daemon flags to reset.
2800 *
2801 * Context:
2802 * Interrupt or Kernel context, no mailbox commands allowed.
2803 */
/* ARGSUSED */
static void
ql_ip_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ip_entry_t *pkt23,
    ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
{
	ql_srb_t	*sp = NULL;
	uint32_t	index, resp_identifier;
	ql_tgt_t	*tq;

	QL_PRINT_3(ha, "started\n");

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
	    &pkt23->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Handle may still be inbound via DMA; poll for it. */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt23->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Command already aborted; free the slot. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Stale/corrupt handle: force an abort. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_24XX)) {
			/* 24xx uses a different IP command entry layout. */
			ip_cmd_entry_t *pkt24 = (ip_cmd_entry_t *)pkt23;

			sp->pkt->pkt_reason = ddi_get16(
			    rsp_q->rsp_ring.acc_handle, &pkt24->hdl_status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
		}

		if (ha->task_daemon_flags & LOOP_DOWN) {
			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
			    tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
			    sp->pkt->pkt_reason, tq->d_id.b24,
			    tq->port_down_retry_count);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Explicit logout for ports reported gone. */
			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
				ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(ha, tq, done_q);
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* No delay: retry immediately. */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;
		} else {
			if (sp->pkt->pkt_reason != CS_COMPLETE) {
				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
				    sp->pkt->pkt_reason, tq->d_id.b24);
			}
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(ha, "done\n");
}
2934
2935 /*
2936 * ql_ip_rcv_entry
2937 * Processes received ISP IP buffers entry.
2938 *
2939 * Input:
2940 * ha: adapter state pointer.
2941 * rsp_q: response queue structure pointer.
2942 * pkt: entry pointer.
2943 * done_q: done queue pointer.
2944 * set_flags: task daemon flags to set.
2945 * reset_flags: task daemon flags to reset.
2946 *
2947 * Context:
2948 * Interrupt or Kernel context, no mailbox commands allowed.
2949 */
2950 /* ARGSUSED */
2951 static void
2952 ql_ip_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2953 ip_rcv_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2954 uint64_t *reset_flags)
2955 {
2956 port_id_t s_id;
2957 uint16_t index;
2958 uint8_t cnt;
2959 ql_tgt_t *tq;
2960
2961 QL_PRINT_3(ha, "started\n");
2962
2963 /* Locate device queue. */
2964 s_id.b.al_pa = pkt->s_id[0];
2965 s_id.b.area = pkt->s_id[1];
2966 s_id.b.domain = pkt->s_id[2];
2967 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2968 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2969 return;
2970 }
2971
2972 tq->ub_sequence_length = (uint16_t)ddi_get16(
2973 rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
2974 tq->ub_total_seg_cnt = pkt->segment_count;
2975 tq->ub_seq_id = ++ha->ub_seq_id;
2976 tq->ub_seq_cnt = 0;
2977 tq->ub_frame_ro = 0;
2978 tq->ub_loop_id = pkt->loop_id;
2979 ha->rcv_dev_q = tq;
2980
2981 for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2982 tq->ub_total_seg_cnt; cnt++) {
2983
2984 index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
2985 &pkt->buffer_handle[cnt]);
2986
2987 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2988 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2989 *set_flags |= ISP_ABORT_NEEDED;
2990 break;
2991 }
2992 }
2993
2994 QL_PRINT_3(ha, "done\n");
2995 }
2996
2997 /*
2998 * ql_ip_rcv_cont_entry
2999 * Processes received ISP IP buffers continuation entry.
3000 *
3001 * Input:
3002 * ha: adapter state pointer.
3003 * rsp_q: response queue structure pointer.
3004 * pkt: entry pointer.
3005 * done_q: done queue pointer.
3006 * set_flags: task daemon flags to set.
3007 * reset_flags: task daemon flags to reset.
3008 *
3009 * Context:
3010 * Interrupt or Kernel context, no mailbox commands allowed.
3011 */
3012 /* ARGSUSED */
3013 static void
3014 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3015 ip_rcv_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3016 uint64_t *reset_flags)
3017 {
3018 uint16_t index;
3019 uint8_t cnt;
3020 ql_tgt_t *tq;
3021
3022 QL_PRINT_3(ha, "started\n");
3023
3024 if ((tq = ha->rcv_dev_q) == NULL) {
3025 EL(ha, "No IP receive device\n");
3026 return;
3027 }
3028
3029 for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
3030 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
3031
3032 index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
3033 &pkt->buffer_handle[cnt]);
3034
3035 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3036 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3037 *set_flags |= ISP_ABORT_NEEDED;
3038 break;
3039 }
3040 }
3041
3042 QL_PRINT_3(ha, "done\n");
3043 }
3044
3045 /*
3046 * ip_rcv_24xx_entry_t
3047 * Processes received ISP24xx IP buffers entry.
3048 *
3049 * Input:
3050 * ha: adapter state pointer.
3051 * rsp_q: response queue structure pointer.
3052 * pkt: entry pointer.
3053 * done_q: done queue pointer.
3054 * set_flags: task daemon flags to set.
3055 * reset_flags: task daemon flags to reset.
3056 *
3057 * Context:
3058 * Interrupt or Kernel context, no mailbox commands allowed.
3059 */
3060 /* ARGSUSED */
3061 static void
3062 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3063 ip_rcv_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3064 uint64_t *reset_flags)
3065 {
3066 port_id_t s_id;
3067 uint16_t index;
3068 uint8_t cnt;
3069 ql_tgt_t *tq;
3070
3071 QL_PRINT_3(ha, "started\n");
3072
3073 /* Locate device queue. */
3074 s_id.b.al_pa = pkt->s_id[0];
3075 s_id.b.area = pkt->s_id[1];
3076 s_id.b.domain = pkt->s_id[2];
3077 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
3078 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
3079 return;
3080 }
3081
3082 if (tq->ub_total_seg_cnt == 0) {
3083 tq->ub_sequence_length = (uint16_t)ddi_get16(
3084 rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
3085 tq->ub_total_seg_cnt = pkt->segment_count;
3086 tq->ub_seq_id = ++ha->ub_seq_id;
3087 tq->ub_seq_cnt = 0;
3088 tq->ub_frame_ro = 0;
3089 tq->ub_loop_id = (uint16_t)ddi_get16(
3090 rsp_q->rsp_ring.acc_handle, &pkt->n_port_hdl);
3091 }
3092
3093 for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
3094 tq->ub_total_seg_cnt; cnt++) {
3095
3096 index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
3097 &pkt->buffer_handle[cnt]);
3098
3099 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3100 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3101 *set_flags |= ISP_ABORT_NEEDED;
3102 break;
3103 }
3104 }
3105
3106 QL_PRINT_3(ha, "done\n");
3107 }
3108
3109 /*
3110 * ql_ms_entry
3111 * Processes received Name/Management/CT Pass-Through entry.
3112 *
3113 * Input:
3114 * ha: adapter state pointer.
3115 * rsp_q: response queue structure pointer.
3116 * pkt23: entry pointer.
3117 * done_q: done queue pointer.
3118 * set_flags: task daemon flags to set.
3119 * reset_flags: task daemon flags to reset.
3120 *
3121 * Context:
3122 * Interrupt or Kernel context, no mailbox commands allowed.
3123 */
/* ARGSUSED */
static void
ql_ms_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ms_entry_t *pkt23,
    ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
{
	ql_srb_t		*sp = NULL;
	uint32_t		index, cnt, resp_identifier;
	ql_tgt_t		*tq;
	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;

	QL_PRINT_3(ha, "started\n");

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
	    &pkt23->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Handle may still be inbound via DMA; poll for it. */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt23->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Command already aborted; free the slot. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Stale/corrupt handle: force an abort. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		/*
		 * NOTE(review): this early return leaves sp off the done
		 * queue and skips the trailing QL_PRINT_3 — presumably the
		 * ISP abort path recovers the srb; confirm.
		 */
		if (!(sp->flags & SRB_MS_PKT)) {
			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
			    sp->flags);
			*set_flags |= ISP_ABORT_NEEDED;
			return;
		}

		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			sp->pkt->pkt_reason = ddi_get16(
			    rsp_q->rsp_ring.acc_handle, &pkt24->status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
		}

		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
		    sp->retry_count) {
			EL(ha, "Resouce Unavailable Retry = %d\n",
			    sp->retry_count);

			/* Set retry status. */
			sp->retry_count--;
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* Fixed 2-tick back-off. */
					ha->port_retry_timer = 2;
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* No delay: retry immediately. */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}
			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
			/*
			 * Underrun is acceptable if at least a full CT
			 * header was returned.
			 */
			cnt = ddi_get32(rsp_q->rsp_ring.acc_handle,
			    &pkt24->resp_byte_count);
			if (cnt < sizeof (fc_ct_header_t)) {
				EL(ha, "Data underrun\n");
			} else {
				sp->pkt->pkt_reason = CS_COMPLETE;
			}

		} else if (sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT) {
			EL(ha, "Port unavailable %xh\n", sp->pkt->pkt_reason);
			DEVICE_QUEUE_LOCK(tq);
			tq->flags |= TQF_LOGIN_NEEDED;
			DEVICE_QUEUE_UNLOCK(tq);
			/* Report as timeout; a re-login will be attempted. */
			sp->pkt->pkt_reason = CS_TIMEOUT;

		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
		}

		if (sp->pkt->pkt_reason == CS_COMPLETE) {
			/*EMPTY*/
			QL_PRINT_3(ha, "ct_cmdrsp=%x%02xh resp\n",
			    sp->pkt->pkt_cmd[8], sp->pkt->pkt_cmd[9]);
			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
		}

		/* For nameserver restore command, management change header. */
		if ((sp->flags & SRB_RETRY) == 0) {
			tq->d_id.b24 == FS_NAME_SERVER ?
			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
			    sp->pkt->pkt_cmd, B_TRUE) :
			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
			    sp->pkt->pkt_resp, B_TRUE);
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(ha, "done\n");
}
3298
3299 /*
3300 * ql_report_id_entry
 *	Processes received Report ID Acquisition entry.
3302 *
3303 * Input:
3304 * ha: adapter state pointer.
3305 * rsp_q: response queue structure pointer.
3306 * pkt: entry pointer.
3307 * done_q: done queue pointer.
3308 * set_flags: task daemon flags to set.
3309 * reset_flags: task daemon flags to reset.
3310 *
3311 * Context:
3312 * Interrupt or Kernel context, no mailbox commands allowed.
3313 */
3314 /* ARGSUSED */
3315 static void
3316 ql_report_id_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3317 report_id_acq_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3318 uint64_t *reset_flags)
3319 {
3320 ql_adapter_state_t *vha;
3321
3322 QL_PRINT_3(ha, "started\n");
3323
3324 EL(ha, "format=%d, index=%d, status=%d\n",
3325 pkt->format, pkt->vp_index, pkt->vp_status);
3326
3327 if (pkt->format == 1) {
3328 /* Locate port state structure. */
3329 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3330 if (vha->vp_index == pkt->vp_index) {
3331 break;
3332 }
3333 }
3334 if (vha != NULL) {
3335 if (pkt->vp_status == CS_COMPLETE ||
3336 pkt->vp_status == CS_PORT_ID_CHANGE) {
3337 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3338 vha->fcoe_fcf_idx = pkt->fcf_index;
3339 }
3340 if (vha->vp_index != 0) {
3341 *set_flags |= LOOP_RESYNC_NEEDED;
3342 *reset_flags &= ~LOOP_RESYNC_NEEDED;
3343 vha->loop_down_timer =
3344 LOOP_DOWN_TIMER_OFF;
3345 TASK_DAEMON_LOCK(ha);
3346 vha->task_daemon_flags |=
3347 LOOP_RESYNC_NEEDED;
3348 vha->task_daemon_flags &= ~LOOP_DOWN;
3349 TASK_DAEMON_UNLOCK(ha);
3350 }
3351 ADAPTER_STATE_LOCK(ha);
3352 vha->flags &= ~VP_ID_NOT_ACQUIRED;
3353 ADAPTER_STATE_UNLOCK(ha);
3354 } else {
3355 /* FA-WWPN failure. */
3356 if (pkt->vp_status == CS_INCOMPLETE &&
3357 pkt->ls_rjt_reason_code == 0xff &&
3358 pkt->ls_rjt_explanation == 0x44) {
3359 *set_flags |= ISP_ABORT_NEEDED;
3360 }
3361 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3362 EL(ha, "sts sc=%d, rjt_rea=%xh, "
3363 "rjt_exp=%xh, rjt_sc=%xh\n",
3364 pkt->status_subcode,
3365 pkt->ls_rjt_reason_code,
3366 pkt->ls_rjt_explanation,
3367 pkt->ls_rjt_subcode);
3368 }
3369 ADAPTER_STATE_LOCK(ha);
3370 vha->flags |= VP_ID_NOT_ACQUIRED;
3371 ADAPTER_STATE_UNLOCK(ha);
3372 }
3373 }
3374 }
3375
3376 QL_PRINT_3(ha, "done\n");
3377 }
3378
3379 /*
 * ql_els_passthru_entry
3381 * Processes received ELS Pass-Through entry.
3382 *
3383 * Input:
3384 * ha: adapter state pointer.
3385 * rsp_q: response queue structure pointer.
 *	rsp:		entry pointer.
3387 * done_q: done queue pointer.
3388 * set_flags: task daemon flags to set.
3389 * reset_flags: task daemon flags to reset.
3390 *
3391 * Context:
3392 * Interrupt or Kernel context, no mailbox commands allowed.
3393 */
3394 /* ARGSUSED */
3395 static void
3396 ql_els_passthru_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3397 els_passthru_entry_rsp_t *rsp, ql_head_t *done_q, uint64_t *set_flags,
3398 uint64_t *reset_flags)
3399 {
3400 ql_tgt_t *tq;
3401 port_id_t s_id;
3402 ql_srb_t *srb = NULL;
3403 uint32_t index, resp_identifier;
3404
3405 QL_PRINT_3(ha, "started\n");
3406
3407 /* Validate the response entry handle. */
3408 resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &rsp->handle);
3409 index = resp_identifier & OSC_INDEX_MASK;
3410 if (index < ha->osc_max_cnt) {
3411 /* the index seems reasonable */
3412 if ((srb = ha->outstanding_cmds[index]) == NULL) {
3413 srb = ql_verify_preprocessed_cmd(ha, rsp_q,
3414 (uint32_t *)&rsp->handle,
3415 (uint32_t *)&resp_identifier, set_flags,
3416 reset_flags);
3417 }
3418 if (srb != NULL) {
3419 if (srb == QL_ABORTED_SRB(ha)) {
3420 EL(ha, "QL_ABORTED_SRB handle=%xh\n",
3421 resp_identifier);
3422 srb = NULL;
3423 ha->outstanding_cmds[index] = NULL;
3424 } else if (srb->handle == resp_identifier) {
3425 /* Neo, you're the one... */
3426 ha->outstanding_cmds[index] = NULL;
3427 srb->handle = 0;
3428 srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3429 } else {
3430 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3431 resp_identifier, srb->handle);
3432 srb = NULL;
3433 ql_signal_abort(ha, set_flags);
3434 }
3435 }
3436 } else {
3437 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3438 index, resp_identifier);
3439 ql_signal_abort(ha, set_flags);
3440 }
3441
3442 if (srb != NULL) {
3443 if (!(srb->flags & SRB_ELS_PKT)) {
3444 EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed\n",
3445 srb->flags);
3446 *set_flags |= ISP_ABORT_NEEDED;
3447 return;
3448 }
3449
3450 (void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3451 DDI_DMA_SYNC_FORKERNEL);
3452
3453 /* Set ISP completion status */
3454 srb->pkt->pkt_reason = ddi_get16(rsp_q->rsp_ring.acc_handle,
3455 &rsp->comp_status);
3456
3457 if (srb->pkt->pkt_reason != CS_COMPLETE) {
3458 la_els_rjt_t rjt;
3459
3460 EL(ha, "srb=%ph,status err=%xh\n",
3461 srb, srb->pkt->pkt_reason);
3462
3463 if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3464 EL(ha, "e1=%xh e2=%xh\n",
3465 rsp->error_subcode1, rsp->error_subcode2);
3466 }
3467
3468 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3469
3470 /* Build RJT in the response. */
3471 rjt.ls_code.ls_code = LA_ELS_RJT;
3472 rjt.reason = FC_REASON_NO_CONNECTION;
3473
3474 ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3475 (uint8_t *)srb->pkt->pkt_resp,
3476 sizeof (rjt), DDI_DEV_AUTOINCR);
3477
3478 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3479 srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3480 }
3481
3482 if (srb->pkt->pkt_reason == CS_COMPLETE) {
3483 uint8_t opcode;
3484 uint16_t loop_id;
3485
3486 /* Indicate ISP completion */
3487 srb->flags |= SRB_ISP_COMPLETED;
3488
3489 loop_id = ddi_get16(rsp_q->rsp_ring.acc_handle,
3490 &rsp->n_port_hdl);
3491
3492 /* tq is obtained from lun_queue */
3493 tq = srb->lun_queue->target_queue;
3494
3495 if (ha->topology & QL_N_PORT) {
3496 /* on plogi success assume the chosen s_id */
3497 opcode = ddi_get8(rsp_q->rsp_ring.acc_handle,
3498 &rsp->els_cmd_opcode);
3499
3500 EL(ha, "els opcode=%x srb=%ph,pkt=%ph, tq=%ph"
3501 ", portid=%xh, tqlpid=%xh, loop_id=%xh\n",
3502 opcode, srb, srb->pkt, tq, tq->d_id.b24,
3503 tq->loop_id, loop_id);
3504
3505 if (opcode == LA_ELS_PLOGI) {
3506 s_id.b.al_pa = rsp->s_id_7_0;
3507 s_id.b.area = rsp->s_id_15_8;
3508 s_id.b.domain = rsp->s_id_23_16;
3509
3510 ha->d_id.b24 = s_id.b24;
3511 EL(ha, "Set port's source ID %xh\n",
3512 ha->d_id.b24);
3513 }
3514 }
3515 ql_isp_els_handle_rsp_endian(ha, srb);
3516
3517 if (ha != srb->ha) {
3518 EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha);
3519 }
3520
3521 if (tq != NULL) {
3522 tq->logout_sent = 0;
3523 tq->flags &= ~TQF_NEED_AUTHENTICATION;
3524
3525 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3526 tq->flags |= TQF_IIDMA_NEEDED;
3527 }
3528 srb->pkt->pkt_state = FC_PKT_SUCCESS;
3529 }
3530 }
3531
3532 /* Remove command from watchdog queue. */
3533 if (srb->flags & SRB_WATCHDOG_ENABLED) {
3534 tq = srb->lun_queue->target_queue;
3535
3536 DEVICE_QUEUE_LOCK(tq);
3537 ql_remove_link(&tq->wdg, &srb->wdg);
3538 srb->flags &= ~SRB_WATCHDOG_ENABLED;
3539 DEVICE_QUEUE_UNLOCK(tq);
3540 }
3541
3542 /* invoke the callback */
3543 ql_io_comp(srb);
3544 }
3545 QL_PRINT_3(ha, "done\n");
3546 }
3547
3548 /*
3549 * ql_signal_abort
3550 * Signal to the task daemon that a condition warranting an
3551 * isp reset has been detected.
3552 *
3553 * Input:
3554 * ha: adapter state pointer.
3555 * set_flags: task daemon flags to set.
3556 *
3557 * Context:
3558 * Interrupt or Kernel context, no mailbox commands allowed.
3559 */
3560 static void
3561 ql_signal_abort(ql_adapter_state_t *ha, uint64_t *set_flags)
3562 {
3563 if (!CFG_IST(ha, CFG_CTRL_82XX) &&
3564 !(ha->task_daemon_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
3565 *set_flags |= ISP_ABORT_NEEDED;
3566 }
3567 }