2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2010 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 #pragma ident "Copyright 2010 QLogic Corporation; ql_isr.c"
29
30 /*
31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 *
33 * ***********************************************************************
34 * * **
35 * * NOTICE **
36 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
37 * * ALL RIGHTS RESERVED **
38 * * **
39 * ***********************************************************************
40 *
41 */
42
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_iocb.h>
47 #include <ql_isr.h>
48 #include <ql_init.h>
49 #include <ql_mbx.h>
50 #include <ql_nx.h>
51 #include <ql_xioctl.h>
52
53 /*
54 * Local Function Prototypes.
55 */
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57 uint32_t *);
58 static void ql_spurious_intr(ql_adapter_state_t *, int);
59 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60 uint32_t *, int);
61 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62 uint32_t *, uint32_t *, int);
63 static void ql_fast_fcp_post(ql_srb_t *);
64 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65 uint32_t *, int);
66 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67 uint32_t *, uint32_t *);
68 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69 uint32_t *, uint32_t *);
70 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71 ql_head_t *, uint32_t *, uint32_t *);
72 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73 ql_head_t *, uint32_t *, uint32_t *);
74 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75 ql_head_t *, uint32_t *, uint32_t *);
76 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
77 uint32_t *, uint32_t *);
78 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
79 ql_head_t *, uint32_t *, uint32_t *);
80 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
81 ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
83 ql_head_t *, uint32_t *, uint32_t *);
84 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
85 uint32_t *, uint32_t *);
86 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
87 ql_head_t *, uint32_t *, uint32_t *);
88 static void ql_els_passthru_entry(ql_adapter_state_t *,
89 els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
90 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *, uint32_t *,
91 uint32_t *, uint32_t *);
92 static void ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags);
93
/*
 * Spurious interrupt counter
 *
 * ql_spurious_cnt:	number of consecutive interrupts with an unchanged
 *			response ring index tolerated in ql_isr_aif() before
 *			they are treated as spurious and an ISP abort is
 *			requested (non-2200 path).
 * ql_max_intr_loop:	upper bound on passes through the non-2200 interrupt
 *			service loop within a single ql_isr_aif() invocation.
 */
uint32_t ql_spurious_cnt = 4;
uint32_t ql_max_intr_loop = 16;
99
100 /*
101 * ql_isr
102 * Process all INTX intr types.
103 *
104 * Input:
105 * arg1: adapter state pointer.
106 *
107 * Returns:
108 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
109 *
110 * Context:
111 * Interrupt or Kernel context, no mailbox commands allowed.
112 */
113 /* ARGSUSED */
114 uint_t
115 ql_isr(caddr_t arg1)
116 {
117 return (ql_isr_aif(arg1, 0));
118 }
119
120 /*
121 * ql_isr_default
122 * Process unknown/unvectored intr types
123 *
124 * Input:
125 * arg1: adapter state pointer.
126 * arg2: interrupt vector.
127 *
128 * Returns:
129 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
130 *
131 * Context:
132 * Interrupt or Kernel context, no mailbox commands allowed.
133 */
134 /* ARGSUSED */
135 uint_t
136 ql_isr_default(caddr_t arg1, caddr_t arg2)
137 {
138 ql_adapter_state_t *ha = (void *)arg1;
139
140 EL(ha, "isr_default called: idx=%x\n", arg2);
141 return (ql_isr_aif(arg1, arg2));
142 }
143
144 /*
145 * ql_isr_aif
146 * Process mailbox and I/O command completions.
147 *
148 * Input:
149 * arg: adapter state pointer.
150 * intvec: interrupt vector.
151 *
152 * Returns:
153 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
154 *
155 * Context:
156 * Interrupt or Kernel context, no mailbox commands allowed.
157 */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint16_t		mbx;
	uint32_t		stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint32_t		set_flags = 0;
	uint32_t		reset_flags = 0;
	ql_head_t		isr_done_q = {NULL, NULL};
	uint_t			rval = DDI_INTR_UNCLAIMED;
	int			spurious_intr = 0;
	boolean_t		intr = B_FALSE, daemon = B_FALSE;
	/*
	 * intr_loop counts down once per service-loop pass; while it is
	 * non-zero the interrupt is cleared early by the completion helpers,
	 * and once it reaches zero the clear is done at the bottom of each
	 * pass instead.
	 */
	int			intr_loop = 4;
	boolean_t		clear_spurious = B_TRUE;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		/*
		 * Looks like we are about to go down soon, exit early.
		 */
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
		return (DDI_INTR_UNCLAIMED);
	}
	/* Mark the adapter busy while in the ISR (decremented on exit). */
	ha->busy++;
	QL_PM_UNLOCK(ha);

	/* Acquire interrupt lock. */
	INTR_LOCK(ha);

	/*
	 * ISP2200 uses the legacy istatus/semaphore/mailbox handshake;
	 * all other controllers are serviced via the risc2host status
	 * register in the else-branch below.
	 */
	if (CFG_IST(ha, CFG_CTRL_2200)) {
		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			/* Special Fast Post 2200. */
			stat = 0;
			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
			    ha->flags & ONLINE) {
				ql_srb_t	*sp;

				mbx = RD16_IO_REG(ha, mailbox_out[23]);

				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					if (intr_loop) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
					}

					/* Get handle. */
					mbx >>= 4;
					stat = mbx & OSC_INDEX_MASK;

					/* Validate handle. */
					sp = stat < MAX_OUTSTANDING_COMMANDS ?
					    ha->outstanding_cmds[stat] : NULL;

					/*
					 * Only the low 12 bits of the SRB
					 * handle are carried in mailbox 23;
					 * compare against the same bits.
					 */
					if (sp != NULL && (sp->handle & 0xfff)
					    == mbx) {
						ha->outstanding_cmds[stat] =
						    NULL;
						sp->handle = 0;
						sp->flags &=
						    ~SRB_IN_TOKEN_ARRAY;

						/* Set completed status. */
						sp->flags |= SRB_ISP_COMPLETED;

						/* Set completion status */
						sp->pkt->pkt_reason =
						    CS_COMPLETE;

						ql_fast_fcp_post(sp);
					} else if (mbx !=
					    (QL_FCA_BRAND & 0xfff)) {
						if (sp == NULL) {
							EL(ha, "unknown IOCB"
							    " handle=%xh\n",
							    mbx);
						} else {
							EL(ha, "mismatch IOCB"
							    " handle pkt=%xh, "
							    "sp=%xh\n", mbx,
							    sp->handle & 0xfff);
						}

						(void) ql_binary_fw_dump(ha,
						    FALSE);

						if (!(ha->task_daemon_flags &
						    (ISP_ABORT_NEEDED |
						    ABORT_ISP_ACTIVE))) {
							EL(ha, "ISP Invalid "
							    "handle, "
							    "isp_abort_needed"
							    "\n");
							set_flags |=
							    ISP_ABORT_NEEDED;
						}
					}
				}
			}

			/* stat == 0 means no fast-post completion was taken. */
			if (stat == 0) {
				/* Check for mailbox interrupt. */
				mbx = RD16_IO_REG(ha, semaphore);
				if (mbx & BIT_0) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					/* Get mailbox data. */
					mbx = RD16_IO_REG(ha, mailbox_out[0]);
					/*
					 * 0x4000-0x7fff: mailbox command
					 * completion; 0x8000-0xbfff: async
					 * event; anything else is unexpected.
					 */
					if (mbx > 0x3fff && mbx < 0x8000) {
						ql_mbx_completion(ha, mbx,
						    &set_flags, &reset_flags,
						    intr_loop);
					} else if (mbx > 0x7fff &&
					    mbx < 0xc000) {
						ql_async_event(ha, mbx,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else {
						EL(ha, "UNKNOWN interrupt "
						    "type\n");
						intr = B_TRUE;
					}
				} else {
					ha->isp_rsp_index = RD16_IO_REG(ha,
					    resp_in);

					if (ha->isp_rsp_index !=
					    ha->rsp_ring_index) {
						ql_response_pkt(ha,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else if (++spurious_intr ==
					    MAX_SPURIOUS_INTR) {
						/*
						 * Process excessive
						 * spurious intrrupts
						 */
						ql_spurious_intr(ha,
						    intr_loop);
						EL(ha, "excessive spurious "
						    "interrupts, "
						    "isp_abort_needed\n");
						set_flags |= ISP_ABORT_NEEDED;
					} else {
						intr = B_TRUE;
					}
				}
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			/* Publish accumulated task daemon flag changes. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}
		}
	} else {
		uint32_t	ql_max_intr_loop_cnt = 0;

		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			intr_loop = 1;
		}
		/*
		 * Service until the RISC interrupt deasserts, bounded by the
		 * ql_max_intr_loop tunable to guarantee forward progress.
		 */
		while (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) &&
		    (++ql_max_intr_loop_cnt < ql_max_intr_loop)) {

			clear_spurious = B_TRUE;	/* assume ok */

			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;

			if (CFG_IST(ha, CFG_CTRL_8021) &&
			    (RD32_IO_REG(ha, nx_risc_int) == 0 ||
			    intr_loop == 0)) {
				break;
			}

			if (intr_loop) {
				intr_loop--;
			}

			/* Low 9 bits of risc2host select the intr type. */
			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);

				/* Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);
				break;

			case ASYNC_EVENT:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case RESP_UPDATE:
				if (mbx != ha->rsp_ring_index) {
					ha->isp_rsp_index = mbx;
					ql_response_pkt(ha, &isr_done_q,
					    &set_flags, &reset_flags,
					    intr_loop);
				} else if (++spurious_intr ==
				    ql_spurious_cnt) {
					/* Process excessive spurious intr. */
					ql_spurious_intr(ha, intr_loop);
					EL(ha, "excessive spurious "
					    "interrupts, isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
					clear_spurious = B_FALSE;
				} else {
					QL_PRINT_10(CE_CONT, "(%d): response "
					    "ring index same as before\n",
					    ha->instance);
					intr = B_TRUE;
					clear_spurious = B_FALSE;
				}
				break;

			/*
			 * The fast-post cases below rewrite the low word of
			 * stat into the matching MBA_* event code and funnel
			 * through the common async event handler.
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			default:
				ql_handle_uncommon_risc_intr(ha, stat,
				    &set_flags);
				intr = B_TRUE;
				break;
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				/* Clear mechanism differs per chip family. */
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
				} else {
					WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
				}
			}

			/* Publish accumulated task daemon flag changes. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}

			/*
			 * A parity/pause error was flagged by
			 * ql_handle_uncommon_risc_intr(); stop servicing.
			 */
			if (ha->flags & PARITY_ERROR) {
				EL(ha, "parity/pause exit\n");
				mbx = RD16_IO_REG(ha, hccr);	/* PCI posting */
				break;
			}

			if (clear_spurious) {
				spurious_intr = 0;
			}
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INTR_UNLOCK(ha);

	if (daemon) {
		ql_awaken_task_daemon(ha, NULL, 0, 0);
	}

	/* Complete queued-up commands outside the interrupt lock. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first);
	}

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
		    ha->instance);
	}

	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	return (rval);
}
550
/*
 * ql_handle_uncommon_risc_intr
 *	Handle an uncommon RISC interrupt.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	stat:		interrupt status
 *	set_flags:	task daemon flags to set.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
    uint32_t *set_flags)
{
	uint16_t	hccr_reg;

	hccr_reg = RD16_IO_REG(ha, hccr);

	/*
	 * A paused RISC (RH_RISC_PAUSED) or any of the hccr error bits
	 * (BIT_15/13/11/8) is treated as an internal parity/pause error;
	 * anything else is merely logged as an unknown status.
	 */
	if (stat & RH_RISC_PAUSED ||
	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);

		/* Warn only when the error differs from the last one seen. */
		if (ha->parity_pause_errors == 0 ||
		    ha->parity_hccr_err != hccr_reg ||
		    ha->parity_stat_err != stat) {
			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
			    ha->instance, hccr_reg, stat,
			    ha->parity_pause_errors);
			ha->parity_hccr_err = hccr_reg;
			ha->parity_stat_err = stat;
		}

		EL(ha, "parity/pause error, isp_abort_needed\n");

		/* Capture firmware state; reset the chip if the dump fails. */
		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
			ql_reset_chip(ha);
		}

		if (ha->parity_pause_errors == 0) {
			ha->log_parity_pause = B_TRUE;
		}

		/* Saturating error counter. */
		if (ha->parity_pause_errors < 0xffffffff) {
			ha->parity_pause_errors++;
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Disable ISP interrupts. */
		CFG_IST(ha, CFG_CTRL_8021) ? ql_8021_disable_intrs(ha) :
		    WRT16_IO_REG(ha, ictrl, 0);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);
	} else {
		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
		    stat, hccr_reg);
	}
}
615
616 /*
617 * ql_spurious_intr
618 * Inform Solaris of spurious interrupts.
619 *
620 * Input:
621 * ha: adapter state pointer.
622 * intr_clr: early interrupt clear
623 *
624 * Context:
625 * Interrupt or Kernel context, no mailbox commands allowed.
626 */
static void
ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
{
	ddi_devstate_t	state;

	EL(ha, "Spurious interrupt\n");

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Clear RISC interrupt */
	if (intr_clr) {
		/* Clear mechanism differs per controller family. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	/* Report degraded service to the DDI fault framework if still up. */
	state = ddi_get_devstate(ha->dip);
	if (state == DDI_DEVSTATE_UP) {
		/*EMPTY*/
		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
		    DDI_DEVICE_FAULT, "spurious interrupts");
	}
}
658
659 /*
660 * ql_mbx_completion
661 * Processes mailbox completions.
662 *
663 * Input:
664 * ha: adapter state pointer.
665 * mb0: Mailbox 0 contents.
666 * set_flags: task daemon flags to set.
667 * reset_flags: task daemon flags to reset.
668 * intr_clr: early interrupt clear
669 *
670 * Context:
671 * Interrupt context.
672 */
/* ARGSUSED */
static void
ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
    uint32_t *reset_flags, int intr_clr)
{
	uint32_t	index;
	uint16_t	cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Load return mailbox registers. */
	MBX_REGISTER_LOCK(ha);

	if (ha->mcp != NULL) {
		ha->mcp->mb[0] = mb0;
		/*
		 * in_mb is used here as a bitmask selecting which outgoing
		 * mailbox registers the waiting command wants copied back;
		 * mailbox 0 was already delivered via mb0.
		 */
		index = ha->mcp->in_mb & ~MBX_0;

		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
			index >>= 1;
			if (index & MBX_0) {
				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
				    mailbox_out[cnt]);
			}
		}

	} else {
		/* No mailbox command context is waiting for this completion. */
		EL(ha, "mcp == NULL\n");
	}

	if (intr_clr) {
		/* Clear RISC interrupt. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	/* Wake any thread sleeping on the mailbox interrupt condvar. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
	if (ha->flags & INTERRUPTS_ENABLED) {
		cv_broadcast(&ha->cv_mbx_intr);
	}

	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
722
723 /*
724 * ql_async_event
725 * Processes asynchronous events.
726 *
727 * Input:
728 * ha: adapter state pointer.
729 * mbx: Mailbox 0 register.
730 * done_q: head pointer to done queue.
731 * set_flags: task daemon flags to set.
732 * reset_flags: task daemon flags to reset.
733 * intr_clr: early interrupt clear
734 *
735 * Context:
736 * Interrupt or Kernel context, no mailbox commands allowed.
737 */
738 static void
739 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
740 uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
741 {
742 uint32_t handle;
743 uint32_t index;
744 uint16_t cnt;
745 uint16_t mb[MAX_MBOX_COUNT];
746 ql_srb_t *sp;
747 port_id_t s_id;
748 ql_tgt_t *tq;
749 boolean_t intr = B_TRUE;
750 ql_adapter_state_t *vha;
751
752 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
753
754 /* Setup to process fast completion. */
755 mb[0] = LSW(mbx);
756 switch (mb[0]) {
757 case MBA_SCSI_COMPLETION:
758 handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
759 RD16_IO_REG(ha, mailbox_out[2]));
760 break;
761
762 case MBA_CMPLT_1_16BIT:
763 handle = MSW(mbx);
764 mb[0] = MBA_SCSI_COMPLETION;
765 break;
766
767 case MBA_CMPLT_1_32BIT:
768 handle = SHORT_TO_LONG(MSW(mbx),
769 RD16_IO_REG(ha, mailbox_out[2]));
770 mb[0] = MBA_SCSI_COMPLETION;
771 break;
772
773 case MBA_CTIO_COMPLETION:
774 case MBA_IP_COMPLETION:
775 handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
776 RD16_IO_REG(ha, mailbox_out[1]),
777 RD16_IO_REG(ha, mailbox_out[2])) :
778 SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2]));
779 mb[0] = MBA_SCSI_COMPLETION;
780 break;
781
782 default:
783 break;
784 }
785
786 /* Handle asynchronous event */
787 switch (mb[0]) {
788 case MBA_SCSI_COMPLETION:
789 QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
790 ha->instance);
791
792 if (intr_clr) {
793 /* Clear RISC interrupt */
794 if (CFG_IST(ha, CFG_CTRL_8021)) {
795 ql_8021_clr_fw_intr(ha);
796 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
797 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
798 } else {
799 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
800 }
801 intr = B_FALSE;
802 }
803
804 if ((ha->flags & ONLINE) == 0) {
805 break;
806 }
807
808 /* Get handle. */
809 index = handle & OSC_INDEX_MASK;
810
811 /* Validate handle. */
812 sp = index < MAX_OUTSTANDING_COMMANDS ?
813 ha->outstanding_cmds[index] : NULL;
814
815 if (sp != NULL && sp->handle == handle) {
816 ha->outstanding_cmds[index] = NULL;
817 sp->handle = 0;
818 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
819
820 /* Set completed status. */
821 sp->flags |= SRB_ISP_COMPLETED;
822
823 /* Set completion status */
824 sp->pkt->pkt_reason = CS_COMPLETE;
825
826 if (!(sp->flags & SRB_FCP_CMD_PKT)) {
827 /* Place block on done queue */
828 ql_add_link_b(done_q, &sp->cmd);
829 } else {
830 ql_fast_fcp_post(sp);
831 }
832 } else if (handle != QL_FCA_BRAND) {
833 if (sp == NULL) {
834 EL(ha, "%xh unknown IOCB handle=%xh\n",
835 mb[0], handle);
836 } else {
837 EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
838 "sp=%xh\n", mb[0], handle, sp->handle);
839 }
840
841 EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
842 "mbx6=%xh, mbx7=%xh\n", mb[0],
843 RD16_IO_REG(ha, mailbox_out[1]),
844 RD16_IO_REG(ha, mailbox_out[2]),
845 RD16_IO_REG(ha, mailbox_out[3]),
846 RD16_IO_REG(ha, mailbox_out[6]),
847 RD16_IO_REG(ha, mailbox_out[7]));
848
849 (void) ql_binary_fw_dump(ha, FALSE);
850
851 if (!(ha->task_daemon_flags &
852 (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
853 EL(ha, "%xh ISP Invalid handle, "
854 "isp_abort_needed\n", mb[0]);
855 *set_flags |= ISP_ABORT_NEEDED;
856 }
857 }
858 break;
859
860 case MBA_RESET: /* Reset */
861 EL(ha, "%xh Reset received\n", mb[0]);
862 *set_flags |= RESET_MARKER_NEEDED;
863 break;
864
865 case MBA_SYSTEM_ERR: /* System Error */
866 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
867 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
868 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
869 mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
870
871 EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
872 "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
873 "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
874 "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
875 RD16_IO_REG(ha, mailbox_out[4]),
876 RD16_IO_REG(ha, mailbox_out[5]),
877 RD16_IO_REG(ha, mailbox_out[6]), mb[7],
878 RD16_IO_REG(ha, mailbox_out[8]),
879 RD16_IO_REG(ha, mailbox_out[9]),
880 RD16_IO_REG(ha, mailbox_out[10]),
881 RD16_IO_REG(ha, mailbox_out[11]),
882 RD16_IO_REG(ha, mailbox_out[12]));
893 RD16_IO_REG(ha, mailbox_out[19]),
894 RD16_IO_REG(ha, mailbox_out[20]),
895 RD16_IO_REG(ha, mailbox_out[21]),
896 RD16_IO_REG(ha, mailbox_out[22]),
897 RD16_IO_REG(ha, mailbox_out[23]));
898
899 if (ha->reg_off->mbox_cnt > 24) {
900 EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
901 "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
902 "mbx30=%xh, mbx31=%xh\n", mb[0],
903 RD16_IO_REG(ha, mailbox_out[24]),
904 RD16_IO_REG(ha, mailbox_out[25]),
905 RD16_IO_REG(ha, mailbox_out[26]),
906 RD16_IO_REG(ha, mailbox_out[27]),
907 RD16_IO_REG(ha, mailbox_out[28]),
908 RD16_IO_REG(ha, mailbox_out[29]),
909 RD16_IO_REG(ha, mailbox_out[30]),
910 RD16_IO_REG(ha, mailbox_out[31]));
911 }
912
913 (void) ql_binary_fw_dump(ha, FALSE);
914
915 (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
916 mb[2], mb[3]);
917
918 if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
919 ADAPTER_STATE_LOCK(ha);
920 ha->flags |= MPI_RESET_NEEDED;
921 ADAPTER_STATE_UNLOCK(ha);
922 }
923
924 *set_flags |= ISP_ABORT_NEEDED;
925 ha->xioctl->ControllerErrorCount++;
926 break;
927
928 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
929 EL(ha, "%xh Request Transfer Error received, "
930 "isp_abort_needed\n", mb[0]);
931
932 (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
933 RD16_IO_REG(ha, mailbox_out[1]),
934 RD16_IO_REG(ha, mailbox_out[2]),
935 RD16_IO_REG(ha, mailbox_out[3]));
936
937 *set_flags |= ISP_ABORT_NEEDED;
938 ha->xioctl->ControllerErrorCount++;
939 break;
940
941 case MBA_RSP_TRANSFER_ERR: /* Response Xfer Err */
942 EL(ha, "%xh Response Transfer Error received,"
943 " isp_abort_needed\n", mb[0]);
944
945 (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
946 RD16_IO_REG(ha, mailbox_out[1]),
947 RD16_IO_REG(ha, mailbox_out[2]),
948 RD16_IO_REG(ha, mailbox_out[3]));
949
950 *set_flags |= ISP_ABORT_NEEDED;
951 ha->xioctl->ControllerErrorCount++;
952 break;
953
954 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
955 EL(ha, "%xh Request Queue Wake-up received\n",
956 mb[0]);
957 break;
958
959 case MBA_MENLO_ALERT: /* Menlo Alert Notification */
960 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
961 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
962 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
963
964 EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
965 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
966
967 switch (mb[1]) {
968 case MLA_LOGIN_OPERATIONAL_FW:
969 ADAPTER_STATE_LOCK(ha);
970 ha->flags |= MENLO_LOGIN_OPERATIONAL;
971 ADAPTER_STATE_UNLOCK(ha);
972 break;
973 case MLA_PANIC_RECOVERY:
974 case MLA_LOGIN_DIAGNOSTIC_FW:
975 case MLA_LOGIN_GOLDEN_FW:
976 case MLA_REJECT_RESPONSE:
977 default:
978 break;
979 }
980 break;
981
982 case MBA_LIP_F8: /* Received a LIP F8. */
983 case MBA_LIP_RESET: /* LIP reset occurred. */
984 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
985 if (CFG_IST(ha, CFG_CTRL_8081)) {
986 EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
987 "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
988 RD16_IO_REG(ha, mailbox_out[2]));
989 } else {
990 EL(ha, "%xh LIP received\n", mb[0]);
991 }
992
993 ADAPTER_STATE_LOCK(ha);
994 ha->flags &= ~POINT_TO_POINT;
995 ADAPTER_STATE_UNLOCK(ha);
996
997 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
998 *set_flags |= LOOP_DOWN;
999 }
1000 ql_port_state(ha, FC_STATE_OFFLINE,
1001 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1002
1003 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1004 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1005 }
1006
1007 ha->adapter_stats->lip_count++;
1008
1009 /* Update AEN queue. */
1010 ha->xioctl->TotalLipResets++;
1011 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1012 ql_enqueue_aen(ha, mb[0], NULL);
1013 }
1014 break;
1015
1016 case MBA_LOOP_UP:
1017 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
1018 CFG_CTRL_24258081))) {
1019 ha->iidma_rate = RD16_IO_REG(ha, mailbox_out[1]);
1020 if (ha->iidma_rate == IIDMA_RATE_1GB) {
1021 ha->state = FC_PORT_STATE_MASK(
1022 ha->state) | FC_STATE_1GBIT_SPEED;
1023 index = 1;
1024 } else if (ha->iidma_rate == IIDMA_RATE_2GB) {
1025 ha->state = FC_PORT_STATE_MASK(
1026 ha->state) | FC_STATE_2GBIT_SPEED;
1027 index = 2;
1028 } else if (ha->iidma_rate == IIDMA_RATE_4GB) {
1029 ha->state = FC_PORT_STATE_MASK(
1030 ha->state) | FC_STATE_4GBIT_SPEED;
1031 index = 4;
1032 } else if (ha->iidma_rate == IIDMA_RATE_8GB) {
1033 ha->state = FC_PORT_STATE_MASK(
1034 ha->state) | FC_STATE_8GBIT_SPEED;
1035 index = 8;
1036 } else if (ha->iidma_rate == IIDMA_RATE_10GB) {
1037 ha->state = FC_PORT_STATE_MASK(
1038 ha->state) | FC_STATE_10GBIT_SPEED;
1039 index = 10;
1040 } else {
1041 ha->state = FC_PORT_STATE_MASK(
1042 ha->state);
1043 index = 0;
1044 }
1045 } else {
1046 ha->iidma_rate = IIDMA_RATE_1GB;
1047 ha->state = FC_PORT_STATE_MASK(ha->state) |
1048 FC_STATE_FULL_SPEED;
1049 index = 1;
1050 }
1051
1052 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1053 vha->state = FC_PORT_STATE_MASK(vha->state) |
1054 FC_PORT_SPEED_MASK(ha->state);
1055 }
1056 EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
1057
1058 /* Update AEN queue. */
1059 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1061 }
1062 break;
1063
1064 case MBA_LOOP_DOWN:
1065 EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1066 "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1067 RD16_IO_REG(ha, mailbox_out[2]),
1068 RD16_IO_REG(ha, mailbox_out[3]),
1069 RD16_IO_REG(ha, mailbox_out[4]));
1070
1071 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1072 *set_flags |= LOOP_DOWN;
1073 }
1074 ql_port_state(ha, FC_STATE_OFFLINE,
1075 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1076
1077 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1078 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1079 }
1080
1081 if (CFG_IST(ha, CFG_CTRL_258081)) {
1082 ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]);
1083 }
1084
1085 /* Update AEN queue. */
1086 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1087 ql_enqueue_aen(ha, mb[0], NULL);
1088 }
1089 break;
1090
1091 case MBA_PORT_UPDATE:
1092 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1093 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1094 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1095 RD16_IO_REG(ha, mailbox_out[3]) : 0);
1096
1097 /* Locate port state structure. */
1098 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1099 if (vha->vp_index == LSB(mb[3])) {
1100 break;
1101 }
1102 }
1103 if (vha == NULL) {
1104 break;
1105 }
1106
1107 if (CFG_IST(ha, CFG_CTRL_8081) && mb[1] == 0xffff &&
1108 mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a ||
1109 MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d ||
1110 MSB(mb[3]) == 0x1e)) {
1111 /*
1112 * received FLOGI reject
1113 * received FLOGO
1114 * FCF configuration changed
1115 * FIP Clear Virtual Link received
1116 * FKA timeout
1117 */
1118 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1119 *set_flags |= LOOP_DOWN;
1120 }
1121 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE |
1122 COMMAND_WAIT_NEEDED | LOOP_DOWN);
1123 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1124 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1125 }
1126 /*
1127 * In N port 2 N port topology the FW provides a port
1128 * database entry at loop_id 0x7fe which we use to
1129 * acquire the Ports WWPN.
1130 */
1131 } else if ((mb[1] != 0x7fe) &&
1132 ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1133 (CFG_IST(ha, CFG_CTRL_24258081) &&
1134 (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1135 EL(ha, "%xh Port Database Update, Login/Logout "
1136 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1137 mb[0], mb[1], mb[2], mb[3]);
1138 } else {
1139 EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1140 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1141 mb[3]);
1142 *set_flags |= LOOP_RESYNC_NEEDED;
1143 *set_flags &= ~LOOP_DOWN;
1144 *reset_flags |= LOOP_DOWN;
1145 *reset_flags &= ~LOOP_RESYNC_NEEDED;
1146 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1147 TASK_DAEMON_LOCK(ha);
1148 vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1149 vha->task_daemon_flags &= ~LOOP_DOWN;
1150 TASK_DAEMON_UNLOCK(ha);
1151 ADAPTER_STATE_LOCK(ha);
1152 vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1153 ADAPTER_STATE_UNLOCK(ha);
1154 }
1206 break;
1207
1208 case MBA_IP_RECEIVE:
1209 case MBA_IP_BROADCAST:
1210 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1211 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1212 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1213
1214 EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1215 "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1216
1217 /* Locate device queue. */
1218 s_id.b.al_pa = LSB(mb[2]);
1219 s_id.b.area = MSB(mb[2]);
1220 s_id.b.domain = LSB(mb[1]);
1221 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1222 EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1223 break;
1224 }
1225
1226 cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1227 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1228 ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1229 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1230 ha->ip_init_ctrl_blk.cb.buf_size[1]));
1231
1232 tq->ub_sequence_length = mb[3];
1233 tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1234 if (mb[3] % cnt) {
1235 tq->ub_total_seg_cnt++;
1236 }
1237 cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1238
1239 for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1240 index++) {
1241 mb[index] = RD16_IO_REG(ha, mailbox_out[index]);
1242 }
1243
1244 tq->ub_seq_id = ++ha->ub_seq_id;
1245 tq->ub_seq_cnt = 0;
1246 tq->ub_frame_ro = 0;
1247 tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1248 (CFG_IST(ha, CFG_CTRL_24258081) ? BROADCAST_24XX_HDL :
1249 IP_BROADCAST_LOOP_ID) : tq->loop_id);
1250 ha->rcv_dev_q = tq;
1251
1252 for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1253 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1254 if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1255 QL_SUCCESS) {
1256 EL(ha, "ql_ub_frame_hdr failed, "
1257 "isp_abort_needed\n");
1258 *set_flags |= ISP_ABORT_NEEDED;
1259 break;
1260 }
1261 }
1262 break;
1263
1264 case MBA_IP_LOW_WATER_MARK:
1265 case MBA_IP_RCV_BUFFER_EMPTY:
1266 EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1267 mb[0]);
1268 *set_flags |= NEED_UNSOLICITED_BUFFERS;
1269 break;
1270
1271 case MBA_IP_HDR_DATA_SPLIT:
1272 EL(ha, "%xh IP HDR data split received\n", mb[0]);
1273 break;
1274
1275 case MBA_ERROR_LOGGING_DISABLED:
1276 EL(ha, "%xh error logging disabled received, "
1277 "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1278 break;
1279
1280 case MBA_POINT_TO_POINT:
1281 /* case MBA_DCBX_COMPLETED: */
1282 if (CFG_IST(ha, CFG_CTRL_8081)) {
1283 EL(ha, "%xh DCBX completed received\n", mb[0]);
1284 } else {
1285 EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1286 }
1287 ADAPTER_STATE_LOCK(ha);
1288 ha->flags |= POINT_TO_POINT;
1289 ADAPTER_STATE_UNLOCK(ha);
1290 break;
1291
1292 case MBA_FCF_CONFIG_ERROR:
1293 EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1294 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1295 break;
1296
1297 case MBA_DCBX_PARAM_CHANGED:
1298 EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1299 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1300 break;
1301
1302 case MBA_CHG_IN_CONNECTION:
1303 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1304 if (mb[1] == 2) {
1305 EL(ha, "%xh Change In Connection received, "
1306 "mbx1=%xh\n", mb[0], mb[1]);
1307 ADAPTER_STATE_LOCK(ha);
1308 ha->flags &= ~POINT_TO_POINT;
1309 ADAPTER_STATE_UNLOCK(ha);
1310 if (ha->topology & QL_N_PORT) {
1311 ha->topology = (uint8_t)(ha->topology &
1312 ~QL_N_PORT);
1313 ha->topology = (uint8_t)(ha->topology |
1314 QL_NL_PORT);
1315 }
1316 } else {
1317 EL(ha, "%xh Change In Connection received, "
1318 "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1319 *set_flags |= ISP_ABORT_NEEDED;
1320 }
1321 break;
1322
1323 case MBA_ZIO_UPDATE:
1324 EL(ha, "%xh ZIO response received\n", mb[0]);
1325
1326 ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1327 ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1328 intr = B_FALSE;
1329 break;
1330
1331 case MBA_PORT_BYPASS_CHANGED:
1332 EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1333 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1334 /*
1335 * Event generated when there is a transition on
1336 * port bypass of crystal+.
1337 * Mailbox 1: Bit 0 - External.
1338 * Bit 2 - Internal.
1339 * When the bit is 0, the port is bypassed.
1340 *
1341 * For now we will generate a LIP for all cases.
1342 */
1343 *set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1344 break;
1345
1346 case MBA_RECEIVE_ERROR:
1347 EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1348 mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1349 RD16_IO_REG(ha, mailbox_out[2]));
1350 break;
1351
1352 case MBA_LS_RJT_SENT:
1353 EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1354 RD16_IO_REG(ha, mailbox_out[1]));
1355 break;
1356
1357 case MBA_FW_RESTART_COMP:
1358 EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1359 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1360 break;
1361
1362 /*
1363 * MBA_IDC_COMPLETE & MBA_IDC_NOTIFICATION: We won't get another
1364 * IDC async event until we ACK the current one.
1365 */
1366 case MBA_IDC_COMPLETE:
1367 ha->idc_mb[0] = mb[0];
1368 ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1369 ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1370 ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1371 ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]);
1372 ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]);
1373 ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]);
1374 ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
1375 EL(ha, "%xh Inter-driver communication complete received, "
1376 " mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh,"
1377 " mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1378 ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1379 ha->idc_mb[6], ha->idc_mb[7]);
1380 *set_flags |= IDC_EVENT;
1381 break;
1382
1383 case MBA_IDC_NOTIFICATION:
1384 ha->idc_mb[0] = mb[0];
1385 ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1386 ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1387 ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1388 ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]);
1389 ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]);
1390 ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]);
1391 ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
1392 EL(ha, "%xh Inter-driver communication request notification "
1393 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1394 "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1395 ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1396 ha->idc_mb[6], ha->idc_mb[7]);
1397 *set_flags |= IDC_EVENT;
1398 break;
1399
1400 case MBA_IDC_TIME_EXTENDED:
1401 EL(ha, "%xh Inter-driver communication time extended received,"
1402 " mbx1=%xh, mbx2=%xh\n", mb[0],
1403 RD16_IO_REG(ha, mailbox_out[1]),
1404 RD16_IO_REG(ha, mailbox_out[2]));
1405 break;
1406
1407 default:
1408 EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1409 "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1410 RD16_IO_REG(ha, mailbox_out[2]),
1411 RD16_IO_REG(ha, mailbox_out[3]));
1412 break;
1413 }
1414
1415 /* Clear RISC interrupt */
1416 if (intr && intr_clr) {
1417 if (CFG_IST(ha, CFG_CTRL_8021)) {
1418 ql_8021_clr_fw_intr(ha);
1419 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
1420 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1421 } else {
1422 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1423 }
1424 }
1425
1426 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1427 }
1428
1429 /*
1430 * ql_fast_fcp_post
1431 * Fast path for good SCSI I/O completion.
1432 *
1433 * Input:
1434 * sp: SRB pointer.
1435 *
1436 * Context:
1437 * Interrupt or Kernel context, no mailbox commands allowed.
1438 */
static void
ql_fast_fcp_post(ql_srb_t *sp)
{
	ql_adapter_state_t	*ha = sp->ha;
	ql_lun_t		*lq = sp->lun_queue;
	ql_tgt_t		*tq = lq->target_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device. */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
			 */
			lq->flags &= ~LQF_UNTAGGED_PENDING;
		}

		/*
		 * NOTE(review): the source listing jumps here (embedded
		 * numbering 1466 -> 1483) and the braces below do not
		 * balance; the per-LUN outstanding-count decrement and the
		 * closing brace of the SRB_FCP_CMD_PKT block appear to have
		 * been lost in extraction. Verify against the upstream
		 * ql_isr.c before relying on this region.
		 */
		if (lq->lun_outcnt != 0) {
			ql_next(ha, lq);
		} else {
			/* Release LU queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);
			if (ha->pha->pending_cmds.first != NULL) {
				ql_start_iocb(ha, NULL);
			}
		}

	/* Sync buffers if required. */
	if (sp->flags & SRB_MS_PKT) {
		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Map ISP completion codes. */
	sp->pkt->pkt_expln = FC_EXPLN_NONE;
	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
	sp->pkt->pkt_state = FC_PKT_SUCCESS;

	/* Now call the pkt completion callback */
	if (sp->flags & SRB_POLL) {
		/* Polled I/O: caller reaps the packet; no callback. */
		sp->flags &= ~SRB_POLL;
	} else if (sp->pkt->pkt_comp) {
		/* Drop the interrupt lock across the upcall. */
		INTR_UNLOCK(ha);
		(*sp->pkt->pkt_comp)(sp->pkt);
		INTR_LOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
1514
1515 /*
1516 * ql_response_pkt
1517 * Processes response entry.
1518 *
1519 * Input:
1520 * ha: adapter state pointer.
1521 * done_q: head pointer to done queue.
1522 * set_flags: task daemon flags to set.
1523 * reset_flags: task daemon flags to reset.
1524 * intr_clr: early interrupt clear
1525 *
1526 * Context:
1527 * Interrupt or Kernel context, no mailbox commands allowed.
1528 */
1529 static void
1530 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1531 uint32_t *reset_flags, int intr_clr)
1532 {
1533 response_t *pkt;
1534 uint32_t dma_sync_size_1 = 0;
1535 uint32_t dma_sync_size_2 = 0;
1536 int status = 0;
1537
1538 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1539
1540 /* Clear RISC interrupt */
1541 if (intr_clr) {
1542 if (CFG_IST(ha, CFG_CTRL_8021)) {
1543 ql_8021_clr_fw_intr(ha);
1544 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
1545 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1546 } else {
1547 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1548 }
1549 }
1550
1551 if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1552 EL(ha, "index error = %xh, isp_abort_needed",
1553 ha->isp_rsp_index);
1554 *set_flags |= ISP_ABORT_NEEDED;
1555 return;
1556 }
1557
1558 if ((ha->flags & ONLINE) == 0) {
1559 QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance);
1560 return;
1561 }
1562
1563 /* Calculate size of response queue entries to sync. */
1564 if (ha->isp_rsp_index > ha->rsp_ring_index) {
1565 dma_sync_size_1 = (uint32_t)
1566 ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1567 RESPONSE_ENTRY_SIZE);
1568 } else if (ha->isp_rsp_index == 0) {
1569 dma_sync_size_1 = (uint32_t)
1570 ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1571 RESPONSE_ENTRY_SIZE);
1572 } else {
1573 /* Responses wrap around the Q */
1574 dma_sync_size_1 = (uint32_t)
1575 ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1576 RESPONSE_ENTRY_SIZE);
1577 dma_sync_size_2 = (uint32_t)
1578 (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1579 }
1580
1581 /* Sync DMA buffer. */
1582 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
1583 (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1584 RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1585 DDI_DMA_SYNC_FORKERNEL);
1586 if (dma_sync_size_2) {
1587 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
1588 RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1589 DDI_DMA_SYNC_FORKERNEL);
1590 }
1591
1592 while (ha->rsp_ring_index != ha->isp_rsp_index) {
1593 pkt = ha->response_ring_ptr;
1594
1595 QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1596 ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1597 QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1598 RESPONSE_ENTRY_SIZE);
1599
1600 /* Adjust ring index. */
1601 ha->rsp_ring_index++;
1602 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1603 ha->rsp_ring_index = 0;
1604 ha->response_ring_ptr = ha->response_ring_bp;
1605 } else {
1606 ha->response_ring_ptr++;
1607 }
1608
1609 /* Process packet. */
1610 if (ha->status_srb != NULL && pkt->entry_type !=
1611 STATUS_CONT_TYPE) {
1612 ql_add_link_b(done_q, &ha->status_srb->cmd);
1613 ha->status_srb = NULL;
1614 }
1615
1616 pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1617 pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1618
1619 if (pkt->entry_status != 0) {
1620 ql_error_entry(ha, pkt, done_q, set_flags,
1621 reset_flags);
1622 } else {
1623 switch (pkt->entry_type) {
1624 case STATUS_TYPE:
1625 status |= CFG_IST(ha, CFG_CTRL_24258081) ?
1626 ql_24xx_status_entry(ha,
1627 (sts_24xx_entry_t *)pkt, done_q, set_flags,
1628 reset_flags) :
1629 ql_status_entry(ha, (sts_entry_t *)pkt,
1630 done_q, set_flags, reset_flags);
1631 break;
1632 case STATUS_CONT_TYPE:
1633 ql_status_cont_entry(ha,
1634 (sts_cont_entry_t *)pkt, done_q, set_flags,
1635 reset_flags);
1636 break;
1637 case IP_TYPE:
1638 case IP_A64_TYPE:
1639 case IP_CMD_TYPE:
1640 ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1641 set_flags, reset_flags);
1642 break;
1643 case IP_RECEIVE_TYPE:
1644 ql_ip_rcv_entry(ha,
1645 (ip_rcv_entry_t *)pkt, done_q, set_flags,
1646 reset_flags);
1647 break;
1648 case IP_RECEIVE_CONT_TYPE:
1649 ql_ip_rcv_cont_entry(ha,
1650 (ip_rcv_cont_entry_t *)pkt, done_q,
1651 set_flags, reset_flags);
1652 break;
1653 case IP_24XX_RECEIVE_TYPE:
1654 ql_ip_24xx_rcv_entry(ha,
1655 (ip_rcv_24xx_entry_t *)pkt, done_q,
1656 set_flags, reset_flags);
1657 break;
1658 case MS_TYPE:
1659 ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1660 set_flags, reset_flags);
1661 break;
1662 case REPORT_ID_TYPE:
1663 ql_report_id_entry(ha, (report_id_1_t *)pkt,
1664 done_q, set_flags, reset_flags);
1665 break;
1666 case ELS_PASSTHRU_TYPE:
1667 ql_els_passthru_entry(ha,
1668 (els_passthru_entry_rsp_t *)pkt,
1669 done_q, set_flags, reset_flags);
1670 break;
1671 case IP_BUF_POOL_TYPE:
1672 case MARKER_TYPE:
1673 case VP_MODIFY_TYPE:
1674 case VP_CONTROL_TYPE:
1675 break;
1676 default:
1677 EL(ha, "Unknown IOCB entry type=%xh\n",
1678 pkt->entry_type);
1679 break;
1680 }
1681 }
1682 }
1683
1684 /* Inform RISC of processed responses. */
1685 WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1686
1687 /* RESET packet received delay for possible async event. */
1688 if (status & BIT_0) {
1689 drv_usecwait(500000);
1690 }
1691
1692 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1693 }
1694
1695 /*
1696 * ql_error_entry
1697 * Processes error entry.
1698 *
1699 * Input:
1700 * ha = adapter state pointer.
1701 * pkt = entry pointer.
1702 * done_q = head pointer to done queue.
1703 * set_flags = task daemon flags to set.
1704 * reset_flags = task daemon flags to reset.
1705 *
1706 * Context:
1707 * Interrupt or Kernel context, no mailbox commands allowed.
1708 */
1709 /* ARGSUSED */
1710 static void
1711 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1712 uint32_t *set_flags, uint32_t *reset_flags)
1713 {
1714 ql_srb_t *sp;
1715 uint32_t index, resp_identifier;
1716
1717 if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1718 EL(ha, "Aborted command\n");
1719 return;
1720 }
1721
1722 QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1723 QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1724
1725 if (pkt->entry_status & BIT_6) {
1726 EL(ha, "Request Queue DMA error\n");
1727 } else if (pkt->entry_status & BIT_5) {
1728 EL(ha, "Invalid Entry Order\n");
1729 } else if (pkt->entry_status & BIT_4) {
1730 EL(ha, "Invalid Entry Count\n");
1731 } else if (pkt->entry_status & BIT_3) {
1732 EL(ha, "Invalid Entry Parameter\n");
1733 } else if (pkt->entry_status & BIT_2) {
1734 EL(ha, "Invalid Entry Type\n");
1735 } else if (pkt->entry_status & BIT_1) {
1736 EL(ha, "Busy\n");
1737 } else {
1738 EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1739 }
1740
1741 /* Validate the response entry handle. */
1742 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1743 index = resp_identifier & OSC_INDEX_MASK;
1744 if (index < MAX_OUTSTANDING_COMMANDS) {
1745 /* the index seems reasonable */
1746 sp = ha->outstanding_cmds[index];
1747 if (sp != NULL) {
1748 if (sp->handle == resp_identifier) {
1749 /* Neo, you're the one... */
1750 ha->outstanding_cmds[index] = NULL;
1751 sp->handle = 0;
1752 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1753 } else {
1754 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1755 resp_identifier, sp->handle);
1756 sp = NULL;
1757 ql_signal_abort(ha, set_flags);
1758 }
1759 } else {
1760 sp = ql_verify_preprocessed_cmd(ha,
1761 (uint32_t *)&pkt->handle, set_flags, reset_flags);
1762 }
1763 } else {
1764 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1765 index, resp_identifier);
1766 ql_signal_abort(ha, set_flags);
1767 }
1768
1769 if (sp != NULL) {
1770 /* Bad payload or header */
1771 if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1772 /* Bad payload or header, set error status. */
1773 sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1774 } else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1775 sp->pkt->pkt_reason = CS_QUEUE_FULL;
1776 } else {
1777 /* Set error status. */
1778 sp->pkt->pkt_reason = CS_UNKNOWN;
1779 }
1780
1781 /* Set completed status. */
1782 sp->flags |= SRB_ISP_COMPLETED;
1783
1784 /* Place command on done queue. */
1785 ql_add_link_b(done_q, &sp->cmd);
1786
1787 }
1788 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1789 }
1790
1791 /*
1792 * ql_status_entry
1793 * Processes received ISP2200-2300 status entry.
1794 *
1795 * Input:
1796 * ha: adapter state pointer.
1797 * pkt: entry pointer.
1798 * done_q: done queue pointer.
1799 * set_flags: task daemon flags to set.
1800 * reset_flags: task daemon flags to reset.
1801 *
1802 * Returns:
1803 * BIT_0 = CS_RESET status received.
1804 *
1805 * Context:
1806 * Interrupt or Kernel context, no mailbox commands allowed.
1807 */
1808 /* ARGSUSED */
1809 static int
1810 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1811 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1812 {
1813 ql_srb_t *sp;
1814 uint32_t index, resp_identifier;
1815 uint16_t comp_status;
1816 int rval = 0;
1817
1818 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1819
1820 /* Validate the response entry handle. */
1821 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1822 index = resp_identifier & OSC_INDEX_MASK;
1823 if (index < MAX_OUTSTANDING_COMMANDS) {
1824 /* the index seems reasonable */
1825 sp = ha->outstanding_cmds[index];
1826 if (sp != NULL) {
1827 if (sp->handle == resp_identifier) {
1828 /* Neo, you're the one... */
1829 ha->outstanding_cmds[index] = NULL;
1830 sp->handle = 0;
1831 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1832 } else {
1833 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1834 resp_identifier, sp->handle);
1835 sp = NULL;
1836 ql_signal_abort(ha, set_flags);
1837 }
1838 } else {
1839 sp = ql_verify_preprocessed_cmd(ha,
1840 (uint32_t *)&pkt->handle, set_flags, reset_flags);
1841 }
1842 } else {
1843 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1844 index, resp_identifier);
1845 ql_signal_abort(ha, set_flags);
1846 }
1847
1848 if (sp != NULL) {
1849 comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1850 &pkt->comp_status);
1851
1852 /*
1853 * We dont care about SCSI QFULLs.
1854 */
1855 if (comp_status == CS_QUEUE_FULL) {
1856 EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1857 sp->lun_queue->target_queue->d_id.b24,
1858 sp->lun_queue->lun_no);
1859 comp_status = CS_COMPLETE;
1860 }
1861
1862 /*
1863 * 2300 firmware marks completion status as data underrun
1864 * for scsi qfulls. Make it transport complete.
1865 */
1866 if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1867 (comp_status == CS_DATA_UNDERRUN) &&
1868 (pkt->scsi_status_l != 0)) {
1869 comp_status = CS_COMPLETE;
1870 }
1871
1872 /*
1873 * Workaround T3 issue where we do not get any data xferred
1874 * but get back a good status.
1875 */
1876 if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1877 comp_status == CS_COMPLETE &&
1878 pkt->scsi_status_l == 0 &&
1879 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1880 pkt->residual_length == 0 &&
1881 sp->fcp &&
1882 sp->fcp->fcp_data_len != 0 &&
1883 (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1884 SF_DATA_OUT) {
1885 comp_status = CS_ABORTED;
1886 }
1887
1888 if (sp->flags & SRB_MS_PKT) {
1889 /*
1890 * Ideally it should never be true. But there
1891 * is a bug in FW which upon receiving invalid
1892 * parameters in MS IOCB returns it as
1893 * status entry and not as ms entry type.
1894 */
1895 ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1896 set_flags, reset_flags);
1897 QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1898 ha->instance);
1899 return (0);
1900 }
1901
1902 /*
1903 * Fast path to good SCSI I/O completion
1904 */
1905 if ((comp_status == CS_COMPLETE) &
1906 (!pkt->scsi_status_l) &
1907 (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1908 /* Set completed status. */
1909 sp->flags |= SRB_ISP_COMPLETED;
1910 sp->pkt->pkt_reason = comp_status;
1911 ql_fast_fcp_post(sp);
1912 QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1913 ha->instance);
1914 return (0);
1915 }
1916 rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1917 reset_flags);
1918 }
1919 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1920
1921 return (rval);
1922 }
1923
1924 /*
1925 * ql_24xx_status_entry
1926 * Processes received ISP24xx status entry.
1927 *
1928 * Input:
1929 * ha: adapter state pointer.
1930 * pkt: entry pointer.
1931 * done_q: done queue pointer.
1932 * set_flags: task daemon flags to set.
1933 * reset_flags: task daemon flags to reset.
1934 *
1935 * Returns:
1936 * BIT_0 = CS_RESET status received.
1937 *
1938 * Context:
1939 * Interrupt or Kernel context, no mailbox commands allowed.
1940 */
1941 /* ARGSUSED */
1942 static int
1943 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1944 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1945 {
1946 ql_srb_t *sp = NULL;
1947 uint16_t comp_status;
1948 uint32_t index, resp_identifier;
1949 int rval = 0;
1950
1951 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1952
1953 /* Validate the response entry handle. */
1954 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1955 index = resp_identifier & OSC_INDEX_MASK;
1956 if (index < MAX_OUTSTANDING_COMMANDS) {
1957 /* the index seems reasonable */
1958 sp = ha->outstanding_cmds[index];
1959 if (sp != NULL) {
1960 if (sp->handle == resp_identifier) {
1961 /* Neo, you're the one... */
1962 ha->outstanding_cmds[index] = NULL;
1963 sp->handle = 0;
1964 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1965 } else {
1966 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1967 resp_identifier, sp->handle);
1968 sp = NULL;
1969 ql_signal_abort(ha, set_flags);
1970 }
1971 } else {
1972 sp = ql_verify_preprocessed_cmd(ha,
1973 (uint32_t *)&pkt->handle, set_flags, reset_flags);
1974 }
1975 } else {
1976 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1977 index, resp_identifier);
1978 ql_signal_abort(ha, set_flags);
1979 }
1980
1981 if (sp != NULL) {
1982 comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1983 &pkt->comp_status);
1984
1985 /* We dont care about SCSI QFULLs. */
1986 if (comp_status == CS_QUEUE_FULL) {
1987 EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1988 sp->lun_queue->target_queue->d_id.b24,
1989 sp->lun_queue->lun_no);
1990 comp_status = CS_COMPLETE;
1991 }
1992
1993 /*
1994 * 2300 firmware marks completion status as data underrun
1995 * for scsi qfulls. Make it transport complete.
1996 */
1997 if ((comp_status == CS_DATA_UNDERRUN) &&
1998 (pkt->scsi_status_l != 0)) {
1999 comp_status = CS_COMPLETE;
2000 }
2001
2002 /*
2003 * Workaround T3 issue where we do not get any data xferred
2004 * but get back a good status.
2005 */
2006 if (comp_status == CS_COMPLETE &&
2007 pkt->scsi_status_l == 0 &&
2008 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
2009 pkt->residual_length != 0 &&
2010 sp->fcp &&
2011 sp->fcp->fcp_data_len != 0 &&
2012 sp->fcp->fcp_cntl.cntl_write_data) {
2013 comp_status = CS_ABORTED;
2014 }
2015
2016 /*
2017 * Fast path to good SCSI I/O completion
2018 */
2019 if ((comp_status == CS_COMPLETE) &
2020 (!pkt->scsi_status_l) &
2021 (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
2022 /* Set completed status. */
2023 sp->flags |= SRB_ISP_COMPLETED;
2024 sp->pkt->pkt_reason = comp_status;
2025 ql_fast_fcp_post(sp);
2026 QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
2027 ha->instance);
2028 return (0);
2029 }
2030 rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
2031 set_flags, reset_flags);
2032 }
2033 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2034
2035 return (rval);
2036 }
2037
2038 /*
2039 * ql_verify_preprocessed_cmd
2040 * Handles preprocessed cmds..
2041 *
2042 * Input:
2043 * ha: adapter state pointer.
2044 * pkt_handle: handle pointer.
2045 * set_flags: task daemon flags to set.
2046 * reset_flags: task daemon flags to reset.
2047 *
2048 * Returns:
2049 * srb pointer or NULL
2050 *
2051 * Context:
2052 * Interrupt or Kernel context, no mailbox commands allowed.
2053 */
2054 /* ARGSUSED */
2055 ql_srb_t *
2056 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, uint32_t *pkt_handle,
2057 uint32_t *set_flags, uint32_t *reset_flags)
2058 {
2059 ql_srb_t *sp = NULL;
2060 uint32_t index, resp_identifier;
2061 uint32_t get_handle = 10;
2062
2063 while (get_handle) {
2064 /* Get handle. */
2065 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, pkt_handle);
2066 index = resp_identifier & OSC_INDEX_MASK;
2067 /* Validate handle. */
2068 if (index < MAX_OUTSTANDING_COMMANDS) {
2069 sp = ha->outstanding_cmds[index];
2070 }
2071
2072 if (sp != NULL) {
2073 EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
2074 resp_identifier, get_handle, index);
2075 break;
2076 } else {
2077 get_handle -= 1;
2078 drv_usecwait(10000);
2079 if (get_handle == 1) {
2080 /* Last chance, Sync whole DMA buffer. */
2081 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
2082 RESPONSE_Q_BUFFER_OFFSET,
2083 RESPONSE_QUEUE_SIZE,
2084 DDI_DMA_SYNC_FORKERNEL);
2085 EL(ha, "last chance DMA sync, index=%xh\n",
2086 index);
2087 }
2088 }
2089 }
2090 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2091
2092 return (sp);
2093 }
2094
2095
2096 /*
2097 * ql_status_error
2098 * Processes received ISP status entry error.
2099 *
2100 * Input:
2101 * ha: adapter state pointer.
2102 * sp: SRB pointer.
2103 * pkt: entry pointer.
2104 * done_q: done queue pointer.
2105 * set_flags: task daemon flags to set.
2106 * reset_flags: task daemon flags to reset.
2107 *
2108 * Returns:
2109 * BIT_0 = CS_RESET status received.
2110 *
2111 * Context:
2112 * Interrupt or Kernel context, no mailbox commands allowed.
2113 */
2114 /* ARGSUSED */
2115 static int
2116 ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
2117 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2118 {
2119 uint32_t sense_sz = 0;
2120 uint32_t cnt;
2121 ql_tgt_t *tq;
2122 fcp_rsp_t *fcpr;
2123 struct fcp_rsp_info *rsp;
2124 int rval = 0;
2125
2126 struct {
2127 uint8_t *rsp_info;
2128 uint8_t *req_sense_data;
2129 uint32_t residual_length;
2130 uint32_t fcp_residual_length;
2131 uint32_t rsp_info_length;
2132 uint32_t req_sense_length;
2133 uint16_t comp_status;
2134 uint8_t state_flags_l;
2135 uint8_t state_flags_h;
2136 uint8_t scsi_status_l;
2137 uint8_t scsi_status_h;
2138 } sts;
2139
2140 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2141
2142 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2143 sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
2144
2145 /* Setup status. */
2146 sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2147 &pkt24->comp_status);
2148 sts.scsi_status_l = pkt24->scsi_status_l;
2149 sts.scsi_status_h = pkt24->scsi_status_h;
2150
2151 /* Setup firmware residuals. */
2152 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2153 ddi_get32(ha->hba_buf.acc_handle,
2154 (uint32_t *)&pkt24->residual_length) : 0;
2155
2156 /* Setup FCP residuals. */
2157 sts.fcp_residual_length = sts.scsi_status_h &
2158 (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2159 ddi_get32(ha->hba_buf.acc_handle,
2160 (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
2161
2162 if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2163 (sts.scsi_status_h & FCP_RESID_UNDER) &&
2164 (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
2165
2166 EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
2167 sts.residual_length,
2168 pkt24->fcp_rsp_residual_count);
2169 sts.scsi_status_h = (uint8_t)
2170 (sts.scsi_status_h & ~FCP_RESID_UNDER);
2171 }
2172
2173 /* Setup state flags. */
2174 sts.state_flags_l = pkt24->state_flags_l;
2175 sts.state_flags_h = pkt24->state_flags_h;
2176
2177 if (sp->fcp->fcp_data_len &&
2178 (sts.comp_status != CS_DATA_UNDERRUN ||
2179 sts.residual_length != sp->fcp->fcp_data_len)) {
2191 sts.state_flags_l = (uint8_t)
2192 (sts.state_flags_l | SF_DATA_OUT);
2193 } else if (sp->fcp->fcp_cntl.cntl_read_data) {
2194 sts.state_flags_l = (uint8_t)
2195 (sts.state_flags_l | SF_DATA_IN);
2196 }
2197 if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2198 sts.state_flags_l = (uint8_t)
2199 (sts.state_flags_l | SF_HEAD_OF_Q);
2200 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2201 sts.state_flags_l = (uint8_t)
2202 (sts.state_flags_l | SF_ORDERED_Q);
2203 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2204 sts.state_flags_l = (uint8_t)
2205 (sts.state_flags_l | SF_SIMPLE_Q);
2206 }
2207
2208 /* Setup FCP response info. */
2209 sts.rsp_info = &pkt24->rsp_sense_data[0];
2210 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2211 sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
2212 (uint32_t *)&pkt24->fcp_rsp_data_length);
2213 if (sts.rsp_info_length >
2214 sizeof (struct fcp_rsp_info)) {
2215 sts.rsp_info_length =
2216 sizeof (struct fcp_rsp_info);
2217 }
2218 for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2219 ql_chg_endian(sts.rsp_info + cnt, 4);
2220 }
2221 } else {
2222 sts.rsp_info_length = 0;
2223 }
2224
2225 /* Setup sense data. */
2226 sts.req_sense_data =
2227 &pkt24->rsp_sense_data[sts.rsp_info_length];
2228 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2229 sts.req_sense_length =
2230 ddi_get32(ha->hba_buf.acc_handle,
2231 (uint32_t *)&pkt24->fcp_sense_length);
2232 sts.state_flags_h = (uint8_t)
2233 (sts.state_flags_h | SF_ARQ_DONE);
2234 sense_sz = (uint32_t)
2235 (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2236 (uintptr_t)sts.req_sense_data);
2237 for (cnt = 0; cnt < sense_sz; cnt += 4) {
2238 ql_chg_endian(sts.req_sense_data + cnt, 4);
2239 }
2240 } else {
2241 sts.req_sense_length = 0;
2242 }
2243 } else {
2244 /* Setup status. */
2245 sts.comp_status = (uint16_t)ddi_get16(
2246 ha->hba_buf.acc_handle, &pkt23->comp_status);
2247 sts.scsi_status_l = pkt23->scsi_status_l;
2248 sts.scsi_status_h = pkt23->scsi_status_h;
2249
2250 /* Setup firmware residuals. */
2251 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2252 ddi_get32(ha->hba_buf.acc_handle,
2253 (uint32_t *)&pkt23->residual_length) : 0;
2254
2255 /* Setup FCP residuals. */
2256 sts.fcp_residual_length = sts.scsi_status_h &
2257 (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2258 sts.residual_length : 0;
2259
2260 /* Setup state flags. */
2261 sts.state_flags_l = pkt23->state_flags_l;
2262 sts.state_flags_h = pkt23->state_flags_h;
2263
2264 /* Setup FCP response info. */
2265 sts.rsp_info = &pkt23->rsp_info[0];
2266 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2267 sts.rsp_info_length = ddi_get16(
2268 ha->hba_buf.acc_handle,
2269 (uint16_t *)&pkt23->rsp_info_length);
2270 if (sts.rsp_info_length >
2271 sizeof (struct fcp_rsp_info)) {
2272 sts.rsp_info_length =
2273 sizeof (struct fcp_rsp_info);
2274 }
2275 } else {
2276 sts.rsp_info_length = 0;
2277 }
2278
2279 /* Setup sense data. */
2280 sts.req_sense_data = &pkt23->req_sense_data[0];
2281 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2282 ddi_get16(ha->hba_buf.acc_handle,
2283 (uint16_t *)&pkt23->req_sense_length) : 0;
2284 }
2285
2286 bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2287
2288 fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2289 rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2290 sizeof (fcp_rsp_t));
2291
2292 tq = sp->lun_queue->target_queue;
2293
2294 fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2295 if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2296 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2297 }
2298 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2299 fcpr->fcp_u.fcp_status.sense_len_set = 1;
2300 }
2301 if (sts.scsi_status_h & FCP_RESID_OVER) {
2302 fcpr->fcp_u.fcp_status.resid_over = 1;
2303 }
2304 if (sts.scsi_status_h & FCP_RESID_UNDER) {
2305 fcpr->fcp_u.fcp_status.resid_under = 1;
2306 }
2307 fcpr->fcp_u.fcp_status.reserved_1 = 0;
2308
2309 /* Set ISP completion status */
2310 sp->pkt->pkt_reason = sts.comp_status;
2311
2312 /* Update statistics. */
2313 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2314 (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2315
2316 sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2317 if (sense_sz > sts.rsp_info_length) {
2318 sense_sz = sts.rsp_info_length;
2319 }
2320
2321 /* copy response information data. */
2322 if (sense_sz) {
2323 ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
2324 sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
2325 }
2326 fcpr->fcp_response_len = sense_sz;
2327
2328 rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2329 fcpr->fcp_response_len);
2330
2331 switch (*(sts.rsp_info + 3)) {
2332 case FCP_NO_FAILURE:
2333 break;
2334 case FCP_DL_LEN_MISMATCH:
2335 ha->adapter_stats->d_stats[lobyte(
2336 tq->loop_id)].dl_len_mismatches++;
2337 break;
2338 case FCP_CMND_INVALID:
2339 break;
2340 case FCP_DATA_RO_MISMATCH:
2341 ha->adapter_stats->d_stats[lobyte(
2342 tq->loop_id)].data_ro_mismatches++;
2343 break;
2344 case FCP_TASK_MGMT_NOT_SUPPTD:
2345 break;
2346 case FCP_TASK_MGMT_FAILED:
2347 ha->adapter_stats->d_stats[lobyte(
2348 tq->loop_id)].task_mgmt_failures++;
2349 break;
2350 default:
2351 break;
2352 }
2353 } else {
2354 /*
2355 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2356 * sts.scsi_status_h, sp->pkt->pkt_rsplen);
2357 */
2358 fcpr->fcp_response_len = 0;
2359 }
2360
2361 /* Set reset status received. */
2362 if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2363 rval |= BIT_0;
2364 }
2365
2366 if (!(tq->flags & TQF_TAPE_DEVICE) &&
2367 (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2368 ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2369 ha->task_daemon_flags & LOOP_DOWN) {
2370 EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2371 tq->d_id.b24, sp->lun_queue->lun_no);
2372
2373 /* Set retry status. */
2374 sp->flags |= SRB_RETRY;
2375 } else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2376 tq->port_down_retry_count != 0 &&
2377 (sts.comp_status == CS_INCOMPLETE ||
2378 sts.comp_status == CS_PORT_UNAVAILABLE ||
2379 sts.comp_status == CS_PORT_LOGGED_OUT ||
2380 sts.comp_status == CS_PORT_CONFIG_CHG ||
2381 sts.comp_status == CS_PORT_BUSY)) {
2382 EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2431 tq->qfull_retry_count);
2432 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2433 tq->flags |= TQF_QUEUE_SUSPENDED;
2434
2435 tq->qfull_retry_count--;
2436
2437 ADAPTER_STATE_LOCK(ha);
2438 if (ha->port_retry_timer == 0) {
2439 if ((ha->port_retry_timer =
2440 ha->qfull_retry_delay) ==
2441 0) {
2442 *set_flags |=
2443 PORT_RETRY_NEEDED;
2444 }
2445 }
2446 ADAPTER_STATE_UNLOCK(ha);
2447 }
2448 } else {
2449 EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2450 tq->d_id.b24, sp->lun_queue->lun_no);
2451 }
2452
2453 /* Set retry status. */
2454 sp->flags |= SRB_RETRY;
2455 } else {
2456 fcpr->fcp_resid =
2457 sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2458 sp->fcp->fcp_data_len : sts.fcp_residual_length;
2459
2460 if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2461 (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2462
2463 if (sts.scsi_status_l == STATUS_CHECK) {
2464 sp->pkt->pkt_reason = CS_COMPLETE;
2465 } else {
2466 EL(ha, "transport error - "
2467 "underrun & invalid resid\n");
2468 EL(ha, "ssh=%xh, ssl=%xh\n",
2469 sts.scsi_status_h, sts.scsi_status_l);
2470 sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2471 }
2472 }
2473
2474 /* Ignore firmware underrun error. */
2475 if (sts.comp_status == CS_DATA_UNDERRUN &&
2476 (sts.scsi_status_h & FCP_RESID_UNDER ||
2477 (sts.scsi_status_l != STATUS_CHECK &&
2478 sts.scsi_status_l != STATUS_GOOD))) {
2479 sp->pkt->pkt_reason = CS_COMPLETE;
2480 }
2481
2482 if (sp->pkt->pkt_reason != CS_COMPLETE) {
2483 ha->xioctl->DeviceErrorCount++;
2484 EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
2485 "\n", sts.comp_status, tq->d_id.b24,
2486 sp->lun_queue->lun_no);
2487 }
2488
2489 /* Set target request sense data. */
2490 if (sts.scsi_status_l == STATUS_CHECK) {
2491 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2492
2493 if (sp->pkt->pkt_reason == CS_COMPLETE &&
2494 sts.req_sense_data[2] != KEY_NO_SENSE &&
2495 sts.req_sense_data[2] !=
2496 KEY_UNIT_ATTENTION) {
2497 ha->xioctl->DeviceErrorCount++;
2498 }
2499
2500 sense_sz = sts.req_sense_length;
2501
2502 /* Insure data does not exceed buf. */
2503 if (sp->pkt->pkt_rsplen <=
2504 (uint32_t)sizeof (fcp_rsp_t) +
2505 fcpr->fcp_response_len) {
2506 sp->request_sense_length = 0;
2515 sp->request_sense_length) {
2516 sp->request_sense_length =
2517 sense_sz;
2518 }
2519
2520 sp->request_sense_ptr = (caddr_t)rsp;
2521
2522 sense_sz = (uint32_t)
2523 (((uintptr_t)pkt23 +
2524 sizeof (sts_entry_t)) -
2525 (uintptr_t)sts.req_sense_data);
2526 if (sp->request_sense_length <
2527 sense_sz) {
2528 sense_sz =
2529 sp->request_sense_length;
2530 }
2531
2532 fcpr->fcp_sense_len = sense_sz;
2533
2534 /* Move sense data. */
2535 ddi_rep_get8(ha->hba_buf.acc_handle,
2536 (uint8_t *)sp->request_sense_ptr,
2537 sts.req_sense_data,
2538 (size_t)sense_sz,
2539 DDI_DEV_AUTOINCR);
2540
2541 sp->request_sense_ptr += sense_sz;
2542 sp->request_sense_length -= sense_sz;
2543 if (sp->request_sense_length != 0 &&
2544 !(CFG_IST(ha, CFG_CTRL_8021))) {
2545 ha->status_srb = sp;
2546 }
2547 }
2548
2549 if (sense_sz != 0) {
2550 EL(sp->ha, "check condition sense data, "
2551 "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2552 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2553 "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2554 sp->lun_queue->lun_no,
2555 sts.req_sense_data[0],
2556 sts.req_sense_data[1],
2557 sts.req_sense_data[2],
2558 sts.req_sense_data[3],
2559 sts.req_sense_data[4],
2560 sts.req_sense_data[5],
2561 sts.req_sense_data[6],
2562 sts.req_sense_data[7],
2563 sts.req_sense_data[8],
2564 sts.req_sense_data[9],
2565 sts.req_sense_data[10],
2566 sts.req_sense_data[11],
2567 sts.req_sense_data[12],
2568 sts.req_sense_data[13],
2569 sts.req_sense_data[14],
2570 sts.req_sense_data[15],
2571 sts.req_sense_data[16],
2572 sts.req_sense_data[17]);
2573 } else {
2574 EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2575 "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2576 }
2577 }
2578 }
2579
2580 /* Set completed status. */
2581 sp->flags |= SRB_ISP_COMPLETED;
2582
2583 /* Place command on done queue. */
2584 if (ha->status_srb == NULL) {
2585 ql_add_link_b(done_q, &sp->cmd);
2586 }
2587
2588 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2589
2590 return (rval);
2591 }
2592
2593 /*
2594 * ql_status_cont_entry
2595 * Processes status continuation entry.
2596 *
2597 * Input:
2598 * ha: adapter state pointer.
2599 * pkt: entry pointer.
2600 * done_q: done queue pointer.
2601 * set_flags: task daemon flags to set.
2602 * reset_flags: task daemon flags to reset.
2603 *
2604 * Context:
2605 * Interrupt or Kernel context, no mailbox commands allowed.
2606 */
2607 /* ARGSUSED */
2608 static void
2609 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2610 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2611 {
2612 uint32_t sense_sz, index;
2613 ql_srb_t *sp = ha->status_srb;
2614
2615 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2616
2617 if (sp != NULL && sp->request_sense_length) {
2618 if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2619 sense_sz = sizeof (pkt->req_sense_data);
2620 } else {
2621 sense_sz = sp->request_sense_length;
2622 }
2623
2624 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2625 for (index = 0; index < sense_sz; index += 4) {
2626 ql_chg_endian((uint8_t *)
2627 &pkt->req_sense_data[0] + index, 4);
2628 }
2629 }
2630
2631 /* Move sense data. */
2632 ddi_rep_get8(ha->hba_buf.acc_handle,
2633 (uint8_t *)sp->request_sense_ptr,
2634 (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2635 DDI_DEV_AUTOINCR);
2636
2637 sp->request_sense_ptr += sense_sz;
2638 sp->request_sense_length -= sense_sz;
2639
2640 /* Place command on done queue. */
2641 if (sp->request_sense_length == 0) {
2642 ql_add_link_b(done_q, &sp->cmd);
2643 ha->status_srb = NULL;
2644 }
2645 }
2646
2647 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2648 }
2649
/*
 * ql_ip_entry
 *	Processes received ISP IP entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, resp_identifier;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < MAX_OUTSTANDING_COMMANDS) {
		/* the index seems reasonable */
		sp = ha->outstanding_cmds[index];
		if (sp != NULL) {
			if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Stale/corrupt IOCB; force an ISP abort. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		} else {
			/* Slot empty: may be a preprocessed command. */
			sp = ql_verify_preprocessed_cmd(ha,
			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx-family entries report status in hdl_status. */
			ip_cmd_entry_t *pkt24 = (ip_cmd_entry_t *)pkt23;

			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		if (ha->task_daemon_flags & LOOP_DOWN) {
			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
			    tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
			    sp->pkt->pkt_reason, tq->d_id.b24,
			    tq->port_down_retry_count);

			/* Set retry status. */
			/*
			 * NOTE(review): this chunk appears truncated here --
			 * the retry/queue-suspend logic that should follow
			 * the comment above is missing before the unlock
			 * below. Verify against the upstream ql_isr.c.
			 */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;
		} else {
			if (sp->pkt->pkt_reason != CS_COMPLETE) {
				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
				    sp->pkt->pkt_reason, tq->d_id.b24);
			}
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
2786
2787 /*
2788 * ql_ip_rcv_entry
2789 * Processes received ISP IP buffers entry.
2790 *
2791 * Input:
2792 * ha: adapter state pointer.
2793 * pkt: entry pointer.
2794 * done_q: done queue pointer.
2795 * set_flags: task daemon flags to set.
2796 * reset_flags: task daemon flags to reset.
2797 *
2798 * Context:
2799 * Interrupt or Kernel context, no mailbox commands allowed.
2800 */
2801 /* ARGSUSED */
2802 static void
2803 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2804 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2805 {
2806 port_id_t s_id;
2807 uint16_t index;
2808 uint8_t cnt;
2809 ql_tgt_t *tq;
2810
2811 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2812
2813 /* Locate device queue. */
2814 s_id.b.al_pa = pkt->s_id[0];
2815 s_id.b.area = pkt->s_id[1];
2816 s_id.b.domain = pkt->s_id[2];
2817 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2818 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2819 return;
2820 }
2821
2822 tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2823 &pkt->seq_length);
2824 tq->ub_total_seg_cnt = pkt->segment_count;
2825 tq->ub_seq_id = ++ha->ub_seq_id;
2826 tq->ub_seq_cnt = 0;
2827 tq->ub_frame_ro = 0;
2828 tq->ub_loop_id = pkt->loop_id;
2829 ha->rcv_dev_q = tq;
2830
2831 for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2832 tq->ub_total_seg_cnt; cnt++) {
2833
2834 index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2835 &pkt->buffer_handle[cnt]);
2836
2837 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2838 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2839 *set_flags |= ISP_ABORT_NEEDED;
2840 break;
2841 }
2842 }
2843
2844 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2845 }
2846
2847 /*
2848 * ql_ip_rcv_cont_entry
2849 * Processes received ISP IP buffers continuation entry.
2850 *
2851 * Input:
2852 * ha: adapter state pointer.
2853 * pkt: entry pointer.
2854 * done_q: done queue pointer.
2855 * set_flags: task daemon flags to set.
2856 * reset_flags: task daemon flags to reset.
2857 *
2858 * Context:
2859 * Interrupt or Kernel context, no mailbox commands allowed.
2860 */
2861 /* ARGSUSED */
2862 static void
2863 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2864 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2865 {
2866 uint16_t index;
2867 uint8_t cnt;
2868 ql_tgt_t *tq;
2869
2870 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2871
2872 if ((tq = ha->rcv_dev_q) == NULL) {
2873 EL(ha, "No IP receive device\n");
2874 return;
2875 }
2876
2877 for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2878 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2879
2880 index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2881 &pkt->buffer_handle[cnt]);
2882
2883 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2884 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2885 *set_flags |= ISP_ABORT_NEEDED;
2886 break;
2887 }
2888 }
2889
2890 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2891 }
2892
2893 /*
2894 * ip_rcv_24xx_entry_t
2895 * Processes received ISP24xx IP buffers entry.
2896 *
2897 * Input:
2898 * ha: adapter state pointer.
2899 * pkt: entry pointer.
2900 * done_q: done queue pointer.
2901 * set_flags: task daemon flags to set.
2902 * reset_flags: task daemon flags to reset.
2903 *
2904 * Context:
2905 * Interrupt or Kernel context, no mailbox commands allowed.
2906 */
2907 /* ARGSUSED */
2908 static void
2909 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2910 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2911 {
2912 port_id_t s_id;
2913 uint16_t index;
2914 uint8_t cnt;
2915 ql_tgt_t *tq;
2916
2917 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2918
2919 /* Locate device queue. */
2920 s_id.b.al_pa = pkt->s_id[0];
2921 s_id.b.area = pkt->s_id[1];
2922 s_id.b.domain = pkt->s_id[2];
2923 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2924 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2925 return;
2926 }
2927
2928 if (tq->ub_total_seg_cnt == 0) {
2929 tq->ub_sequence_length = (uint16_t)ddi_get16(
2930 ha->hba_buf.acc_handle, &pkt->seq_length);
2931 tq->ub_total_seg_cnt = pkt->segment_count;
2932 tq->ub_seq_id = ++ha->ub_seq_id;
2933 tq->ub_seq_cnt = 0;
2934 tq->ub_frame_ro = 0;
2935 tq->ub_loop_id = (uint16_t)ddi_get16(
2936 ha->hba_buf.acc_handle, &pkt->n_port_hdl);
2937 }
2938
2939 for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2940 tq->ub_total_seg_cnt; cnt++) {
2941
2942 index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2943 &pkt->buffer_handle[cnt]);
2944
2945 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2946 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2947 *set_flags |= ISP_ABORT_NEEDED;
2948 break;
2949 }
2950 }
2951
2952 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2953 }
2954
/*
 * ql_ms_entry
 *	Processes received Name/Management/CT Pass-Through entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt23:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, cnt, resp_identifier;
	ql_tgt_t	*tq;
	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < MAX_OUTSTANDING_COMMANDS) {
		/* the index seems reasonable */
		sp = ha->outstanding_cmds[index];
		if (sp != NULL) {
			if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Stale/corrupt IOCB; force an ISP abort. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		} else {
			/* Slot empty: may be a preprocessed command. */
			sp = ql_verify_preprocessed_cmd(ha,
			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		if (!(sp->flags & SRB_MS_PKT)) {
			/* Wrong SRB type for this entry; abort the ISP. */
			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
			    sp->flags);
			*set_flags |= ISP_ABORT_NEEDED;
			return;
		}

		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
		    sp->retry_count) {
			EL(ha, "Resouce Unavailable Retry = %d\n",
			    sp->retry_count);

			/* Set retry status. */
			sp->retry_count--;
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					ha->port_retry_timer = 2;
	/*
	 * NOTE(review): this chunk appears truncated here -- the code
	 * between the assignment above and the lock sequence below is
	 * missing, so brace nesting in this region cannot be confirmed.
	 * Verify against the upstream ql_isr.c.
	 */
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer == 0) {
				if ((ha->port_retry_timer =
				    ha->port_down_retry_delay) == 0) {
					*set_flags |=
					    PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
			}
			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (CFG_IST(ha, CFG_CTRL_24258081) &&
		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
			/* Underrun is ignorable if a full CT header arrived. */
			cnt = ddi_get32(ha->hba_buf.acc_handle,
			    &pkt24->resp_byte_count);
			if (cnt < sizeof (fc_ct_header_t)) {
				EL(ha, "Data underrun\n");
			} else {
				sp->pkt->pkt_reason = CS_COMPLETE;
			}

		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
		}

		if (sp->pkt->pkt_reason == CS_COMPLETE) {
			/*EMPTY*/
			QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
			    ha->instance, sp->pkt->pkt_cmd[8],
			    sp->pkt->pkt_cmd[9]);
			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
		}

		/* For nameserver restore command, management change header. */
		if ((sp->flags & SRB_RETRY) == 0) {
			/* 0xfffffc is the directory/name server well-known address. */
			tq->d_id.b24 == 0xfffffc ?
			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
			    sp->pkt->pkt_cmd, B_TRUE) :
			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
			    sp->pkt->pkt_resp, B_TRUE);
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
3128
3129 /*
3130 * ql_report_id_entry
3131 * Processes received Name/Management/CT Pass-Through entry.
3132 *
3133 * Input:
3134 * ha: adapter state pointer.
3135 * pkt: entry pointer.
3136 * done_q: done queue pointer.
3137 * set_flags: task daemon flags to set.
3138 * reset_flags: task daemon flags to reset.
3139 *
3140 * Context:
3141 * Interrupt or Kernel context, no mailbox commands allowed.
3142 */
3143 /* ARGSUSED */
3144 static void
3145 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
3146 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3147 {
3148 ql_adapter_state_t *vha;
3149
3150 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3151
3152 EL(ha, "format=%d, vp=%d, status=%d\n",
3153 pkt->format, pkt->vp_index, pkt->status);
3154
3155 if (pkt->format == 1) {
3156 /* Locate port state structure. */
3157 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3158 if (vha->vp_index == pkt->vp_index) {
3159 break;
3160 }
3161 }
3162 if (vha != NULL && vha->vp_index != 0 &&
3163 (pkt->status == CS_COMPLETE ||
3164 pkt->status == CS_PORT_ID_CHANGE)) {
3165 *set_flags |= LOOP_RESYNC_NEEDED;
3166 *reset_flags &= ~LOOP_RESYNC_NEEDED;
3167 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3168 TASK_DAEMON_LOCK(ha);
3169 vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3170 vha->task_daemon_flags &= ~LOOP_DOWN;
3171 TASK_DAEMON_UNLOCK(ha);
3172 }
3173 }
3174
3175 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3176 }
3177
3178 /*
3179 * ql_els_entry
3180 * Processes received ELS Pass-Through entry.
3181 *
3182 * Input:
3183 * ha: adapter state pointer.
3184 * pkt23: entry pointer.
3185 * done_q: done queue pointer.
3186 * set_flags: task daemon flags to set.
3187 * reset_flags: task daemon flags to reset.
3188 *
3189 * Context:
3190 * Interrupt or Kernel context, no mailbox commands allowed.
3191 */
3192 /* ARGSUSED */
3193 static void
3194 ql_els_passthru_entry(ql_adapter_state_t *ha, els_passthru_entry_rsp_t *rsp,
3195 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3196 {
3197 ql_tgt_t *tq;
3198 port_id_t d_id, s_id;
3199 ql_srb_t *srb;
3200 uint32_t index, resp_identifier;
3201
3202 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3203
3204 /* Validate the response entry handle. */
3205 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &rsp->handle);
3206 index = resp_identifier & OSC_INDEX_MASK;
3207 if (index < MAX_OUTSTANDING_COMMANDS) {
3208 /* the index seems reasonable */
3209 srb = ha->outstanding_cmds[index];
3210 if (srb != NULL) {
3211 if (srb->handle == resp_identifier) {
3212 /* Neo, you're the one... */
3213 ha->outstanding_cmds[index] = NULL;
3214 srb->handle = 0;
3215 srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3216 } else {
3217 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3218 resp_identifier, srb->handle);
3219 srb = NULL;
3220 ql_signal_abort(ha, set_flags);
3221 }
3222 } else {
3223 srb = ql_verify_preprocessed_cmd(ha,
3224 (uint32_t *)&rsp->handle, set_flags, reset_flags);
3225 }
3226 } else {
3227 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3228 index, resp_identifier);
3229 ql_signal_abort(ha, set_flags);
3230 }
3231
3232 if (srb != NULL) {
3233 if (!(srb->flags & SRB_ELS_PKT)) {
3234 EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed",
3235 srb->flags);
3236 *set_flags |= ISP_ABORT_NEEDED;
3237 return;
3238 }
3239
3240 (void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3241 DDI_DMA_SYNC_FORKERNEL);
3242
3243 /* Set ISP completion status */
3244 srb->pkt->pkt_reason = ddi_get16(
3245 ha->hba_buf.acc_handle, &rsp->comp_status);
3246
3247 if (srb->pkt->pkt_reason != CS_COMPLETE) {
3248 la_els_rjt_t rjt;
3249 EL(ha, "status err=%xh\n", srb->pkt->pkt_reason);
3250
3251 if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3252 EL(ha, "e1=%xh e2=%xh\n",
3253 rsp->error_subcode1, rsp->error_subcode2);
3254 }
3255
3256 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3257
3258 /* Build RJT in the response. */
3259 rjt.ls_code.ls_code = LA_ELS_RJT;
3260 rjt.reason = FC_REASON_NO_CONNECTION;
3261
3262 ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3263 (uint8_t *)srb->pkt->pkt_resp,
3264 sizeof (rjt), DDI_DEV_AUTOINCR);
3265
3266 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3267 srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3268 }
3269
3270 if (srb->pkt->pkt_reason == CS_COMPLETE) {
3271 uint8_t opcode;
3272 uint16_t loop_id;
3273
3274 /* Indicate ISP completion */
3275 srb->flags |= SRB_ISP_COMPLETED;
3276
3277 loop_id = ddi_get16(ha->hba_buf.acc_handle,
3278 &rsp->n_port_hdl);
3279
3280 if (ha->topology & QL_N_PORT) {
3281 /* create a target Q if there isn't one */
3282 tq = ql_loop_id_to_queue(ha, loop_id);
3283 if (tq == NULL) {
3284 d_id.b.al_pa = rsp->d_id_7_0;
3285 d_id.b.area = rsp->d_id_15_8;
3286 d_id.b.domain = rsp->d_id_23_16;
3287 /* Acquire adapter state lock. */
3288 ADAPTER_STATE_LOCK(ha);
3289
3290 tq = ql_dev_init(ha, d_id, loop_id);
3291 EL(ha, " tq = %x\n", tq);
3292
3293 ADAPTER_STATE_UNLOCK(ha);
3294 }
3295
3296 /* on plogi success assume the chosen s_id */
3297 opcode = ddi_get8(ha->hba_buf.acc_handle,
3298 &rsp->els_cmd_opcode);
3299
3300 EL(ha, "els_cmd_opcode=%x srb->pkt=%x\n",
3301 opcode, srb->pkt);
3302
3303 if (opcode == LA_ELS_PLOGI) {
3304 s_id.b.al_pa = rsp->s_id_7_0;
3305 s_id.b.area = rsp->s_id_15_8;
3306 s_id.b.domain = rsp->s_id_23_16;
3307
3308 ha->d_id.b24 = s_id.b24;
3309 EL(ha, "Set port's source ID %xh\n",
3310 ha->d_id.b24);
3311 }
3312 }
3313 ql_isp_els_handle_rsp_endian(ha, srb);
3314
3315 if (ha != srb->ha) {
3316 EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha);
3317 }
3318
3319 if (tq != NULL) {
3320 tq->logout_sent = 0;
3321 tq->flags &= ~TQF_NEED_AUTHENTICATION;
3322
3323 if (CFG_IST(ha, CFG_CTRL_24258081)) {
3324 tq->flags |= TQF_IIDMA_NEEDED;
3325 }
3326 srb->pkt->pkt_state = FC_PKT_SUCCESS;
3327 }
3328 }
3329 /* invoke the callback */
3330 ql_awaken_task_daemon(ha, srb, 0, 0);
3331 }
3332 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3333 }
3334
3335 /*
3336 * ql_signal_abort
3337 * Signal to the task daemon that a condition warranting an
3338 * isp reset has been detected.
3339 *
3340 * Input:
3341 * ha: adapter state pointer.
3342 * set_flags: task daemon flags to set.
3343 *
3344 * Context:
3345 * Interrupt or Kernel context, no mailbox commands allowed.
3346 */
3347 static void
3348 ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags)
3349 {
3350 if (!CFG_IST(ha, CFG_CTRL_8021) &&
3351 !(ha->task_daemon_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
3352 *set_flags |= ISP_ABORT_NEEDED;
3353 }
3354 }
|
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 #pragma ident "Copyright 2015 QLogic Corporation; ql_isr.c"
29
30 /*
31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 *
33 * ***********************************************************************
34 * * **
35 * * NOTICE **
36 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
37 * * ALL RIGHTS RESERVED **
38 * * **
39 * ***********************************************************************
40 *
41 */
42
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_iocb.h>
47 #include <ql_isr.h>
48 #include <ql_init.h>
49 #include <ql_mbx.h>
50 #include <ql_nx.h>
51 #include <ql_xioctl.h>
52 #include <ql_fm.h>
53
54 /*
55 * Local Function Prototypes.
56 */
57 static void ql_clr_risc_intr(ql_adapter_state_t *);
58 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, int, uint32_t,
59 uint64_t *);
60 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint64_t *,
61 uint64_t *);
62 static void ql_async_event(ql_adapter_state_t *, ql_response_q_t *, uint32_t,
63 ql_head_t *, uint64_t *, uint64_t *);
64 static void ql_fast_fcp_post(ql_srb_t *, ql_response_q_t *);
65 static void ql_response_pkt(ql_adapter_state_t *, ql_response_q_t *,
66 ql_head_t *, uint64_t *, uint64_t *);
67 static void ql_error_entry(ql_adapter_state_t *, ql_response_q_t *,
68 response_t *, ql_head_t *, uint64_t *, uint64_t *);
69 static int ql_status_entry(ql_adapter_state_t *, ql_response_q_t *,
70 sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
71 static int ql_24xx_status_entry(ql_adapter_state_t *, ql_response_q_t *,
72 sts_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
73 static int ql_status_error(ql_adapter_state_t *, ql_response_q_t *, ql_srb_t *,
74 sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
75 static void ql_status_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
76 sts_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
77 static void ql_ip_entry(ql_adapter_state_t *, ql_response_q_t *, ip_entry_t *,
78 ql_head_t *, uint64_t *, uint64_t *);
79 static void ql_ip_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
80 ip_rcv_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
81 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
82 ip_rcv_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
83 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
84 ip_rcv_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
85 static void ql_ms_entry(ql_adapter_state_t *, ql_response_q_t *, ms_entry_t *,
86 ql_head_t *, uint64_t *, uint64_t *);
87 static void ql_report_id_entry(ql_adapter_state_t *, ql_response_q_t *,
88 report_id_acq_t *, ql_head_t *, uint64_t *, uint64_t *);
89 static void ql_els_passthru_entry(ql_adapter_state_t *, ql_response_q_t *,
90 els_passthru_entry_rsp_t *, ql_head_t *, uint64_t *, uint64_t *);
91 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *,
92 ql_response_q_t *, uint32_t *, uint32_t *, uint64_t *, uint64_t *);
93 static void ql_signal_abort(ql_adapter_state_t *, uint64_t *);
94
95 /*
96 * ql_disable_intr
97 * Disable interrupts.
98 *
99 * Input:
100 * ha: adapter state pointer.
101 *
102 * Context:
103 * Interrupt or Kernel context, no mailbox commands allowed.
104 */
105 void
106 ql_disable_intr(ql_adapter_state_t *ha)
107 {
108 int i, rval;
109
110 QL_PRINT_10(ha, "started\n");
111
112 if (CFG_IST(ha, CFG_CTRL_82XX)) {
113 ql_8021_disable_intrs(ha);
114 } else {
115 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
116 WRT32_IO_REG(ha, ictrl, 0);
117 (void) RD32_IO_REG(ha, ictrl); /* PCI posting */
118 } else {
119 WRT16_IO_REG(ha, ictrl, 0);
120 (void) RD16_IO_REG(ha, ictrl); /* PCI posting */
121 }
122 }
123 if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
124 for (i = 0; i < ha->intr_cnt; i++) {
125 QL_PRINT_10(ha, "intr_set_mask %d\n", i);
126 if ((rval = ddi_intr_set_mask(ha->htable[i])) !=
127 DDI_SUCCESS) {
128 EL(ha, "intr_set_mask status=%xh\n", rval);
129 }
130 }
131 }
132 ADAPTER_STATE_LOCK(ha);
133 ha->flags &= ~INTERRUPTS_ENABLED;
134 ADAPTER_STATE_UNLOCK(ha);
135
136 QL_PRINT_10(ha, "done\n");
137 }
138
139 /*
140 * ql_enaable_intr
141 * Enable interrupts.
142 *
143 * Input:
144 * ha: adapter state pointer.
145 *
146 * Context:
147 * Interrupt or Kernel context, no mailbox commands allowed.
148 */
149 void
150 ql_enable_intr(ql_adapter_state_t *ha)
151 {
152 int i, rval;
153
154 QL_PRINT_10(ha, "started\n");
155
156 if (CFG_IST(ha, CFG_CTRL_82XX)) {
157 ql_8021_enable_intrs(ha);
158 } else {
159 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
160 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
161 (void) RD32_IO_REG(ha, ictrl); /* PCI posting */
162 } else {
163 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
164 (void) RD16_IO_REG(ha, ictrl); /* PCI posting */
165 }
166 }
167 if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
168 for (i = 0; i < ha->intr_cnt; i++) {
169 QL_PRINT_10(ha, "intr_clr_mask %d\n", i);
170 if ((rval = ddi_intr_clr_mask(ha->htable[i])) !=
171 DDI_SUCCESS) {
172 EL(ha, "intr_clr_mask status=%xh\n", rval);
173 }
174 }
175 }
176 ADAPTER_STATE_LOCK(ha);
177 ha->flags |= INTERRUPTS_ENABLED;
178 ADAPTER_STATE_UNLOCK(ha);
179
180 QL_PRINT_10(ha, "done\n");
181 }
182
183 /*
184 * ql_clr_risc_intr
185 * Clear firmware interrupt.
186 *
187 * Input:
188 * ha: adapter state pointer.
189 *
190 * Context:
191 * Interrupt or Kernel context, no mailbox commands allowed.
192 */
193 static void
194 ql_clr_risc_intr(ql_adapter_state_t *ha)
195 {
196 QL_PRINT_3(ha, "started\n");
197
198 if (CFG_IST(ha, CFG_CTRL_82XX)) {
199 ql_8021_clr_fw_intr(ha);
200 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
201 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
202 RD32_IO_REG(ha, hccr); /* PCI posting. */
203 } else {
204 WRT16_IO_REG(ha, semaphore, 0);
205 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
206 RD16_IO_REG(ha, hccr); /* PCI posting. */
207 }
208
209 QL_PRINT_3(ha, "done\n");
210 }
211
212 /*
213 * ql_isr
214 * Process all INTX intr types.
215 *
216 * Input:
217 * arg1: adapter state pointer.
218 *
219 * Returns:
220 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
221 *
222 * Context:
223 * Interrupt or Kernel context, no mailbox commands allowed.
224 */
225 /* ARGSUSED */
226 uint_t
227 ql_isr(caddr_t arg1)
228 {
229 return (ql_isr_aif(arg1, 0));
230 }
231
/*
 * ql_isr_aif
 *	Process mailbox and I/O command completions.
 *
 * Input:
 *	arg:	adapter state pointer.
 *	arg2:	interrupt vector index; 0 is the INTx/base vector, a value
 *		N > 0 is the vector servicing response queue N - 1.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t arg2)
{
	uint32_t		mbx, stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint64_t		set_flags = 0, reset_flags = 0;
	ql_head_t		isr_done_q = {NULL, NULL};
	uint_t			rval = DDI_INTR_UNCLAIMED;
	ql_response_q_t		*rsp_q = NULL;
	int			intr, index = (int)((uintptr_t)arg2);

	QL_PRINT_3(ha, "started, index=%d\n", index);

	/* Exit if not attached. */
	if (ha == NULL || ha->intr_pri == NULL) {
		EL(ha, "ha=%p, intr_pri=%p not attached\n", (void *)ha,
		    ha != NULL ? ha->intr_pri : NULL);
		return (DDI_INTR_UNCLAIMED);
	}

	/* Exit if chip not powered up. */
	if (ha->power_level != PM_LEVEL_D0) {
		EL(ha, "power down exit\n");
		return (DDI_INTR_UNCLAIMED);
	}
	/* Hold off power management while the interrupt is serviced. */
	QL_PM_LOCK(ha);
	ha->pm_busy++;
	QL_PM_UNLOCK(ha);

	/*
	 * Acquire interrupt lock.
	 * Map the vector index onto a per-vector lock index: vector N > 0
	 * uses lock N - 1; an out-of-range index falls back to vector 0.
	 */
	if (index > ha->rsp_queues_cnt) {
		intr = index = 0;
	} else if (index) {
		intr = index - 1;
	} else {
		intr = 0;
	}
	INDX_INTR_LOCK(ha, intr);

	if (index && ha->flags & NO_INTR_HANDSHAKE) {
		/* Multi-queue path: no register handshake with the RISC. */
		QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE, index=%xh\n", index);
		index--;
		if (index < ha->rsp_queues_cnt) {
			rsp_q = ha->rsp_queues[index];
		}
		if (rsp_q == NULL) {
			EL(ha, "unsupported MULTI_Q_RSP_UPDATE, index=%d\n",
			    index);
			rsp_q = ha->rsp_queues[0];
		}

		/* Fetch firmware's response-in index (shadow or MBAR). */
		if (ha->flags & QUEUE_SHADOW_PTRS) {
			(void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
			    (off_t)rsp_q->rsp_in_shadow_ofst,
			    SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
			mbx = ddi_get32(rsp_q->rsp_ring.acc_handle,
			    rsp_q->rsp_in_shadow_ptr);
		} else {
			mbx = RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
		}

		/* Drain new entries if the in-index moved. */
		if (mbx != rsp_q->rsp_ring_index) {
			rsp_q->isp_rsp_index = (uint16_t)mbx;
			ql_response_pkt(ha, rsp_q, &isr_done_q,
			    &set_flags, &reset_flags);
			/* PCI posting */
			(void) RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
		} else if (ha->flags & INTERRUPTS_ENABLED) {
			/*EMPTY*/
			QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbar_rsp_in "
			    "same as before\n");
		}

		/* Set interrupt claimed status. */
		rval = DDI_INTR_CLAIMED;

	} else if (CFG_IST(ha, CFG_CTRL_22XX)) {
		/* 22xx: semaphore decides mailbox vs. response update. */
		rsp_q = ha->rsp_queues[0];
		if (RD16_IO_REG(ha, istatus) & RISC_INT) {
			rval = DDI_INTR_CLAIMED;

			/* Check for mailbox interrupt. */
			stat = RD16_IO_REG(ha, semaphore);
			if (stat & BIT_0) {
				/* Get mailbox data. */
				mbx = RD16_IO_REG(ha, mailbox_out[0]);
				if (mbx > 0x3fff && mbx < 0x8000) {
					/* 0x4000-0x7fff: command status. */
					ql_mbx_completion(ha, mbx,
					    &set_flags, &reset_flags);
				} else if (mbx > 0x7fff && mbx < 0xc000) {
					/* 0x8000-0xbfff: async event. */
					ql_async_event(ha, rsp_q, mbx,
					    &isr_done_q, &set_flags,
					    &reset_flags);
				} else {
					EL(ha, "22XX unknown interrupt type\n");
				}
			} else {
				/* No mailbox: poll the response in-index. */
				rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
				if (rsp_q->isp_rsp_index !=
				    rsp_q->rsp_ring_index) {
					ql_response_pkt(ha, rsp_q,
					    &isr_done_q, &set_flags,
					    &reset_flags);
				} else {
					/*EMPTY*/
					QL_PRINT_10(ha, "22XX isp_rsp_index "
					    "same as before\n");
				}
			}
			/* Clear RISC interrupt */
			ql_clr_risc_intr(ha);
		}
	} else {
		/* 23xx and later: dispatch on the risc2host status code. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_clr_hw_intr(ha);
		}

		if (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) == 0) {
			QL_PRINT_10(ha, "done, index=%d, no interrupt "
			    "stat=%xh\n", index, stat);
			rval = DDI_INTR_UNCLAIMED;
		} else if (ha->ql_dump_state & QL_DUMPING) {
			/* Firmware dump in progress: claim but do nothing. */
			EL(ha, "fw_dump, index=%d, active stat=%xh\n",
			    index, stat);
			rval = DDI_INTR_CLAIMED;
		} else if (CFG_IST(ha, CFG_CTRL_82XX) &&
		    RD32_IO_REG(ha, nx_risc_int) == 0) {
			QL_PRINT_10(ha, "done, index=%d, no nx_risc_int "
			    "stat=%xh\n", index, stat);
			rval = DDI_INTR_UNCLAIMED;
		} else {
			rval = DDI_INTR_CLAIMED;
			QL_PRINT_3(ha, "index=%d, interrupt stat=%xh\n",
			    index, stat);

			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
			    != DDI_FM_OK) {
				qlc_fm_report_err_impact(ha,
				    QL_FM_EREPORT_ACC_HANDLE_CHECK);
			}

			/* Low 9 bits of risc2host select the intr type. */
			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags);
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags);
				break;

			case ASYNC_EVENT:
				ql_async_event(ha, ha->rsp_queues[0],
				    (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags);
				break;

			case MULTI_Q_RSP_UPDATE:
				/* mbx carries the response queue number. */
				QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbx=%xh\n",
				    mbx);
				if (mbx < ha->rsp_queues_cnt) {
					rsp_q = ha->rsp_queues[mbx];
				}
				if (rsp_q == NULL) {
					EL(ha, "unsupported MULTI_Q_RSP_UPDATE"
					    " mbx=%d\n", mbx);
					rsp_q = ha->rsp_queues[0];
				}
				/* Re-read mbx as the new in-index. */
				if (ha->flags & QUEUE_SHADOW_PTRS) {
					(void) ddi_dma_sync(
					    rsp_q->rsp_ring.dma_handle,
					    (off_t)rsp_q->rsp_in_shadow_ofst,
					    SHADOW_ENTRY_SIZE,
					    DDI_DMA_SYNC_FORCPU);
					mbx = ddi_get32(
					    rsp_q->rsp_ring.acc_handle,
					    rsp_q->rsp_in_shadow_ptr);
				} else {
					mbx = RD32_MBAR_REG(ha,
					    rsp_q->mbar_rsp_in);
				}
				/* FALLTHRU */

			case RESP_UPDATE:
				/* Clear RISC interrupt */
				ql_clr_risc_intr(ha);

				if (rsp_q == NULL) {
					rsp_q = ha->rsp_queues[0];
				}
				if (mbx != rsp_q->rsp_ring_index) {
					rsp_q->isp_rsp_index = (uint16_t)mbx;
					ql_response_pkt(ha, rsp_q, &isr_done_q,
					    &set_flags, &reset_flags);
				} else {
					/*EMPTY*/
					QL_PRINT_3(ha, "response "
					    "ring index same as before\n");
				}
				break;

			/*
			 * Fast-post codes: rewrite stat's low word as the
			 * matching async event and reuse ql_async_event().
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				break;

			default:
				/* Parity/pause or unknown status. */
				ql_handle_uncommon_risc_intr(ha, intr, stat,
				    &set_flags);
				break;
			}
		}

		/*
		 * Clear RISC interrupt.
		 * Skipped when rsp_q is set: those paths already cleared it
		 * (RESP_UPDATE) or need no handshake.
		 */
		if (rval == DDI_INTR_CLAIMED && rsp_q == NULL) {
			ql_clr_risc_intr(ha);
		}

		/* A0 chip delay */
		if (CFG_IST(ha, CFG_CTRL_83XX) && ha->rev_id == 1 &&
		    ha->iflags & (IFLG_INTR_LEGACY | IFLG_INTR_FIXED)) {
			drv_usecwait(4);
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INDX_INTR_UNLOCK(ha, intr);

	/* Kick the task daemon for any deferred work noted above. */
	if (set_flags || reset_flags) {
		ql_awaken_task_daemon(ha, NULL, set_flags, reset_flags);
	}

	/* Complete I/Os gathered on the done queue, outside the lock. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first, B_FALSE);
	}

	QL_PM_LOCK(ha);
	if (ha->pm_busy) {
		ha->pm_busy--;
	}
	QL_PM_UNLOCK(ha);

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(ha, "done\n");
		ha->idle_timer = 0;
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_10(ha, "interrupt not claimed\n");
	}

	return (rval);
}
568
/*
 * ql_handle_uncommon_risc_intr
 *	Handle an uncommon RISC interrupt.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	intr:		interrupt index (per-vector lock held on entry; it is
 *			dropped and re-acquired around the firmware dump).
 *	stat:		interrupt status
 *	set_flags:	task daemon flags to set.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, int intr, uint32_t stat,
    uint64_t *set_flags)
{
	uint16_t	hccr_reg;

	hccr_reg = RD16_IO_REG(ha, hccr);

	/* RISC paused, or an error bit set in HCCR: treat as fatal fault. */
	if (stat & RH_RISC_PAUSED ||
	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);

		/* Warn only when the error signature changes (rate limit). */
		if (ha->parity_pause_errors == 0 ||
		    ha->parity_hccr_err != hccr_reg ||
		    ha->parity_stat_err != stat) {
			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
			    ha->instance, hccr_reg, stat,
			    ha->parity_pause_errors);
			ha->parity_hccr_err = hccr_reg;
			ha->parity_stat_err = stat;
		}

		EL(ha, "parity/pause error, isp_abort_needed\n");

		/* Dump/reset may block; release the interrupt lock first. */
		INDX_INTR_UNLOCK(ha, intr);
		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
			ql_reset_chip(ha);
		}
		INDX_INTR_LOCK(ha, intr);

		if (ha->parity_pause_errors == 0) {
			ha->log_parity_pause = B_TRUE;
		}

		/* Saturating counter: never wraps back to zero. */
		if (ha->parity_pause_errors < 0xffffffff) {
			ha->parity_pause_errors++;
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Disable ISP interrupts. */
		ql_disable_intr(ha);
	} else {
		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
		    stat, hccr_reg);
	}
}
633
634 /*
635 * ql_mbx_completion
636 * Processes mailbox completions.
637 *
638 * Input:
639 * ha: adapter state pointer.
640 * mb0: Mailbox 0 contents.
641 * set_flags: task daemon flags to set.
642 * reset_flags: task daemon flags to reset.
643 *
644 * Context:
645 * Interrupt context.
646 */
647 /* ARGSUSED */
648 static void
649 ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint64_t *set_flags,
650 uint64_t *reset_flags)
651 {
652 uint32_t index;
653 uint16_t cnt;
654
655 QL_PRINT_3(ha, "started\n");
656
657 /* Load return mailbox registers. */
658 MBX_REGISTER_LOCK(ha);
659
660 if (ha->mcp != NULL) {
661 ha->mcp->mb[0] = mb0;
662 index = ha->mcp->in_mb & ~MBX_0;
663
664 for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
665 index >>= 1;
666 if (index & MBX_0) {
667 ha->mcp->mb[cnt] = RD16_IO_REG(ha,
668 mailbox_out[cnt]);
669 }
670 }
671
672 } else {
673 EL(ha, "mcp == NULL\n");
674 }
675
676 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
677 if (ha->flags & INTERRUPTS_ENABLED) {
678 cv_broadcast(&ha->cv_mbx_intr);
679 }
680
681 MBX_REGISTER_UNLOCK(ha);
682
683 QL_PRINT_3(ha, "done\n");
684 }
685
686 /*
687 * ql_async_event
688 * Processes asynchronous events.
689 *
690 * Input:
691 * ha: adapter state pointer.
692 * rsp_q: response queue structure pointer.
693 * mbx: Mailbox 0 register.
694 * done_q: head pointer to done queue.
695 * set_flags: task daemon flags to set.
696 * reset_flags: task daemon flags to reset.
697 *
698 * Context:
699 * Interrupt or Kernel context, no mailbox commands allowed.
700 */
701 static void
702 ql_async_event(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, uint32_t mbx,
703 ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
704 {
705 uint32_t index, handles[5];
706 uint16_t cnt, handle_cnt, mb[MAX_MBOX_COUNT];
707 ql_srb_t *sp;
708 port_id_t s_id;
709 ql_tgt_t *tq;
710 ql_adapter_state_t *vha;
711
712 QL_PRINT_3(ha, "started\n");
713
714 /* Setup to process fast completion. */
715 mb[0] = LSW(mbx);
716 switch (mb[0]) {
717 case MBA_SCSI_COMPLETION:
718 handles[0] = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
719 RD16_IO_REG(ha, mailbox_out[2]));
720 handle_cnt = 1;
721 break;
722
723 case MBA_CMPLT_1_16BIT:
724 handles[0] = MSW(mbx);
725 handle_cnt = 1;
726 mb[0] = MBA_SCSI_COMPLETION;
727 break;
728
729 case MBA_CMPLT_2_16BIT:
730 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
731 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
732 handle_cnt = 2;
733 mb[0] = MBA_SCSI_COMPLETION;
734 break;
735
736 case MBA_CMPLT_3_16BIT:
737 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
738 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
739 handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
740 handle_cnt = 3;
741 mb[0] = MBA_SCSI_COMPLETION;
742 break;
743
744 case MBA_CMPLT_4_16BIT:
745 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
746 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
747 handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
748 handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
749 handle_cnt = 4;
750 mb[0] = MBA_SCSI_COMPLETION;
751 break;
752
753 case MBA_CMPLT_5_16BIT:
754 handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
755 handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
756 handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
757 handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
758 handles[4] = (uint32_t)RD16_IO_REG(ha, mailbox_out[7]);
759 handle_cnt = 5;
760 mb[0] = MBA_SCSI_COMPLETION;
761 break;
762
763 case MBA_CMPLT_1_32BIT:
764 handles[0] = SHORT_TO_LONG(MSW(mbx),
765 RD16_IO_REG(ha, mailbox_out[2]));
766 handle_cnt = 1;
767 mb[0] = MBA_SCSI_COMPLETION;
768 break;
769
770 case MBA_CMPLT_2_32BIT:
771 handles[0] = SHORT_TO_LONG(
772 RD16_IO_REG(ha, mailbox_out[1]),
773 RD16_IO_REG(ha, mailbox_out[2]));
774 handles[1] = SHORT_TO_LONG(
775 RD16_IO_REG(ha, mailbox_out[6]),
776 RD16_IO_REG(ha, mailbox_out[7]));
777 handle_cnt = 2;
778 mb[0] = MBA_SCSI_COMPLETION;
779 break;
780
781 case MBA_CTIO_COMPLETION:
782 case MBA_IP_COMPLETION:
783 handles[0] = CFG_IST(ha, CFG_CTRL_22XX) ? SHORT_TO_LONG(
784 RD16_IO_REG(ha, mailbox_out[1]),
785 RD16_IO_REG(ha, mailbox_out[2])) :
786 SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2]));
787 handle_cnt = 1;
788 mb[0] = MBA_SCSI_COMPLETION;
789 break;
790
791 default:
792 break;
793 }
794
795 /* Handle asynchronous event */
796 switch (mb[0]) {
797 case MBA_SCSI_COMPLETION:
798 QL_PRINT_5(ha, "Fast post completion\n");
799
800 if ((ha->flags & ONLINE) == 0) {
801 break;
802 }
803
804 for (cnt = 0; cnt < handle_cnt; cnt++) {
805 QL_PRINT_5(ha, "Fast post completion, handle=%xh\n",
806 handles[cnt]);
807
808 /* Get handle. */
809 index = handles[cnt] & OSC_INDEX_MASK;
810
811 /* Validate handle. */
812 sp = index < ha->osc_max_cnt ?
813 ha->outstanding_cmds[index] : NULL;
814
815 if (sp == QL_ABORTED_SRB(ha)) {
816 EL(ha, "QL_ABORTED_SRB handle=%xh\n",
817 handles[cnt]);
818 ha->outstanding_cmds[index] = NULL;
819 continue;
820 }
821 if (sp != NULL && sp->handle == handles[cnt]) {
822 ha->outstanding_cmds[index] = NULL;
823 sp->handle = 0;
824 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
825
826 /* Set completed status. */
827 sp->flags |= SRB_ISP_COMPLETED;
828
829 /* Set completion status */
830 sp->pkt->pkt_reason = CS_COMPLETE;
831
832 if (!(sp->flags & SRB_FCP_CMD_PKT)) {
833 /* Place block on done queue */
834 ql_add_link_b(done_q, &sp->cmd);
835 } else {
836 ql_fast_fcp_post(sp, rsp_q);
837 }
838 } else if (handles[cnt] != QL_FCA_BRAND) {
839 if (sp == NULL) {
840 EL(ha, "%xh unknown IOCB handle=%xh\n",
841 mb[0], handles[cnt]);
842 } else {
843 EL(ha, "%xh mismatch IOCB handle "
844 "pkt=%xh, sp=%xh\n", mb[0],
845 handles[cnt], sp->handle);
846 }
847
848 EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, "
849 "mbx3=%xh, mbx6=%xh, mbx7=%xh\n", mb[0],
850 RD16_IO_REG(ha, mailbox_out[1]),
851 RD16_IO_REG(ha, mailbox_out[2]),
852 RD16_IO_REG(ha, mailbox_out[3]),
853 RD16_IO_REG(ha, mailbox_out[6]),
854 RD16_IO_REG(ha, mailbox_out[7]));
855
856 ADAPTER_STATE_LOCK(ha);
857 ha->flags |= FW_DUMP_NEEDED;
858 ADAPTER_STATE_UNLOCK(ha);
859
860 if (!(ha->task_daemon_flags &
861 ISP_ABORT_NEEDED)) {
862 EL(ha, "%xh ISP Invalid handle, "
863 "isp_abort_needed\n", mb[0]);
864 *set_flags |= ISP_ABORT_NEEDED;
865 }
866 }
867 }
868 break;
869
870 case MBA_RESET: /* Reset */
871 EL(ha, "%xh Reset received\n", mb[0]);
872 *set_flags |= MARKER_NEEDED;
873 break;
874
875 case MBA_SYSTEM_ERR: /* System Error */
876 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
877 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
878 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
879 mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
880
881 EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
882 "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
883 "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
884 "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
885 RD16_IO_REG(ha, mailbox_out[4]),
886 RD16_IO_REG(ha, mailbox_out[5]),
887 RD16_IO_REG(ha, mailbox_out[6]), mb[7],
888 RD16_IO_REG(ha, mailbox_out[8]),
889 RD16_IO_REG(ha, mailbox_out[9]),
890 RD16_IO_REG(ha, mailbox_out[10]),
891 RD16_IO_REG(ha, mailbox_out[11]),
892 RD16_IO_REG(ha, mailbox_out[12]));
903 RD16_IO_REG(ha, mailbox_out[19]),
904 RD16_IO_REG(ha, mailbox_out[20]),
905 RD16_IO_REG(ha, mailbox_out[21]),
906 RD16_IO_REG(ha, mailbox_out[22]),
907 RD16_IO_REG(ha, mailbox_out[23]));
908
909 if (ha->reg_off->mbox_cnt > 24) {
910 EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
911 "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
912 "mbx30=%xh, mbx31=%xh\n", mb[0],
913 RD16_IO_REG(ha, mailbox_out[24]),
914 RD16_IO_REG(ha, mailbox_out[25]),
915 RD16_IO_REG(ha, mailbox_out[26]),
916 RD16_IO_REG(ha, mailbox_out[27]),
917 RD16_IO_REG(ha, mailbox_out[28]),
918 RD16_IO_REG(ha, mailbox_out[29]),
919 RD16_IO_REG(ha, mailbox_out[30]),
920 RD16_IO_REG(ha, mailbox_out[31]));
921 }
922
923 ADAPTER_STATE_LOCK(ha);
924 ha->flags |= FW_DUMP_NEEDED;
925 ADAPTER_STATE_UNLOCK(ha);
926
927 /* Signal task daemon to store error log. */
928 if (ha->errlog[0] == 0) {
929 ha->errlog[3] = mb[3];
930 ha->errlog[2] = mb[2];
931 ha->errlog[1] = mb[1];
932 ha->errlog[0] = FLASH_ERRLOG_AEN_8002;
933 }
934
935 if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
936 ADAPTER_STATE_LOCK(ha);
937 ha->flags |= MPI_RESET_NEEDED;
938 ADAPTER_STATE_UNLOCK(ha);
939 }
940
941 *set_flags |= ISP_ABORT_NEEDED;
942 ha->xioctl->ControllerErrorCount++;
943 break;
944
945 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
946 EL(ha, "%xh Request Transfer Error received, "
947 "isp_abort_needed\n", mb[0]);
948
949 /* Signal task daemon to store error log. */
950 if (ha->errlog[0] == 0) {
951 ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
952 ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
953 ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
954 ha->errlog[0] = FLASH_ERRLOG_AEN_8003;
955 }
956
957 *set_flags |= ISP_ABORT_NEEDED;
958 ha->xioctl->ControllerErrorCount++;
959
960 (void) qlc_fm_report_err_impact(ha,
961 QL_FM_EREPORT_MBA_REQ_TRANSFER_ERR);
962
963 break;
964
965 case MBA_RSP_TRANSFER_ERR: /* Response Xfer Err */
966 EL(ha, "%xh Response Transfer Error received,"
967 " isp_abort_needed\n", mb[0]);
968
969 /* Signal task daemon to store error log. */
970 if (ha->errlog[0] == 0) {
971 ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
972 ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
973 ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
974 ha->errlog[0] = FLASH_ERRLOG_AEN_8004;
975 }
976
977 *set_flags |= ISP_ABORT_NEEDED;
978 ha->xioctl->ControllerErrorCount++;
979
980 (void) qlc_fm_report_err_impact(ha,
981 QL_FM_EREPORT_MBA_RSP_TRANSFER_ERR);
982
983 break;
984
985 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
986 EL(ha, "%xh Request Queue Wake-up "
987 "received, mbx1=%xh\n", mb[0],
988 RD16_IO_REG(ha, mailbox_out[1]));
989 break;
990
991 case MBA_MENLO_ALERT: /* Menlo Alert Notification */
992 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
993 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
994 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
995
996 EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
997 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
998
999 switch (mb[1]) {
1000 case MLA_LOGIN_OPERATIONAL_FW:
1001 ADAPTER_STATE_LOCK(ha);
1002 ha->flags |= MENLO_LOGIN_OPERATIONAL;
1003 ADAPTER_STATE_UNLOCK(ha);
1004 break;
1005 case MLA_PANIC_RECOVERY:
1006 case MLA_LOGIN_DIAGNOSTIC_FW:
1007 case MLA_LOGIN_GOLDEN_FW:
1008 case MLA_REJECT_RESPONSE:
1009 default:
1010 break;
1011 }
1012 break;
1013
1014 case MBA_LIP_F8: /* Received a LIP F8. */
1015 case MBA_LIP_RESET: /* LIP reset occurred. */
1016 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
1017 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1018 EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
1019 "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1020 RD16_IO_REG(ha, mailbox_out[2]));
1021 } else {
1022 EL(ha, "%xh LIP received\n", mb[0]);
1023 }
1024
1025 ADAPTER_STATE_LOCK(ha);
1026 ha->flags &= ~POINT_TO_POINT;
1027 ADAPTER_STATE_UNLOCK(ha);
1028
1029 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1030 *set_flags |= LOOP_DOWN;
1031 }
1032 ql_port_state(ha, FC_STATE_OFFLINE,
1033 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1034
1035 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1036 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1037 }
1038
1039 ha->adapter_stats->lip_count++;
1040
1041 /* Update AEN queue. */
1042 ha->xioctl->TotalLipResets++;
1043 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1044 ql_enqueue_aen(ha, mb[0], NULL);
1045 }
1046 break;
1047
1048 case MBA_LOOP_UP:
1049 if (!CFG_IST(ha, CFG_CTRL_22XX)) {
1050 ha->iidma_rate = RD16_IO_REG(ha, mailbox_out[1]);
1051 if (ha->iidma_rate == IIDMA_RATE_1GB) {
1052 ha->state = FC_PORT_STATE_MASK(
1053 ha->state) | FC_STATE_1GBIT_SPEED;
1054 index = 1;
1055 } else if (ha->iidma_rate == IIDMA_RATE_2GB) {
1056 ha->state = FC_PORT_STATE_MASK(
1057 ha->state) | FC_STATE_2GBIT_SPEED;
1058 index = 2;
1059 } else if (ha->iidma_rate == IIDMA_RATE_4GB) {
1060 ha->state = FC_PORT_STATE_MASK(
1061 ha->state) | FC_STATE_4GBIT_SPEED;
1062 index = 4;
1063 } else if (ha->iidma_rate == IIDMA_RATE_8GB) {
1064 ha->state = FC_PORT_STATE_MASK(
1065 ha->state) | FC_STATE_8GBIT_SPEED;
1066 index = 8;
1067 } else if (ha->iidma_rate == IIDMA_RATE_10GB) {
1068 ha->state = FC_PORT_STATE_MASK(
1069 ha->state) | FC_STATE_10GBIT_SPEED;
1070 index = 10;
1071 } else if (ha->iidma_rate == IIDMA_RATE_16GB) {
1072 ha->state = FC_PORT_STATE_MASK(
1073 ha->state) | FC_STATE_16GBIT_SPEED;
1074 index = 16;
1075 } else if (ha->iidma_rate == IIDMA_RATE_32GB) {
1076 ha->state = FC_PORT_STATE_MASK(
1077 ha->state) | FC_STATE_32GBIT_SPEED;
1078 index = 32;
1079 } else {
1080 ha->state = FC_PORT_STATE_MASK(
1081 ha->state);
1082 index = 0;
1083 }
1084 } else {
1085 ha->iidma_rate = IIDMA_RATE_1GB;
1086 ha->state = FC_PORT_STATE_MASK(ha->state) |
1087 FC_STATE_FULL_SPEED;
1088 index = 1;
1089 }
1090
1091 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1092 vha->state = FC_PORT_STATE_MASK(vha->state) |
1093 FC_PORT_SPEED_MASK(ha->state);
1094 }
1095 EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
1096
1097 /* Update AEN queue. */
1098 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1100 }
1101 break;
1102
1103 case MBA_LOOP_DOWN:
1104 EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1105 "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1106 RD16_IO_REG(ha, mailbox_out[2]),
1107 RD16_IO_REG(ha, mailbox_out[3]),
1108 RD16_IO_REG(ha, mailbox_out[4]));
1109
1110 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1111 *set_flags |= LOOP_DOWN;
1112 }
1113 ql_port_state(ha, FC_STATE_OFFLINE,
1114 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1115
1116 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1117 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1118 }
1119
1120 if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1121 ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]);
1122 }
1123
1124 /* Update AEN queue. */
1125 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1126 ql_enqueue_aen(ha, mb[0], NULL);
1127 }
1128 break;
1129
1130 case MBA_PORT_UPDATE:
1131 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1132 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1133 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1134 RD16_IO_REG(ha, mailbox_out[3]) : 0);
1135
1136 /* Locate port state structure. */
1137 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1138 if (vha->vp_index == LSB(mb[3])) {
1139 break;
1140 }
1141 }
1142 if (vha == NULL) {
1143 break;
1144 }
1145
1146 if (mb[1] == 0xffff &&
1147 mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a ||
1148 MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d ||
1149 MSB(mb[3]) == 0x1e)) {
1150 EL(ha, "%xh Port Database Update, Loop down "
1151 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1152 mb[0], mb[1], mb[2], mb[3]);
1153 /*
1154 * received FLOGI reject
1155 * received FLOGO
1156 * FCF configuration changed
1157 * FIP Clear Virtual Link received
1158 * FCF timeout
1159 */
1160 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1161 *set_flags |= LOOP_DOWN;
1162 }
1163 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE |
1164 COMMAND_WAIT_NEEDED | LOOP_DOWN);
1165 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1166 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1167 }
1168 /*
1169 * In N port 2 N port topology the FW provides a port
1170 * database entry at loop_id 0x7fe which we use to
1171 * acquire the Ports WWPN.
1172 */
1173 } else if ((mb[1] != 0x7fe) &&
1174 ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1175 (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
1176 (mb[2] != 6 || mb[3] != 0))))) {
1177 EL(ha, "%xh Port Database Update, Login/Logout "
1178 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1179 mb[0], mb[1], mb[2], mb[3]);
1180 } else {
1181 EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1182 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1183 mb[3]);
1184 *set_flags |= LOOP_RESYNC_NEEDED;
1185 *set_flags &= ~LOOP_DOWN;
1186 *reset_flags |= LOOP_DOWN;
1187 *reset_flags &= ~LOOP_RESYNC_NEEDED;
1188 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1189 TASK_DAEMON_LOCK(ha);
1190 vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1191 vha->task_daemon_flags &= ~LOOP_DOWN;
1192 TASK_DAEMON_UNLOCK(ha);
1193 ADAPTER_STATE_LOCK(ha);
1194 vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1195 ADAPTER_STATE_UNLOCK(ha);
1196 }
1248 break;
1249
1250 case MBA_IP_RECEIVE:
1251 case MBA_IP_BROADCAST:
1252 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1253 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1254 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1255
1256 EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1257 "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1258
1259 /* Locate device queue. */
1260 s_id.b.al_pa = LSB(mb[2]);
1261 s_id.b.area = MSB(mb[2]);
1262 s_id.b.domain = LSB(mb[1]);
1263 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1264 EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1265 break;
1266 }
1267
1268 cnt = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1269 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1270 ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1271 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1272 ha->ip_init_ctrl_blk.cb.buf_size[1]));
1273
1274 tq->ub_sequence_length = mb[3];
1275 tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1276 if (mb[3] % cnt) {
1277 tq->ub_total_seg_cnt++;
1278 }
1279 cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1280
1281 for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1282 index++) {
1283 mb[index] = RD16_IO_REG(ha, mailbox_out[index]);
1284 }
1285
1286 tq->ub_seq_id = ++ha->ub_seq_id;
1287 tq->ub_seq_cnt = 0;
1288 tq->ub_frame_ro = 0;
1289 tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1290 (CFG_IST(ha, CFG_CTRL_24XX) ? BROADCAST_24XX_HDL :
1291 IP_BROADCAST_LOOP_ID) : tq->loop_id);
1292 ha->rcv_dev_q = tq;
1293
1294 for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1295 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1296 if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1297 QL_SUCCESS) {
1298 EL(ha, "ql_ub_frame_hdr failed, "
1299 "isp_abort_needed\n");
1300 *set_flags |= ISP_ABORT_NEEDED;
1301 break;
1302 }
1303 }
1304 break;
1305
1306 case MBA_IP_LOW_WATER_MARK:
1307 case MBA_IP_RCV_BUFFER_EMPTY:
1308 EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1309 mb[0]);
1310 *set_flags |= NEED_UNSOLICITED_BUFFERS;
1311 break;
1312
1313 case MBA_IP_HDR_DATA_SPLIT:
1314 EL(ha, "%xh IP HDR data split received\n", mb[0]);
1315 break;
1316
1317 case MBA_ERROR_LOGGING_DISABLED:
1318 EL(ha, "%xh error logging disabled received, "
1319 "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1320 break;
1321
1322 case MBA_POINT_TO_POINT:
1323 /* case MBA_DCBX_COMPLETED: */
1324 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1325 EL(ha, "%xh DCBX completed received\n", mb[0]);
1326 } else {
1327 EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1328 }
1329 ADAPTER_STATE_LOCK(ha);
1330 ha->flags |= POINT_TO_POINT;
1331 ADAPTER_STATE_UNLOCK(ha);
1332 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1333 *set_flags |= LOOP_DOWN;
1334 }
1335 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1336 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1337 }
1338 ql_port_state(ha, FC_STATE_OFFLINE,
1339 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1340 break;
1341
1342 case MBA_FCF_CONFIG_ERROR:
1343 EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1344 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1345 break;
1346
1347 case MBA_DCBX_PARAM_CHANGED:
1348 EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1349 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1350 break;
1351
1352 case MBA_CHG_IN_CONNECTION:
1353 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1354 if (mb[1] == 2) {
1355 EL(ha, "%xh Change In Connection received, "
1356 "mbx1=%xh\n", mb[0], mb[1]);
1357 ADAPTER_STATE_LOCK(ha);
1358 ha->flags &= ~POINT_TO_POINT;
1359 ADAPTER_STATE_UNLOCK(ha);
1360 if (ha->topology & QL_N_PORT) {
1361 ha->topology = (uint8_t)(ha->topology &
1362 ~QL_N_PORT);
1363 ha->topology = (uint8_t)(ha->topology |
1364 QL_NL_PORT);
1365 }
1366 } else {
1367 EL(ha, "%xh Change In Connection received, "
1368 "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1369 *set_flags |= ISP_ABORT_NEEDED;
1370 }
1371 break;
1372
1373 case MBA_ZIO_UPDATE:
1374 EL(ha, "%xh ZIO response received\n", mb[0]);
1375
1376 rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1377 ql_response_pkt(ha, rsp_q, done_q, set_flags, reset_flags);
1378 break;
1379
1380 case MBA_PORT_BYPASS_CHANGED:
1381 EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1382 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1383 /*
1384 * Event generated when there is a transition on
1385 * port bypass of crystal+.
1386 * Mailbox 1: Bit 0 - External.
1387 * Bit 2 - Internal.
1388 * When the bit is 0, the port is bypassed.
1389 *
1390 * For now we will generate a LIP for all cases.
1391 */
1392 *set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1393 break;
1394
1395 case MBA_RECEIVE_ERROR:
1396 EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1397 mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1398 RD16_IO_REG(ha, mailbox_out[2]));
1399 break;
1400
1401 case MBA_LS_RJT_SENT:
1402 EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1403 RD16_IO_REG(ha, mailbox_out[1]));
1404 break;
1405
1406 case MBA_FW_RESTART_COMP:
1407 EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1408 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1409 break;
1410
1411 /*
1412 * MBA_IDC_COMPLETE & MBA_IDC_NOTIFICATION: We won't get another
1413 * IDC async event until we ACK the current one.
1414 */
1415 case MBA_IDC_COMPLETE:
1416 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1417 EL(ha, "%xh MBA_IDC_COMPLETE received, mbx2=%xh\n", mb[0],
1418 mb[2]);
1419 switch (mb[2]) {
1420 case IDC_OPC_FLASH_ACC:
1421 case IDC_OPC_RESTART_MPI:
1422 case IDC_OPC_PORT_RESET_MBC:
1423 case IDC_OPC_SET_PORT_CONFIG_MBC:
1424 ADAPTER_STATE_LOCK(ha);
1425 ha->flags |= IDC_RESTART_NEEDED;
1426 ADAPTER_STATE_UNLOCK(ha);
1427 break;
1428 default:
1429 EL(ha, "unknown IDC completion opcode=%xh\n", mb[2]);
1430 break;
1431 }
1432 break;
1433
1434 case MBA_IDC_NOTIFICATION:
1435 for (cnt = 1; cnt < 8; cnt++) {
1436 ha->idc_mb[cnt] = RD16_IO_REG(ha, mailbox_out[cnt]);
1437 }
1438 EL(ha, "%xh MBA_IDC_REQ_NOTIFICATION received, mbx1=%xh, "
1439 "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh, "
1440 "mbx7=%xh\n", mb[0], ha->idc_mb[1], ha->idc_mb[2],
1441 ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5], ha->idc_mb[6],
1442 ha->idc_mb[7]);
1443
1444 ADAPTER_STATE_LOCK(ha);
1445 switch (ha->idc_mb[2]) {
1446 case IDC_OPC_DRV_START:
1447 ha->flags |= IDC_RESTART_NEEDED;
1448 break;
1449 case IDC_OPC_FLASH_ACC:
1450 case IDC_OPC_RESTART_MPI:
1451 case IDC_OPC_PORT_RESET_MBC:
1452 case IDC_OPC_SET_PORT_CONFIG_MBC:
1453 ha->flags |= IDC_STALL_NEEDED;
1454 break;
1455 default:
1456 EL(ha, "unknown IDC request opcode=%xh\n",
1457 ha->idc_mb[2]);
1458 break;
1459 }
1460 /*
1461 * If there is a timeout value associated with this IDC
1462 * notification then there is an implied requirement
1463 * that we return an ACK.
1464 */
1465 if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
1466 ha->flags |= IDC_ACK_NEEDED;
1467 }
1468 ADAPTER_STATE_UNLOCK(ha);
1469
1470 ql_awaken_task_daemon(ha, NULL, 0, 0);
1471 break;
1472
1473 case MBA_IDC_TIME_EXTENDED:
1474 EL(ha, "%xh MBA_IDC_TIME_EXTENDED received, mbx2=%xh\n",
1475 mb[0], RD16_IO_REG(ha, mailbox_out[2]));
1476 break;
1477
1478 default:
1479 EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1480 "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1481 RD16_IO_REG(ha, mailbox_out[2]),
1482 RD16_IO_REG(ha, mailbox_out[3]));
1483 break;
1484 }
1485
1486 QL_PRINT_3(ha, "done\n");
1487 }
1488
1489 /*
1490 * ql_fast_fcp_post
1491 * Fast path for good SCSI I/O completion.
1492 *
1493 * Input:
1494 * sp: SRB pointer.
1495 * rsp_q: response queue structure pointer.
1496 *
1497 * Context:
1498 * Interrupt or Kernel context, no mailbox commands allowed.
1499 */
1500 static void
1501 ql_fast_fcp_post(ql_srb_t *sp, ql_response_q_t *rsp_q)
1502 {
1503 ql_adapter_state_t *ha = sp->ha;
1504 ql_lun_t *lq = sp->lun_queue;
1505 ql_tgt_t *tq = lq->target_queue;
1506
1507 QL_PRINT_3(ha, "started\n");
1508
1509 /* Acquire device queue lock. */
1510 DEVICE_QUEUE_LOCK(tq);
1511
1512 /* Decrement outstanding commands on device. */
1513 if (tq->outcnt != 0) {
1514 tq->outcnt--;
1515 }
1516
1517 if (sp->flags & SRB_FCP_CMD_PKT) {
1518 if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
1519 /*
1520 * Clear the flag for this LUN so that
1521 * untagged commands can be submitted
1522 * for it.
1523 */
1524 lq->flags &= ~LQF_UNTAGGED_PENDING;
1525 }
1526
1527 if (lq->lun_outcnt != 0) {
1544 ql_next(ha, lq);
1545 } else {
1546 /* Release LU queue specific lock. */
1547 DEVICE_QUEUE_UNLOCK(tq);
1548 if (ha->pha->pending_cmds.first != NULL) {
1549 ql_start_iocb(ha, NULL);
1550 }
1551 }
1552
1553 /* Sync buffers if required. */
1554 if (sp->flags & SRB_MS_PKT) {
1555 (void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
1556 DDI_DMA_SYNC_FORCPU);
1557 }
1558
1559 /* Map ISP completion codes. */
1560 sp->pkt->pkt_expln = FC_EXPLN_NONE;
1561 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
1562 sp->pkt->pkt_state = FC_PKT_SUCCESS;
1563
1564 (void) qlc_fm_check_pkt_dma_handle(ha, sp);
1565
1566 /* Now call the pkt completion callback */
1567 if (sp->flags & SRB_POLL) {
1568 sp->flags &= ~SRB_POLL;
1569 } else if (ha->completion_thds == 1 && sp->pkt->pkt_comp &&
1570 !(ha->flags & POLL_INTR)) {
1571 INDX_INTR_UNLOCK(ha, rsp_q->rsp_q_number);
1572 (*sp->pkt->pkt_comp)(sp->pkt);
1573 INDX_INTR_LOCK(ha, rsp_q->rsp_q_number);
1574 } else {
1575 ql_io_comp(sp);
1576 }
1577
1578 if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1579 != DDI_FM_OK) {
1580 qlc_fm_report_err_impact(ha,
1581 QL_FM_EREPORT_ACC_HANDLE_CHECK);
1582 }
1583
1584 QL_PRINT_3(ha, "done\n");
1585 }
1586
1587 /*
1588 * ql_response_pkt
1589 * Processes response entry.
1590 *
1591 * Input:
1592 * ha: adapter state pointer.
1593 * rsp_q: response queue structure pointer.
1594 * done_q: head pointer to done queue.
1595 * set_flags: task daemon flags to set.
1596 * reset_flags: task daemon flags to reset.
1597 *
1598 * Context:
1599 * Interrupt or Kernel context, no mailbox commands allowed.
1600 */
static void
ql_response_pkt(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
    ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
{
	response_t	*pkt;
	uint32_t	dma_sync_size_1 = 0;
	uint32_t	dma_sync_size_2 = 0;
	int		status = 0;	/* accumulates BIT_0 on CS_RESET */

	QL_PRINT_3(ha, "started\n");

	/*
	 * A firmware-reported response index beyond the ring size means
	 * the queue state is corrupt; request a full ISP abort/restart.
	 */
	if (rsp_q->isp_rsp_index >= rsp_q->rsp_entry_cnt) {
		EL(ha, "index error = %xh, isp_abort_needed",
		    rsp_q->isp_rsp_index);
		*set_flags |= ISP_ABORT_NEEDED;
		return;
	}

	if ((ha->flags & ONLINE) == 0) {
		QL_PRINT_10(ha, "not onlne, done\n");
		return;
	}

	/* Calculate size of response queue entries to sync. */
	if (rsp_q->isp_rsp_index > rsp_q->rsp_ring_index) {
		/* New entries are contiguous; sync a single span. */
		dma_sync_size_1 = (uint32_t)
		    ((uint32_t)(rsp_q->isp_rsp_index - rsp_q->rsp_ring_index) *
		    RESPONSE_ENTRY_SIZE);
	} else if (rsp_q->isp_rsp_index == 0) {
		/*
		 * Producer is exactly at the wrap point; sync from the
		 * consumer index through the end of the ring only.
		 */
		dma_sync_size_1 = (uint32_t)
		    ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
		    RESPONSE_ENTRY_SIZE);
	} else {
		/* Responses wrap around the Q */
		dma_sync_size_1 = (uint32_t)
		    ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
		    RESPONSE_ENTRY_SIZE);
		/* Second span: wrapped portion at the start of the ring. */
		dma_sync_size_2 = (uint32_t)
		    (rsp_q->isp_rsp_index * RESPONSE_ENTRY_SIZE);
	}

	/* Sync DMA buffer before the CPU reads the new entries. */
	(void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
	    (off_t)(rsp_q->rsp_ring_index * RESPONSE_ENTRY_SIZE),
	    dma_sync_size_1, DDI_DMA_SYNC_FORCPU);
	if (dma_sync_size_2) {
		(void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle, 0,
		    dma_sync_size_2, DDI_DMA_SYNC_FORCPU);
	}

	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
	    != DDI_FM_OK) {
		qlc_fm_report_err_impact(ha,
		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
	}

	/* Consume entries until we catch up with the firmware's index. */
	while (rsp_q->rsp_ring_index != rsp_q->isp_rsp_index) {
		pkt = rsp_q->rsp_ring_ptr;

		QL_PRINT_5(ha, "ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
		    rsp_q->rsp_ring_index, rsp_q->isp_rsp_index);
		QL_DUMP_5((uint8_t *)rsp_q->rsp_ring_ptr, 8,
		    RESPONSE_ENTRY_SIZE);

		/* Adjust ring index (advance before processing the entry). */
		rsp_q->rsp_ring_index++;
		if (rsp_q->rsp_ring_index == rsp_q->rsp_entry_cnt) {
			rsp_q->rsp_ring_index = 0;
			rsp_q->rsp_ring_ptr = rsp_q->rsp_ring.bp;
		} else {
			rsp_q->rsp_ring_ptr++;
		}

		/*
		 * Process packet.  A pending status-continuation chain is
		 * terminated by any entry that is not a continuation.
		 */
		if (rsp_q->status_srb != NULL &&
		    pkt->entry_type != STATUS_CONT_TYPE) {
			ql_add_link_b(done_q, &rsp_q->status_srb->cmd);
			rsp_q->status_srb = NULL;
		}

		/* Mask to the entry-status bits defined for this FW type. */
		pkt->entry_status = (uint8_t)
		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);

		if (pkt->entry_status != 0 ||
		    pkt->entry_type == ABORTED_ENTRY_TYPE) {
			ql_error_entry(ha, rsp_q,
			    pkt, done_q,
			    set_flags, reset_flags);
		} else {
			/* Dispatch on IOCB entry type. */
			switch (pkt->entry_type) {
			case STATUS_TYPE:
				status |= CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
				    ql_24xx_status_entry(ha, rsp_q,
				    (sts_24xx_entry_t *)pkt, done_q,
				    set_flags, reset_flags) :
				    ql_status_entry(ha, rsp_q,
				    (sts_entry_t *)pkt,
				    done_q, set_flags, reset_flags);
				break;
			case STATUS_CONT_TYPE:
				ql_status_cont_entry(ha, rsp_q,
				    (sts_cont_entry_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case IP_TYPE:
			case IP_A64_TYPE:
			case IP_CMD_TYPE:
				ql_ip_entry(ha, rsp_q,
				    (ip_entry_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case IP_RECEIVE_TYPE:
				ql_ip_rcv_entry(ha, rsp_q,
				    (ip_rcv_entry_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case IP_RECEIVE_CONT_TYPE:
				ql_ip_rcv_cont_entry(ha, rsp_q,
				    (ip_rcv_cont_entry_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case IP_24XX_RECEIVE_TYPE:
				ql_ip_24xx_rcv_entry(ha, rsp_q,
				    (ip_rcv_24xx_entry_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case MS_TYPE:
				ql_ms_entry(ha, rsp_q,
				    (ms_entry_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case REPORT_ID_TYPE:
				ql_report_id_entry(ha, rsp_q,
				    (report_id_acq_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case ELS_PASSTHRU_TYPE:
				ql_els_passthru_entry(ha, rsp_q,
				    (els_passthru_entry_rsp_t *)pkt, done_q,
				    set_flags, reset_flags);
				break;
			case IP_BUF_POOL_TYPE:
			case MARKER_TYPE:
			case VP_MODIFY_TYPE:
			case VP_CONTROL_TYPE:
				/* Consumed without driver-side processing. */
				break;
			default:
				EL(ha, "Unknown IOCB entry type=%xh\n",
				    pkt->entry_type);
				break;
			}
		}
	}

	/* Inform RISC of processed responses. */

	if (ha->flags & MULTI_QUEUE) {
		WR32_MBAR_REG(ha, rsp_q->mbar_rsp_out, rsp_q->rsp_ring_index);
	} else {
		WRT16_IO_REG(ha, resp_out, rsp_q->rsp_ring_index);
	}

	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
	    != DDI_FM_OK) {
		qlc_fm_report_err_impact(ha,
		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
	}

	/* RESET packet received delay for possible async event. */
	if (status & BIT_0) {
		drv_usecwait(500000);
	}

	QL_PRINT_3(ha, "done\n");
}
1777
1778 /*
1779 * ql_error_entry
1780 * Processes error entry.
1781 *
1782 * Input:
1783 * ha: adapter state pointer.
1784 * rsp_q: response queue structure pointer.
1785 * pkt: entry pointer.
1786 * done_q: head pointer to done queue.
1787 * set_flags: task daemon flags to set.
1788 * reset_flags: task daemon flags to reset.
1789 *
1790 * Context:
1791 * Interrupt or Kernel context, no mailbox commands allowed.
1792 */
1793 /* ARGSUSED */
1794 static void
1795 ql_error_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, response_t *pkt,
1796 ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
1797 {
1798 ql_srb_t *sp = NULL;
1799 uint32_t index, resp_identifier;
1800
1801 if (pkt->entry_type == ABORTED_ENTRY_TYPE) {
1802 resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
1803 &pkt->handle);
1804 index = resp_identifier & OSC_INDEX_MASK;
1805 if (index < ha->osc_max_cnt) {
1806 if (ha->outstanding_cmds[index] ==
1807 QL_ABORTED_SRB(ha)) {
1808 EL(ha, "Aborted command sp=QL_ABORTED_SRB, "
1809 "handle=%xh\n", resp_identifier);
1810 ha->outstanding_cmds[index] = NULL;
1811 } else {
1812 EL(ha, "Aborted command sp=%ph, handle=%xh\n",
1813 (void *) ha->outstanding_cmds[index],
1814 resp_identifier);
1815 }
1816 } else {
1817 EL(ha, "Aborted command handle=%xh, out of range "
1818 "index=%xh\n", resp_identifier, index);
1819 }
1820 return;
1821 }
1822
1823 QL_PRINT_2(ha, "started, packet:\n");
1824 QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1825
1826 if (pkt->entry_status & BIT_6) {
1827 EL(ha, "Request Queue DMA error\n");
1828 } else if (pkt->entry_status & BIT_5) {
1829 EL(ha, "Invalid Entry Order\n");
1830 } else if (pkt->entry_status & BIT_4) {
1831 EL(ha, "Invalid Entry Count\n");
1832 } else if (pkt->entry_status & BIT_3) {
1833 EL(ha, "Invalid Entry Parameter\n");
1834 } else if (pkt->entry_status & BIT_2) {
1835 EL(ha, "Invalid Entry Type\n");
1836 } else if (pkt->entry_status & BIT_1) {
1837 EL(ha, "Busy\n");
1838 } else {
1839 EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1840 }
1841
1842 /* Validate the response entry handle. */
1843 resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
1844 index = resp_identifier & OSC_INDEX_MASK;
1845 if (index < ha->osc_max_cnt) {
1846 /* the index seems reasonable */
1847 if ((sp = ha->outstanding_cmds[index]) == NULL) {
1848 sp = ql_verify_preprocessed_cmd(ha, rsp_q,
1849 (uint32_t *)&pkt->handle,
1850 (uint32_t *)&resp_identifier, set_flags,
1851 reset_flags);
1852 }
1853 if (sp != NULL) {
1854 if (sp == QL_ABORTED_SRB(ha)) {
1855 EL(ha, "QL_ABORTED_SRB handle=%xh\n",
1856 resp_identifier);
1857 sp = NULL;
1858 ha->outstanding_cmds[index] = NULL;
1859 } else if (sp->handle == resp_identifier) {
1860 /* Neo, you're the one... */
1861 ha->outstanding_cmds[index] = NULL;
1862 sp->handle = 0;
1863 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1864 } else {
1865 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1866 resp_identifier, sp->handle);
1867 sp = NULL;
1868 ql_signal_abort(ha, set_flags);
1869 }
1870 }
1871 } else {
1872 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1873 index, resp_identifier);
1874 ql_signal_abort(ha, set_flags);
1875 }
1876
1877 if (sp != NULL) {
1878 /* Bad payload or header */
1879 if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1880 /* Bad payload or header, set error status. */
1881 sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1882 } else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1883 sp->pkt->pkt_reason = CS_QUEUE_FULL;
1884 } else {
1885 /* Set error status. */
1886 sp->pkt->pkt_reason = CS_UNKNOWN;
1887 }
1888
1889 /* Set completed status. */
1890 sp->flags |= SRB_ISP_COMPLETED;
1891
1892 /* Place command on done queue. */
1893 ql_add_link_b(done_q, &sp->cmd);
1894
1895 }
1896 QL_PRINT_3(ha, "done\n");
1897 }
1898
1899 /*
1900 * ql_status_entry
1901 * Processes received ISP2200-2300 status entry.
1902 *
1903 * Input:
1904 * ha: adapter state pointer.
1905 * rsp_q: response queue structure pointer.
1906 * pkt: entry pointer.
1907 * done_q: done queue pointer.
1908 * set_flags: task daemon flags to set.
1909 * reset_flags: task daemon flags to reset.
1910 *
1911 * Returns:
1912 * BIT_0 = CS_RESET status received.
1913 *
1914 * Context:
1915 * Interrupt or Kernel context, no mailbox commands allowed.
1916 */
/* ARGSUSED */
static int
ql_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
    sts_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	ql_srb_t	*sp = NULL;
	uint32_t	index, resp_identifier;
	uint16_t	comp_status;
	int		rval = 0;

	QL_PRINT_3(ha, "started\n");

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		/*
		 * A NULL slot may mean the response arrived before the
		 * request path finished posting; retry with delays before
		 * giving up on the handle.
		 */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Command was aborted; just free the slot. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Stale/mismatched handle: queues corrupt. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
		    &pkt->comp_status);

		/*
		 * We dont care about SCSI QFULLs.
		 */
		if (comp_status == CS_QUEUE_FULL) {
			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
			    sp->lun_queue->target_queue->d_id.b24,
			    sp->lun_queue->lun_no);
			comp_status = CS_COMPLETE;
		}

		/*
		 * 2300 firmware marks completion status as data underrun
		 * for scsi qfulls. Make it transport complete.
		 */
		if (CFG_IST(ha, CFG_CTRL_2363) &&
		    comp_status == CS_DATA_UNDERRUN &&
		    pkt->scsi_status_l != STATUS_GOOD) {
			comp_status = CS_COMPLETE;
		}

		/*
		 * Workaround T3 issue where we do not get any data xferred
		 * but get back a good status.  Outbound command with good
		 * status, no residual, nonzero data length, and no data
		 * actually transferred is treated as aborted.
		 */
		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
		    comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
		    pkt->residual_length == 0 &&
		    sp->fcp &&
		    sp->fcp->fcp_data_len != 0 &&
		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
		    SF_DATA_OUT) {
			comp_status = CS_ABORTED;
		}

		if (sp->flags & SRB_MS_PKT) {
			/*
			 * Ideally it should never be true. But there
			 * is a bug in FW which upon receiving invalid
			 * parameters in MS IOCB returns it as
			 * status entry and not as ms entry type.
			 */
			ql_ms_entry(ha, rsp_q, (ms_entry_t *)pkt, done_q,
			    set_flags, reset_flags);
			QL_PRINT_3(ha, "ql_ms_entry done\n");
			return (0);
		}

		/*
		 * Fast path to good SCSI I/O completion
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;
			sp->pkt->pkt_reason = comp_status;
			ql_fast_fcp_post(sp, rsp_q);
			QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
			return (0);
		}
		/* Slow path: errors, sense data, FCP response info. */
		rval = ql_status_error(ha, rsp_q, sp, pkt, done_q, set_flags,
		    reset_flags);
	}
	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2038
2039 /*
2040 * ql_24xx_status_entry
2041 * Processes received ISP24xx status entry.
2042 *
2043 * Input:
2044 * ha: adapter state pointer.
2045 * rsp_q: response queue structure pointer.
2046 * pkt: entry pointer.
2047 * done_q: done queue pointer.
2048 * set_flags: task daemon flags to set.
2049 * reset_flags: task daemon flags to reset.
2050 *
2051 * Returns:
2052 * BIT_0 = CS_RESET status received.
2053 *
2054 * Context:
2055 * Interrupt or Kernel context, no mailbox commands allowed.
2056 */
/* ARGSUSED */
static int
ql_24xx_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
    sts_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	ql_srb_t	*sp = NULL;
	uint16_t	comp_status;
	uint32_t	index, resp_identifier;
	int		rval = 0;

	QL_PRINT_3(ha, "started\n");

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		/*
		 * A NULL slot may mean the response arrived before the
		 * request path finished posting; retry with delays before
		 * giving up on the handle.
		 */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Command was aborted; just free the slot. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Stale/mismatched handle: queues corrupt. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
		    &pkt->comp_status);

		/* We dont care about SCSI QFULLs. */
		if (comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
			    sp->lun_queue->target_queue->d_id.b24,
			    sp->lun_queue->lun_no);
			comp_status = CS_COMPLETE;
		}

		/*
		 * Firmware may mark completion status as data underrun
		 * for scsi qfulls. Make it transport complete.
		 * NOTE(review): this comment originally said "2300
		 * firmware" but this is the 24xx handler and, unlike the
		 * 23xx path, the check is not gated on a chip type.
		 */
		if (comp_status == CS_DATA_UNDERRUN &&
		    pkt->scsi_status_l != STATUS_GOOD) {
			comp_status = CS_COMPLETE;
		}

		/*
		 * Workaround T3 issue where we do not get any data xferred
		 * but get back a good status.  Here a write with good
		 * status but a nonzero firmware residual is treated as
		 * aborted (contrast the 23xx check, which keys on
		 * residual_length == 0 and the state flags).
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
		    pkt->residual_length != 0 &&
		    sp->fcp &&
		    sp->fcp->fcp_data_len != 0 &&
		    sp->fcp->fcp_cntl.cntl_write_data) {
			comp_status = CS_ABORTED;
		}

		/*
		 * Fast path to good SCSI I/O completion
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;
			sp->pkt->pkt_reason = comp_status;
			ql_fast_fcp_post(sp, rsp_q);
			QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
			return (0);
		}
		/* Slow path: errors, sense data, FCP response info. */
		rval = ql_status_error(ha, rsp_q, sp, (sts_entry_t *)pkt,
		    done_q, set_flags, reset_flags);
	}
	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2160
2161 /*
2162 * ql_verify_preprocessed_cmd
2163 * Handles preprocessed cmds..
2164 *
2165 * Input:
2166 * ha: adapter state pointer.
2167 * rsp_q: response queue structure pointer.
2168 * pkt_handle: handle pointer.
2169 * resp_identifier: resp_identifier pointer.
2170 * set_flags: task daemon flags to set.
2171 * reset_flags: task daemon flags to reset.
2172 *
2173 * Returns:
2174 * srb pointer or NULL
2175 *
2176 * Context:
2177 * Interrupt or Kernel context, no mailbox commands allowed.
2178 */
2179 /* ARGSUSED */
2180 ql_srb_t *
2181 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2182 uint32_t *pkt_handle, uint32_t *resp_identifier, uint64_t *set_flags,
2183 uint64_t *reset_flags)
2184 {
2185 ql_srb_t *sp = NULL;
2186 uint32_t index;
2187 uint32_t get_handle = 10;
2188
2189 while (get_handle) {
2190 /* Get handle. */
2191 *resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
2192 pkt_handle);
2193 index = *resp_identifier & OSC_INDEX_MASK;
2194 /* Validate handle. */
2195 if (index < ha->osc_max_cnt) {
2196 sp = ha->outstanding_cmds[index];
2197 }
2198
2199 if (sp != NULL) {
2200 EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
2201 *resp_identifier, get_handle, index);
2202 break;
2203 } else {
2204 get_handle -= 1;
2205 drv_usecwait(10000);
2206 if (get_handle == 1 && rsp_q->rsp_ring.dma_handle) {
2207 /* Last chance, Sync whole DMA buffer. */
2208 (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
2209 0, 0, DDI_DMA_SYNC_FORCPU);
2210 EL(ha, "last chance DMA sync, index=%xh\n",
2211 index);
2212 }
2213 }
2214 }
2215 QL_PRINT_3(ha, "done\n");
2216
2217 return (sp);
2218 }
2219
2220
2221 /*
2222 * ql_status_error
2223 * Processes received ISP status entry error.
2224 *
2225 * Input:
2226 * ha: adapter state pointer.
2227 * rsp_q: response queue structure pointer.
2228 * sp: SRB pointer.
2229 * pkt: entry pointer.
2230 * done_q: done queue pointer.
2231 * set_flags: task daemon flags to set.
2232 * reset_flags: task daemon flags to reset.
2233 *
2234 * Returns:
2235 * BIT_0 = CS_RESET status received.
2236 *
2237 * Context:
2238 * Interrupt or Kernel context, no mailbox commands allowed.
2239 */
2240 /* ARGSUSED */
2241 static int
2242 ql_status_error(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ql_srb_t *sp,
2243 sts_entry_t *pkt23, ql_head_t *done_q, uint64_t *set_flags,
2244 uint64_t *reset_flags)
2245 {
2246 uint32_t sense_sz = 0;
2247 uint32_t cnt;
2248 ql_tgt_t *tq;
2249 fcp_rsp_t *fcpr;
2250 struct fcp_rsp_info *rsp;
2251 int rval = 0;
2252
2253 struct {
2254 uint8_t *rsp_info;
2255 uint8_t *req_sense_data;
2256 uint32_t residual_length;
2257 uint32_t fcp_residual_length;
2258 uint32_t rsp_info_length;
2259 uint32_t req_sense_length;
2260 uint16_t comp_status;
2261 uint8_t state_flags_l;
2262 uint8_t state_flags_h;
2263 uint8_t scsi_status_l;
2264 uint8_t scsi_status_h;
2265 } sts;
2266
2267 QL_PRINT_3(ha, "started\n");
2268
2269 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2270 sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
2271
2272 /* Setup status. */
2273 sts.comp_status = (uint16_t)ddi_get16(
2274 rsp_q->rsp_ring.acc_handle, &pkt24->comp_status);
2275 sts.scsi_status_l = pkt24->scsi_status_l;
2276 sts.scsi_status_h = pkt24->scsi_status_h;
2277
2278 /* Setup firmware residuals. */
2279 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2280 ddi_get32(rsp_q->rsp_ring.acc_handle,
2281 (uint32_t *)&pkt24->residual_length) : 0;
2282
2283 /* Setup FCP residuals. */
2284 sts.fcp_residual_length = sts.scsi_status_h &
2285 (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2286 ddi_get32(rsp_q->rsp_ring.acc_handle,
2287 (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
2288
2289 if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2290 (sts.scsi_status_h & FCP_RESID_UNDER) &&
2291 (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
2292
2293 EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
2294 sts.residual_length,
2295 pkt24->fcp_rsp_residual_count);
2296 sts.scsi_status_h = (uint8_t)
2297 (sts.scsi_status_h & ~FCP_RESID_UNDER);
2298 }
2299
2300 /* Setup state flags. */
2301 sts.state_flags_l = pkt24->state_flags_l;
2302 sts.state_flags_h = pkt24->state_flags_h;
2303
2304 if (sp->fcp->fcp_data_len &&
2305 (sts.comp_status != CS_DATA_UNDERRUN ||
2306 sts.residual_length != sp->fcp->fcp_data_len)) {
2318 sts.state_flags_l = (uint8_t)
2319 (sts.state_flags_l | SF_DATA_OUT);
2320 } else if (sp->fcp->fcp_cntl.cntl_read_data) {
2321 sts.state_flags_l = (uint8_t)
2322 (sts.state_flags_l | SF_DATA_IN);
2323 }
2324 if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2325 sts.state_flags_l = (uint8_t)
2326 (sts.state_flags_l | SF_HEAD_OF_Q);
2327 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2328 sts.state_flags_l = (uint8_t)
2329 (sts.state_flags_l | SF_ORDERED_Q);
2330 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2331 sts.state_flags_l = (uint8_t)
2332 (sts.state_flags_l | SF_SIMPLE_Q);
2333 }
2334
2335 /* Setup FCP response info. */
2336 sts.rsp_info = &pkt24->rsp_sense_data[0];
2337 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2338 sts.rsp_info_length = ddi_get32(
2339 rsp_q->rsp_ring.acc_handle,
2340 (uint32_t *)&pkt24->fcp_rsp_data_length);
2341 if (sts.rsp_info_length >
2342 sizeof (struct fcp_rsp_info)) {
2343 sts.rsp_info_length =
2344 sizeof (struct fcp_rsp_info);
2345 }
2346 for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2347 ql_chg_endian(sts.rsp_info + cnt, 4);
2348 }
2349 } else {
2350 sts.rsp_info_length = 0;
2351 }
2352
2353 /* Setup sense data. */
2354 sts.req_sense_data =
2355 &pkt24->rsp_sense_data[sts.rsp_info_length];
2356 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2357 sts.req_sense_length =
2358 ddi_get32(rsp_q->rsp_ring.acc_handle,
2359 (uint32_t *)&pkt24->fcp_sense_length);
2360 sts.state_flags_h = (uint8_t)
2361 (sts.state_flags_h | SF_ARQ_DONE);
2362 sense_sz = (uint32_t)
2363 (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2364 (uintptr_t)sts.req_sense_data);
2365 for (cnt = 0; cnt < sense_sz; cnt += 4) {
2366 ql_chg_endian(sts.req_sense_data + cnt, 4);
2367 }
2368 } else {
2369 sts.req_sense_length = 0;
2370 }
2371 } else {
2372 /* Setup status. */
2373 sts.comp_status = (uint16_t)ddi_get16(
2374 rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
2375 sts.scsi_status_l = pkt23->scsi_status_l;
2376 sts.scsi_status_h = pkt23->scsi_status_h;
2377
2378 /* Setup firmware residuals. */
2379 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2380 ddi_get32(rsp_q->rsp_ring.acc_handle,
2381 (uint32_t *)&pkt23->residual_length) : 0;
2382
2383 /* Setup FCP residuals. */
2384 sts.fcp_residual_length = sts.scsi_status_h &
2385 (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2386 sts.residual_length : 0;
2387
2388 /* Setup state flags. */
2389 sts.state_flags_l = pkt23->state_flags_l;
2390 sts.state_flags_h = pkt23->state_flags_h;
2391
2392 /* Setup FCP response info. */
2393 sts.rsp_info = &pkt23->rsp_info[0];
2394 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2395 sts.rsp_info_length = ddi_get16(
2396 rsp_q->rsp_ring.acc_handle,
2397 (uint16_t *)&pkt23->rsp_info_length);
2398 if (sts.rsp_info_length >
2399 sizeof (struct fcp_rsp_info)) {
2400 sts.rsp_info_length =
2401 sizeof (struct fcp_rsp_info);
2402 }
2403 } else {
2404 sts.rsp_info_length = 0;
2405 }
2406
2407 /* Setup sense data. */
2408 sts.req_sense_data = &pkt23->req_sense_data[0];
2409 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2410 ddi_get16(rsp_q->rsp_ring.acc_handle,
2411 (uint16_t *)&pkt23->req_sense_length) : 0;
2412 }
2413
2414 bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2415
2416 fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2417 rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2418 sizeof (fcp_rsp_t));
2419
2420 tq = sp->lun_queue->target_queue;
2421
2422 fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2423 if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2424 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2425 }
2426 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2427 fcpr->fcp_u.fcp_status.sense_len_set = 1;
2428 }
2429 if (sts.scsi_status_h & FCP_RESID_OVER) {
2430 fcpr->fcp_u.fcp_status.resid_over = 1;
2431 }
2432 if (sts.scsi_status_h & FCP_RESID_UNDER) {
2433 fcpr->fcp_u.fcp_status.resid_under = 1;
2434 }
2435 fcpr->fcp_u.fcp_status.reserved_1 = 0;
2436
2437 /* Set ISP completion status */
2438 sp->pkt->pkt_reason = sts.comp_status;
2439
2440 /* Update statistics. */
2441 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2442 (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2443
2444 sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2445 if (sense_sz > sts.rsp_info_length) {
2446 sense_sz = sts.rsp_info_length;
2447 }
2448
2449 /* copy response information data. */
2450 if (sense_sz) {
2451 ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2452 (uint8_t *)rsp, sts.rsp_info, sense_sz,
2453 DDI_DEV_AUTOINCR);
2454 }
2455 fcpr->fcp_response_len = sense_sz;
2456
2457 rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2458 fcpr->fcp_response_len);
2459
2460 switch (*(sts.rsp_info + 3)) {
2461 case FCP_NO_FAILURE:
2462 break;
2463 case FCP_DL_LEN_MISMATCH:
2464 ha->adapter_stats->d_stats[lobyte(
2465 tq->loop_id)].dl_len_mismatches++;
2466 break;
2467 case FCP_CMND_INVALID:
2468 break;
2469 case FCP_DATA_RO_MISMATCH:
2470 ha->adapter_stats->d_stats[lobyte(
2471 tq->loop_id)].data_ro_mismatches++;
2472 break;
2473 case FCP_TASK_MGMT_NOT_SUPPTD:
2474 break;
2475 case FCP_TASK_MGMT_FAILED:
2476 ha->adapter_stats->d_stats[lobyte(
2477 tq->loop_id)].task_mgmt_failures++;
2478 break;
2479 default:
2480 break;
2481 }
2482 } else {
2483 /*
2484 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2485 * sts.scsi_status_h, sp->pkt->pkt_rsplen);
2486 */
2487 fcpr->fcp_response_len = 0;
2488 }
2489
2490 /* Set reset status received. */
2491 if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2492 *set_flags |= MARKER_NEEDED;
2493 rval |= BIT_0;
2494 }
2495
2496 if (!(tq->flags & TQF_TAPE_DEVICE) &&
2497 (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2498 ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2499 ha->task_daemon_flags & LOOP_DOWN) {
2500 EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2501 tq->d_id.b24, sp->lun_queue->lun_no);
2502
2503 /* Set retry status. */
2504 sp->flags |= SRB_RETRY;
2505 } else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2506 tq->port_down_retry_count != 0 &&
2507 (sts.comp_status == CS_INCOMPLETE ||
2508 sts.comp_status == CS_PORT_UNAVAILABLE ||
2509 sts.comp_status == CS_PORT_LOGGED_OUT ||
2510 sts.comp_status == CS_PORT_CONFIG_CHG ||
2511 sts.comp_status == CS_PORT_BUSY)) {
2512 EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2561 tq->qfull_retry_count);
2562 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2563 tq->flags |= TQF_QUEUE_SUSPENDED;
2564
2565 tq->qfull_retry_count--;
2566
2567 ADAPTER_STATE_LOCK(ha);
2568 if (ha->port_retry_timer == 0) {
2569 if ((ha->port_retry_timer =
2570 ha->qfull_retry_delay) ==
2571 0) {
2572 *set_flags |=
2573 PORT_RETRY_NEEDED;
2574 }
2575 }
2576 ADAPTER_STATE_UNLOCK(ha);
2577 }
2578 } else {
2579 EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2580 tq->d_id.b24, sp->lun_queue->lun_no);
2581
2582 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) && LOOP_READY(ha)) {
2583 *set_flags |= MARKER_NEEDED;
2584 rval |= BIT_0;
2585 }
2586 }
2587
2588 /* Set retry status. */
2589 sp->flags |= SRB_RETRY;
2590 } else {
2591 fcpr->fcp_resid =
2592 sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2593 sp->fcp->fcp_data_len : sts.fcp_residual_length;
2594
2595 if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2596 (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2597
2598 if (sts.scsi_status_l == STATUS_CHECK) {
2599 sp->pkt->pkt_reason = CS_COMPLETE;
2600 } else {
2601 EL(ha, "transport error - "
2602 "underrun & invalid resid\n");
2603 EL(ha, "ssh=%xh, ssl=%xh\n",
2604 sts.scsi_status_h, sts.scsi_status_l);
2605 sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2606 }
2607 }
2608
2609 /* Ignore firmware underrun error. */
2610 if (sts.comp_status == CS_DATA_UNDERRUN &&
2611 (sts.scsi_status_h & FCP_RESID_UNDER ||
2612 (sts.scsi_status_l != STATUS_CHECK &&
2613 sts.scsi_status_l != STATUS_GOOD))) {
2614 sp->pkt->pkt_reason = CS_COMPLETE;
2615 }
2616
2617 if (sp->pkt->pkt_reason != CS_COMPLETE) {
2618 ha->xioctl->DeviceErrorCount++;
2619 EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh,"
2620 " pkt_reason=%xh, spf=%xh, sp=%ph\n",
2621 sts.comp_status, tq->d_id.b24,
2622 sp->lun_queue->lun_no, sp->pkt->pkt_reason,
2623 sp->flags, sp);
2624 }
2625
2626 /* Set target request sense data. */
2627 if (sts.scsi_status_l == STATUS_CHECK) {
2628 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2629
2630 if (sp->pkt->pkt_reason == CS_COMPLETE &&
2631 sts.req_sense_data[2] != KEY_NO_SENSE &&
2632 sts.req_sense_data[2] !=
2633 KEY_UNIT_ATTENTION) {
2634 ha->xioctl->DeviceErrorCount++;
2635 }
2636
2637 sense_sz = sts.req_sense_length;
2638
2639 /* Insure data does not exceed buf. */
2640 if (sp->pkt->pkt_rsplen <=
2641 (uint32_t)sizeof (fcp_rsp_t) +
2642 fcpr->fcp_response_len) {
2643 sp->request_sense_length = 0;
2652 sp->request_sense_length) {
2653 sp->request_sense_length =
2654 sense_sz;
2655 }
2656
2657 sp->request_sense_ptr = (caddr_t)rsp;
2658
2659 sense_sz = (uint32_t)
2660 (((uintptr_t)pkt23 +
2661 sizeof (sts_entry_t)) -
2662 (uintptr_t)sts.req_sense_data);
2663 if (sp->request_sense_length <
2664 sense_sz) {
2665 sense_sz =
2666 sp->request_sense_length;
2667 }
2668
2669 fcpr->fcp_sense_len = sense_sz;
2670
2671 /* Move sense data. */
2672 ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2673 (uint8_t *)sp->request_sense_ptr,
2674 sts.req_sense_data,
2675 (size_t)sense_sz,
2676 DDI_DEV_AUTOINCR);
2677
2678 sp->request_sense_ptr += sense_sz;
2679 sp->request_sense_length -= sense_sz;
2680 if (sp->request_sense_length != 0 &&
2681 !(CFG_IST(ha, CFG_CTRL_82XX))) {
2682 rsp_q->status_srb = sp;
2683 }
2684 }
2685
2686 if (sense_sz != 0) {
2687 EL(sp->ha, "check condition sense data, "
2688 "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2689 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2690 "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2691 sp->lun_queue->lun_no,
2692 sts.req_sense_data[0],
2693 sts.req_sense_data[1],
2694 sts.req_sense_data[2],
2695 sts.req_sense_data[3],
2696 sts.req_sense_data[4],
2697 sts.req_sense_data[5],
2698 sts.req_sense_data[6],
2699 sts.req_sense_data[7],
2700 sts.req_sense_data[8],
2701 sts.req_sense_data[9],
2702 sts.req_sense_data[10],
2703 sts.req_sense_data[11],
2704 sts.req_sense_data[12],
2705 sts.req_sense_data[13],
2706 sts.req_sense_data[14],
2707 sts.req_sense_data[15],
2708 sts.req_sense_data[16],
2709 sts.req_sense_data[17]);
2710 } else {
2711 EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2712 "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2713 }
2714 }
2715 }
2716
2717 /* Set completed status. */
2718 sp->flags |= SRB_ISP_COMPLETED;
2719
2720 /* Place command on done queue. */
2721 if (rsp_q->status_srb == NULL) {
2722 ql_add_link_b(done_q, &sp->cmd);
2723 }
2724
2725 QL_PRINT_3(ha, "done\n");
2726
2727 return (rval);
2728 }
2729
2730 /*
2731 * ql_status_cont_entry
2732 * Processes status continuation entry.
2733 *
2734 * Input:
2735 * ha: adapter state pointer.
2736 * rsp_q: response queue structure pointer.
2737 * pkt: entry pointer.
2738 * done_q: done queue pointer.
2739 * set_flags: task daemon flags to set.
2740 * reset_flags: task daemon flags to reset.
2741 *
2742 * Context:
2743 * Interrupt or Kernel context, no mailbox commands allowed.
2744 */
2745 /* ARGSUSED */
2746 static void
2747 ql_status_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2748 sts_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2749 uint64_t *reset_flags)
2750 {
2751 uint32_t sense_sz, index;
2752 ql_srb_t *sp = rsp_q->status_srb;
2753
2754 QL_PRINT_3(ha, "started\n");
2755
2756 if (sp != NULL && sp->request_sense_length) {
2757 if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2758 sense_sz = sizeof (pkt->req_sense_data);
2759 } else {
2760 sense_sz = sp->request_sense_length;
2761 }
2762
2763 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2764 for (index = 0; index < sense_sz; index += 4) {
2765 ql_chg_endian((uint8_t *)
2766 &pkt->req_sense_data[0] + index, 4);
2767 }
2768 }
2769
2770 /* Move sense data. */
2771 ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2772 (uint8_t *)sp->request_sense_ptr,
2773 (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2774 DDI_DEV_AUTOINCR);
2775
2776 sp->request_sense_ptr += sense_sz;
2777 sp->request_sense_length -= sense_sz;
2778
2779 /* Place command on done queue. */
2780 if (sp->request_sense_length == 0) {
2781 ql_add_link_b(done_q, &sp->cmd);
2782 rsp_q->status_srb = NULL;
2783 }
2784 }
2785
2786 QL_PRINT_3(ha, "done\n");
2787 }
2788
2789 /*
2790 * ql_ip_entry
2791 * Processes received ISP IP entry.
2792 *
2793 * Input:
2794 * ha: adapter state pointer.
2795 * rsp_q: response queue structure pointer.
2796 * pkt: entry pointer.
2797 * done_q: done queue pointer.
2798 * set_flags: task daemon flags to set.
2799 * reset_flags: task daemon flags to reset.
2800 *
2801 * Context:
2802 * Interrupt or Kernel context, no mailbox commands allowed.
2803 */
2804 /* ARGSUSED */
2805 static void
2806 ql_ip_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ip_entry_t *pkt23,
2807 ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
2808 {
2809 ql_srb_t *sp = NULL;
2810 uint32_t index, resp_identifier;
2811 ql_tgt_t *tq;
2812
2813 QL_PRINT_3(ha, "started\n");
2814
2815 /* Validate the response entry handle. */
2816 resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
2817 &pkt23->handle);
2818 index = resp_identifier & OSC_INDEX_MASK;
2819 if (index < ha->osc_max_cnt) {
2820 /* the index seems reasonable */
2821 if ((sp = ha->outstanding_cmds[index]) == NULL) {
2822 sp = ql_verify_preprocessed_cmd(ha, rsp_q,
2823 (uint32_t *)&pkt23->handle,
2824 (uint32_t *)&resp_identifier, set_flags,
2825 reset_flags);
2826 }
2827 if (sp != NULL) {
2828 if (sp == QL_ABORTED_SRB(ha)) {
2829 EL(ha, "QL_ABORTED_SRB handle=%xh\n",
2830 resp_identifier);
2831 sp = NULL;
2832 ha->outstanding_cmds[index] = NULL;
2833 } else if (sp->handle == resp_identifier) {
2834 /* Neo, you're the one... */
2835 ha->outstanding_cmds[index] = NULL;
2836 sp->handle = 0;
2837 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2838 } else {
2839 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2840 resp_identifier, sp->handle);
2841 sp = NULL;
2842 ql_signal_abort(ha, set_flags);
2843 }
2844 }
2845 } else {
2846 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2847 index, resp_identifier);
2848 ql_signal_abort(ha, set_flags);
2849 }
2850
2851 if (sp != NULL) {
2852 tq = sp->lun_queue->target_queue;
2853
2854 /* Set ISP completion status */
2855 if (CFG_IST(ha, CFG_CTRL_24XX)) {
2856 ip_cmd_entry_t *pkt24 = (ip_cmd_entry_t *)pkt23;
2857
2858 sp->pkt->pkt_reason = ddi_get16(
2859 rsp_q->rsp_ring.acc_handle, &pkt24->hdl_status);
2860 } else {
2861 sp->pkt->pkt_reason = ddi_get16(
2862 rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
2863 }
2864
2865 if (ha->task_daemon_flags & LOOP_DOWN) {
2866 EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
2867 tq->d_id.b24);
2868
2869 /* Set retry status. */
2870 sp->flags |= SRB_RETRY;
2871
2872 } else if (tq->port_down_retry_count &&
2873 (sp->pkt->pkt_reason == CS_INCOMPLETE ||
2874 sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
2875 sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2876 sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2877 sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2878 EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
2879 sp->pkt->pkt_reason, tq->d_id.b24,
2880 tq->port_down_retry_count);
2881
2882 /* Set retry status. */
2912 DEVICE_QUEUE_UNLOCK(tq);
2913
2914 } else if (sp->pkt->pkt_reason == CS_RESET) {
2915 EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);
2916
2917 /* Set retry status. */
2918 sp->flags |= SRB_RETRY;
2919 } else {
2920 if (sp->pkt->pkt_reason != CS_COMPLETE) {
2921 EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
2922 sp->pkt->pkt_reason, tq->d_id.b24);
2923 }
2924 }
2925
2926 /* Set completed status. */
2927 sp->flags |= SRB_ISP_COMPLETED;
2928
2929 ql_add_link_b(done_q, &sp->cmd);
2930
2931 }
2932 QL_PRINT_3(ha, "done\n");
2933 }
2934
2935 /*
2936 * ql_ip_rcv_entry
2937 * Processes received ISP IP buffers entry.
2938 *
2939 * Input:
2940 * ha: adapter state pointer.
2941 * rsp_q: response queue structure pointer.
2942 * pkt: entry pointer.
2943 * done_q: done queue pointer.
2944 * set_flags: task daemon flags to set.
2945 * reset_flags: task daemon flags to reset.
2946 *
2947 * Context:
2948 * Interrupt or Kernel context, no mailbox commands allowed.
2949 */
2950 /* ARGSUSED */
2951 static void
2952 ql_ip_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2953 ip_rcv_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2954 uint64_t *reset_flags)
2955 {
2956 port_id_t s_id;
2957 uint16_t index;
2958 uint8_t cnt;
2959 ql_tgt_t *tq;
2960
2961 QL_PRINT_3(ha, "started\n");
2962
2963 /* Locate device queue. */
2964 s_id.b.al_pa = pkt->s_id[0];
2965 s_id.b.area = pkt->s_id[1];
2966 s_id.b.domain = pkt->s_id[2];
2967 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2968 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2969 return;
2970 }
2971
2972 tq->ub_sequence_length = (uint16_t)ddi_get16(
2973 rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
2974 tq->ub_total_seg_cnt = pkt->segment_count;
2975 tq->ub_seq_id = ++ha->ub_seq_id;
2976 tq->ub_seq_cnt = 0;
2977 tq->ub_frame_ro = 0;
2978 tq->ub_loop_id = pkt->loop_id;
2979 ha->rcv_dev_q = tq;
2980
2981 for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2982 tq->ub_total_seg_cnt; cnt++) {
2983
2984 index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
2985 &pkt->buffer_handle[cnt]);
2986
2987 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2988 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2989 *set_flags |= ISP_ABORT_NEEDED;
2990 break;
2991 }
2992 }
2993
2994 QL_PRINT_3(ha, "done\n");
2995 }
2996
2997 /*
2998 * ql_ip_rcv_cont_entry
2999 * Processes received ISP IP buffers continuation entry.
3000 *
3001 * Input:
3002 * ha: adapter state pointer.
3003 * rsp_q: response queue structure pointer.
3004 * pkt: entry pointer.
3005 * done_q: done queue pointer.
3006 * set_flags: task daemon flags to set.
3007 * reset_flags: task daemon flags to reset.
3008 *
3009 * Context:
3010 * Interrupt or Kernel context, no mailbox commands allowed.
3011 */
3012 /* ARGSUSED */
3013 static void
3014 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3015 ip_rcv_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3016 uint64_t *reset_flags)
3017 {
3018 uint16_t index;
3019 uint8_t cnt;
3020 ql_tgt_t *tq;
3021
3022 QL_PRINT_3(ha, "started\n");
3023
3024 if ((tq = ha->rcv_dev_q) == NULL) {
3025 EL(ha, "No IP receive device\n");
3026 return;
3027 }
3028
3029 for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
3030 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
3031
3032 index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
3033 &pkt->buffer_handle[cnt]);
3034
3035 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3036 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3037 *set_flags |= ISP_ABORT_NEEDED;
3038 break;
3039 }
3040 }
3041
3042 QL_PRINT_3(ha, "done\n");
3043 }
3044
3045 /*
 * ql_ip_24xx_rcv_entry
 * Processes received ISP24xx IP buffers entry.
3048 *
3049 * Input:
3050 * ha: adapter state pointer.
3051 * rsp_q: response queue structure pointer.
3052 * pkt: entry pointer.
3053 * done_q: done queue pointer.
3054 * set_flags: task daemon flags to set.
3055 * reset_flags: task daemon flags to reset.
3056 *
3057 * Context:
3058 * Interrupt or Kernel context, no mailbox commands allowed.
3059 */
3060 /* ARGSUSED */
3061 static void
3062 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3063 ip_rcv_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3064 uint64_t *reset_flags)
3065 {
3066 port_id_t s_id;
3067 uint16_t index;
3068 uint8_t cnt;
3069 ql_tgt_t *tq;
3070
3071 QL_PRINT_3(ha, "started\n");
3072
3073 /* Locate device queue. */
3074 s_id.b.al_pa = pkt->s_id[0];
3075 s_id.b.area = pkt->s_id[1];
3076 s_id.b.domain = pkt->s_id[2];
3077 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
3078 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
3079 return;
3080 }
3081
3082 if (tq->ub_total_seg_cnt == 0) {
3083 tq->ub_sequence_length = (uint16_t)ddi_get16(
3084 rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
3085 tq->ub_total_seg_cnt = pkt->segment_count;
3086 tq->ub_seq_id = ++ha->ub_seq_id;
3087 tq->ub_seq_cnt = 0;
3088 tq->ub_frame_ro = 0;
3089 tq->ub_loop_id = (uint16_t)ddi_get16(
3090 rsp_q->rsp_ring.acc_handle, &pkt->n_port_hdl);
3091 }
3092
3093 for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
3094 tq->ub_total_seg_cnt; cnt++) {
3095
3096 index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
3097 &pkt->buffer_handle[cnt]);
3098
3099 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3100 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3101 *set_flags |= ISP_ABORT_NEEDED;
3102 break;
3103 }
3104 }
3105
3106 QL_PRINT_3(ha, "done\n");
3107 }
3108
3109 /*
3110 * ql_ms_entry
3111 * Processes received Name/Management/CT Pass-Through entry.
3112 *
3113 * Input:
3114 * ha: adapter state pointer.
3115 * rsp_q: response queue structure pointer.
3116 * pkt23: entry pointer.
3117 * done_q: done queue pointer.
3118 * set_flags: task daemon flags to set.
3119 * reset_flags: task daemon flags to reset.
3120 *
3121 * Context:
3122 * Interrupt or Kernel context, no mailbox commands allowed.
3123 */
3124 /* ARGSUSED */
3125 static void
3126 ql_ms_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ms_entry_t *pkt23,
3127 ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
3128 {
3129 ql_srb_t *sp = NULL;
3130 uint32_t index, cnt, resp_identifier;
3131 ql_tgt_t *tq;
3132 ct_passthru_entry_t *pkt24 = (ct_passthru_entry_t *)pkt23;
3133
3134 QL_PRINT_3(ha, "started\n");
3135
3136 /* Validate the response entry handle. */
3137 resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
3138 &pkt23->handle);
3139 index = resp_identifier & OSC_INDEX_MASK;
3140 if (index < ha->osc_max_cnt) {
3141 /* the index seems reasonable */
3142 if ((sp = ha->outstanding_cmds[index]) == NULL) {
3143 sp = ql_verify_preprocessed_cmd(ha, rsp_q,
3144 (uint32_t *)&pkt23->handle,
3145 (uint32_t *)&resp_identifier, set_flags,
3146 reset_flags);
3147 }
3148 if (sp != NULL) {
3149 if (sp == QL_ABORTED_SRB(ha)) {
3150 EL(ha, "QL_ABORTED_SRB handle=%xh\n",
3151 resp_identifier);
3152 sp = NULL;
3153 ha->outstanding_cmds[index] = NULL;
3154 } else if (sp->handle == resp_identifier) {
3155 /* Neo, you're the one... */
3156 ha->outstanding_cmds[index] = NULL;
3157 sp->handle = 0;
3158 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
3159 } else {
3160 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3161 resp_identifier, sp->handle);
3162 sp = NULL;
3163 ql_signal_abort(ha, set_flags);
3164 }
3165 }
3166 } else {
3167 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3168 index, resp_identifier);
3169 ql_signal_abort(ha, set_flags);
3170 }
3171
3172 if (sp != NULL) {
3173 if (!(sp->flags & SRB_MS_PKT)) {
3174 EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
3175 sp->flags);
3176 *set_flags |= ISP_ABORT_NEEDED;
3177 return;
3178 }
3179
3180 tq = sp->lun_queue->target_queue;
3181
3182 /* Set ISP completion status */
3183 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3184 sp->pkt->pkt_reason = ddi_get16(
3185 rsp_q->rsp_ring.acc_handle, &pkt24->status);
3186 } else {
3187 sp->pkt->pkt_reason = ddi_get16(
3188 rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
3189 }
3190
3191 if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
3192 sp->retry_count) {
3193 EL(ha, "Resouce Unavailable Retry = %d\n",
3194 sp->retry_count);
3195
3196 /* Set retry status. */
3197 sp->retry_count--;
3198 sp->flags |= SRB_RETRY;
3199
3200 /* Acquire device queue lock. */
3201 DEVICE_QUEUE_LOCK(tq);
3202
3203 if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
3204 tq->flags |= TQF_QUEUE_SUSPENDED;
3205
3206 ADAPTER_STATE_LOCK(ha);
3207 if (ha->port_retry_timer == 0) {
3208 ha->port_retry_timer = 2;
3231
3232 ADAPTER_STATE_LOCK(ha);
3233 if (ha->port_retry_timer == 0) {
3234 if ((ha->port_retry_timer =
3235 ha->port_down_retry_delay) == 0) {
3236 *set_flags |=
3237 PORT_RETRY_NEEDED;
3238 }
3239 }
3240 ADAPTER_STATE_UNLOCK(ha);
3241 }
3242 /* Release device queue specific lock. */
3243 DEVICE_QUEUE_UNLOCK(tq);
3244
3245 } else if (sp->pkt->pkt_reason == CS_RESET) {
3246 EL(ha, "Reset Retry\n");
3247
3248 /* Set retry status. */
3249 sp->flags |= SRB_RETRY;
3250
3251 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
3252 sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
3253 cnt = ddi_get32(rsp_q->rsp_ring.acc_handle,
3254 &pkt24->resp_byte_count);
3255 if (cnt < sizeof (fc_ct_header_t)) {
3256 EL(ha, "Data underrun\n");
3257 } else {
3258 sp->pkt->pkt_reason = CS_COMPLETE;
3259 }
3260
3261 } else if (sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
3262 sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT) {
3263 EL(ha, "Port unavailable %xh\n", sp->pkt->pkt_reason);
3264 DEVICE_QUEUE_LOCK(tq);
3265 tq->flags |= TQF_LOGIN_NEEDED;
3266 DEVICE_QUEUE_UNLOCK(tq);
3267 sp->pkt->pkt_reason = CS_TIMEOUT;
3268
3269 } else if (sp->pkt->pkt_reason != CS_COMPLETE) {
3270 EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
3271 }
3272
3273 if (sp->pkt->pkt_reason == CS_COMPLETE) {
3274 /*EMPTY*/
3275 QL_PRINT_3(ha, "ct_cmdrsp=%x%02xh resp\n",
3276 sp->pkt->pkt_cmd[8], sp->pkt->pkt_cmd[9]);
3277 QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
3278 }
3279
3280 /* For nameserver restore command, management change header. */
3281 if ((sp->flags & SRB_RETRY) == 0) {
3282 tq->d_id.b24 == FS_NAME_SERVER ?
3283 ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
3284 sp->pkt->pkt_cmd, B_TRUE) :
3285 ql_cthdr_endian(sp->pkt->pkt_resp_acc,
3286 sp->pkt->pkt_resp, B_TRUE);
3287 }
3288
3289 /* Set completed status. */
3290 sp->flags |= SRB_ISP_COMPLETED;
3291
3292 /* Place command on done queue. */
3293 ql_add_link_b(done_q, &sp->cmd);
3294
3295 }
3296 QL_PRINT_3(ha, "done\n");
3297 }
3298
3299 /*
 * ql_report_id_entry
 * Processes received Report ID Acquisition entry.
3302 *
3303 * Input:
3304 * ha: adapter state pointer.
3305 * rsp_q: response queue structure pointer.
3306 * pkt: entry pointer.
3307 * done_q: done queue pointer.
3308 * set_flags: task daemon flags to set.
3309 * reset_flags: task daemon flags to reset.
3310 *
3311 * Context:
3312 * Interrupt or Kernel context, no mailbox commands allowed.
3313 */
3314 /* ARGSUSED */
3315 static void
3316 ql_report_id_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3317 report_id_acq_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3318 uint64_t *reset_flags)
3319 {
3320 ql_adapter_state_t *vha;
3321
3322 QL_PRINT_3(ha, "started\n");
3323
3324 EL(ha, "format=%d, index=%d, status=%d\n",
3325 pkt->format, pkt->vp_index, pkt->vp_status);
3326
3327 if (pkt->format == 1) {
3328 /* Locate port state structure. */
3329 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3330 if (vha->vp_index == pkt->vp_index) {
3331 break;
3332 }
3333 }
3334 if (vha != NULL) {
3335 if (pkt->vp_status == CS_COMPLETE ||
3336 pkt->vp_status == CS_PORT_ID_CHANGE) {
3337 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3338 vha->fcoe_fcf_idx = pkt->fcf_index;
3339 }
3340 if (vha->vp_index != 0) {
3341 *set_flags |= LOOP_RESYNC_NEEDED;
3342 *reset_flags &= ~LOOP_RESYNC_NEEDED;
3343 vha->loop_down_timer =
3344 LOOP_DOWN_TIMER_OFF;
3345 TASK_DAEMON_LOCK(ha);
3346 vha->task_daemon_flags |=
3347 LOOP_RESYNC_NEEDED;
3348 vha->task_daemon_flags &= ~LOOP_DOWN;
3349 TASK_DAEMON_UNLOCK(ha);
3350 }
3351 ADAPTER_STATE_LOCK(ha);
3352 vha->flags &= ~VP_ID_NOT_ACQUIRED;
3353 ADAPTER_STATE_UNLOCK(ha);
3354 } else {
3355 /* FA-WWPN failure. */
3356 if (pkt->vp_status == CS_INCOMPLETE &&
3357 pkt->ls_rjt_reason_code == 0xff &&
3358 pkt->ls_rjt_explanation == 0x44) {
3359 *set_flags |= ISP_ABORT_NEEDED;
3360 }
3361 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3362 EL(ha, "sts sc=%d, rjt_rea=%xh, "
3363 "rjt_exp=%xh, rjt_sc=%xh\n",
3364 pkt->status_subcode,
3365 pkt->ls_rjt_reason_code,
3366 pkt->ls_rjt_explanation,
3367 pkt->ls_rjt_subcode);
3368 }
3369 ADAPTER_STATE_LOCK(ha);
3370 vha->flags |= VP_ID_NOT_ACQUIRED;
3371 ADAPTER_STATE_UNLOCK(ha);
3372 }
3373 }
3374 }
3375
3376 QL_PRINT_3(ha, "done\n");
3377 }
3378
/*
 * ql_els_passthru_entry
 * Processes received ELS Pass-Through entry.
3381 * Processes received ELS Pass-Through entry.
3382 *
3383 * Input:
3384 * ha: adapter state pointer.
3385 * rsp_q: response queue structure pointer.
3386 * pkt23: entry pointer.
3387 * done_q: done queue pointer.
3388 * set_flags: task daemon flags to set.
3389 * reset_flags: task daemon flags to reset.
3390 *
3391 * Context:
3392 * Interrupt or Kernel context, no mailbox commands allowed.
3393 */
3394 /* ARGSUSED */
3395 static void
3396 ql_els_passthru_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3397 els_passthru_entry_rsp_t *rsp, ql_head_t *done_q, uint64_t *set_flags,
3398 uint64_t *reset_flags)
3399 {
3400 ql_tgt_t *tq;
3401 port_id_t s_id;
3402 ql_srb_t *srb = NULL;
3403 uint32_t index, resp_identifier;
3404
3405 QL_PRINT_3(ha, "started\n");
3406
3407 /* Validate the response entry handle. */
3408 resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &rsp->handle);
3409 index = resp_identifier & OSC_INDEX_MASK;
3410 if (index < ha->osc_max_cnt) {
3411 /* the index seems reasonable */
3412 if ((srb = ha->outstanding_cmds[index]) == NULL) {
3413 srb = ql_verify_preprocessed_cmd(ha, rsp_q,
3414 (uint32_t *)&rsp->handle,
3415 (uint32_t *)&resp_identifier, set_flags,
3416 reset_flags);
3417 }
3418 if (srb != NULL) {
3419 if (srb == QL_ABORTED_SRB(ha)) {
3420 EL(ha, "QL_ABORTED_SRB handle=%xh\n",
3421 resp_identifier);
3422 srb = NULL;
3423 ha->outstanding_cmds[index] = NULL;
3424 } else if (srb->handle == resp_identifier) {
3425 /* Neo, you're the one... */
3426 ha->outstanding_cmds[index] = NULL;
3427 srb->handle = 0;
3428 srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3429 } else {
3430 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3431 resp_identifier, srb->handle);
3432 srb = NULL;
3433 ql_signal_abort(ha, set_flags);
3434 }
3435 }
3436 } else {
3437 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3438 index, resp_identifier);
3439 ql_signal_abort(ha, set_flags);
3440 }
3441
3442 if (srb != NULL) {
3443 if (!(srb->flags & SRB_ELS_PKT)) {
3444 EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed\n",
3445 srb->flags);
3446 *set_flags |= ISP_ABORT_NEEDED;
3447 return;
3448 }
3449
3450 (void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3451 DDI_DMA_SYNC_FORKERNEL);
3452
3453 /* Set ISP completion status */
3454 srb->pkt->pkt_reason = ddi_get16(rsp_q->rsp_ring.acc_handle,
3455 &rsp->comp_status);
3456
3457 if (srb->pkt->pkt_reason != CS_COMPLETE) {
3458 la_els_rjt_t rjt;
3459
3460 EL(ha, "srb=%ph,status err=%xh\n",
3461 srb, srb->pkt->pkt_reason);
3462
3463 if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3464 EL(ha, "e1=%xh e2=%xh\n",
3465 rsp->error_subcode1, rsp->error_subcode2);
3466 }
3467
3468 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3469
3470 /* Build RJT in the response. */
3471 rjt.ls_code.ls_code = LA_ELS_RJT;
3472 rjt.reason = FC_REASON_NO_CONNECTION;
3473
3474 ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3475 (uint8_t *)srb->pkt->pkt_resp,
3476 sizeof (rjt), DDI_DEV_AUTOINCR);
3477
3478 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3479 srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3480 }
3481
3482 if (srb->pkt->pkt_reason == CS_COMPLETE) {
3483 uint8_t opcode;
3484 uint16_t loop_id;
3485
3486 /* Indicate ISP completion */
3487 srb->flags |= SRB_ISP_COMPLETED;
3488
3489 loop_id = ddi_get16(rsp_q->rsp_ring.acc_handle,
3490 &rsp->n_port_hdl);
3491
3492 /* tq is obtained from lun_queue */
3493 tq = srb->lun_queue->target_queue;
3494
3495 if (ha->topology & QL_N_PORT) {
3496 /* on plogi success assume the chosen s_id */
3497 opcode = ddi_get8(rsp_q->rsp_ring.acc_handle,
3498 &rsp->els_cmd_opcode);
3499
3500 EL(ha, "els opcode=%x srb=%ph,pkt=%ph, tq=%ph"
3501 ", portid=%xh, tqlpid=%xh, loop_id=%xh\n",
3502 opcode, srb, srb->pkt, tq, tq->d_id.b24,
3503 tq->loop_id, loop_id);
3504
3505 if (opcode == LA_ELS_PLOGI) {
3506 s_id.b.al_pa = rsp->s_id_7_0;
3507 s_id.b.area = rsp->s_id_15_8;
3508 s_id.b.domain = rsp->s_id_23_16;
3509
3510 ha->d_id.b24 = s_id.b24;
3511 EL(ha, "Set port's source ID %xh\n",
3512 ha->d_id.b24);
3513 }
3514 }
3515 ql_isp_els_handle_rsp_endian(ha, srb);
3516
3517 if (ha != srb->ha) {
3518 EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha);
3519 }
3520
3521 if (tq != NULL) {
3522 tq->logout_sent = 0;
3523 tq->flags &= ~TQF_NEED_AUTHENTICATION;
3524
3525 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3526 tq->flags |= TQF_IIDMA_NEEDED;
3527 }
3528 srb->pkt->pkt_state = FC_PKT_SUCCESS;
3529 }
3530 }
3531
3532 /* Remove command from watchdog queue. */
3533 if (srb->flags & SRB_WATCHDOG_ENABLED) {
3534 tq = srb->lun_queue->target_queue;
3535
3536 DEVICE_QUEUE_LOCK(tq);
3537 ql_remove_link(&tq->wdg, &srb->wdg);
3538 srb->flags &= ~SRB_WATCHDOG_ENABLED;
3539 DEVICE_QUEUE_UNLOCK(tq);
3540 }
3541
3542 /* invoke the callback */
3543 ql_io_comp(srb);
3544 }
3545 QL_PRINT_3(ha, "done\n");
3546 }
3547
3548 /*
3549 * ql_signal_abort
3550 * Signal to the task daemon that a condition warranting an
3551 * isp reset has been detected.
3552 *
3553 * Input:
3554 * ha: adapter state pointer.
3555 * set_flags: task daemon flags to set.
3556 *
3557 * Context:
3558 * Interrupt or Kernel context, no mailbox commands allowed.
3559 */
3560 static void
3561 ql_signal_abort(ql_adapter_state_t *ha, uint64_t *set_flags)
3562 {
3563 if (!CFG_IST(ha, CFG_CTRL_82XX) &&
3564 !(ha->task_daemon_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
3565 *set_flags |= ISP_ABORT_NEEDED;
3566 }
3567 }
|