NEX-5717 import QLogic 16G FC drivers
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
--- old/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_isr.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_isr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 -/* Copyright 2010 QLogic Corporation */
22 +/* Copyright 2015 QLogic Corporation */
23 23
24 24 /*
25 - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
25 + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 26 */
27 27
28 -#pragma ident "Copyright 2010 QLogic Corporation; ql_isr.c"
28 +#pragma ident "Copyright 2015 QLogic Corporation; ql_isr.c"
29 29
30 30 /*
31 31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 32 *
33 33 * ***********************************************************************
34 34 * * **
35 35 * * NOTICE **
36 - * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
36 + * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
37 37 * * ALL RIGHTS RESERVED **
38 38 * * **
39 39 * ***********************************************************************
40 40 *
41 41 */
42 42
43 43 #include <ql_apps.h>
44 44 #include <ql_api.h>
45 45 #include <ql_debug.h>
46 46 #include <ql_iocb.h>
47 47 #include <ql_isr.h>
48 48 #include <ql_init.h>
49 49 #include <ql_mbx.h>
50 50 #include <ql_nx.h>
51 51 #include <ql_xioctl.h>
52 +#include <ql_fm.h>
52 53
53 54 /*
54 55 * Local Function Prototypes.
55 56 */
56 -static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57 - uint32_t *);
58 -static void ql_spurious_intr(ql_adapter_state_t *, int);
59 -static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60 - uint32_t *, int);
61 -static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62 - uint32_t *, uint32_t *, int);
63 -static void ql_fast_fcp_post(ql_srb_t *);
64 -static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65 - uint32_t *, int);
66 -static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67 - uint32_t *, uint32_t *);
68 -static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69 - uint32_t *, uint32_t *);
70 -static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71 - ql_head_t *, uint32_t *, uint32_t *);
72 -static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73 - ql_head_t *, uint32_t *, uint32_t *);
74 -static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75 - ql_head_t *, uint32_t *, uint32_t *);
76 -static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
77 - uint32_t *, uint32_t *);
78 -static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
79 - ql_head_t *, uint32_t *, uint32_t *);
80 -static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
81 - ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 -static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
83 - ql_head_t *, uint32_t *, uint32_t *);
84 -static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
85 - uint32_t *, uint32_t *);
86 -static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
87 - ql_head_t *, uint32_t *, uint32_t *);
88 -static void ql_els_passthru_entry(ql_adapter_state_t *,
89 - els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
90 -static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *, uint32_t *,
91 - uint32_t *, uint32_t *);
92 -static void ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags);
57 +static void ql_clr_risc_intr(ql_adapter_state_t *);
58 +static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, int, uint32_t,
59 + uint64_t *);
60 +static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint64_t *,
61 + uint64_t *);
62 +static void ql_async_event(ql_adapter_state_t *, ql_response_q_t *, uint32_t,
63 + ql_head_t *, uint64_t *, uint64_t *);
64 +static void ql_fast_fcp_post(ql_srb_t *, ql_response_q_t *);
65 +static void ql_response_pkt(ql_adapter_state_t *, ql_response_q_t *,
66 + ql_head_t *, uint64_t *, uint64_t *);
67 +static void ql_error_entry(ql_adapter_state_t *, ql_response_q_t *,
68 + response_t *, ql_head_t *, uint64_t *, uint64_t *);
69 +static int ql_status_entry(ql_adapter_state_t *, ql_response_q_t *,
70 + sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
71 +static int ql_24xx_status_entry(ql_adapter_state_t *, ql_response_q_t *,
72 + sts_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
73 +static int ql_status_error(ql_adapter_state_t *, ql_response_q_t *, ql_srb_t *,
74 + sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
75 +static void ql_status_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
76 + sts_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
77 +static void ql_ip_entry(ql_adapter_state_t *, ql_response_q_t *, ip_entry_t *,
78 + ql_head_t *, uint64_t *, uint64_t *);
79 +static void ql_ip_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
80 + ip_rcv_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
81 +static void ql_ip_rcv_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
82 + ip_rcv_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
83 +static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
84 + ip_rcv_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
85 +static void ql_ms_entry(ql_adapter_state_t *, ql_response_q_t *, ms_entry_t *,
86 + ql_head_t *, uint64_t *, uint64_t *);
87 +static void ql_report_id_entry(ql_adapter_state_t *, ql_response_q_t *,
88 + report_id_acq_t *, ql_head_t *, uint64_t *, uint64_t *);
89 +static void ql_els_passthru_entry(ql_adapter_state_t *, ql_response_q_t *,
90 + els_passthru_entry_rsp_t *, ql_head_t *, uint64_t *, uint64_t *);
91 +static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *,
92 + ql_response_q_t *, uint32_t *, uint32_t *, uint64_t *, uint64_t *);
93 +static void ql_signal_abort(ql_adapter_state_t *, uint64_t *);
93 94
94 95 /*
95 - * Spurious interrupt counter
96 + * ql_disable_intr
97 + * Disable interrupts.
98 + *
99 + * Input:
100 + * ha: adapter state pointer.
101 + *
102 + * Context:
103 + * Interrupt or Kernel context, no mailbox commands allowed.
96 104 */
97 -uint32_t ql_spurious_cnt = 4;
98 -uint32_t ql_max_intr_loop = 16;
105 +void
106 +ql_disable_intr(ql_adapter_state_t *ha)
107 +{
108 + int i, rval;
99 109
110 + QL_PRINT_10(ha, "started\n");
111 +
112 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
113 + ql_8021_disable_intrs(ha);
114 + } else {
115 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
116 + WRT32_IO_REG(ha, ictrl, 0);
117 + (void) RD32_IO_REG(ha, ictrl); /* PCI posting */
118 + } else {
119 + WRT16_IO_REG(ha, ictrl, 0);
120 + (void) RD16_IO_REG(ha, ictrl); /* PCI posting */
121 + }
122 + }
123 + if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
124 + for (i = 0; i < ha->intr_cnt; i++) {
125 + QL_PRINT_10(ha, "intr_set_mask %d\n", i);
126 + if ((rval = ddi_intr_set_mask(ha->htable[i])) !=
127 + DDI_SUCCESS) {
128 + EL(ha, "intr_set_mask status=%xh\n", rval);
129 + }
130 + }
131 + }
132 + ADAPTER_STATE_LOCK(ha);
133 + ha->flags &= ~INTERRUPTS_ENABLED;
134 + ADAPTER_STATE_UNLOCK(ha);
135 +
136 + QL_PRINT_10(ha, "done\n");
137 +}
138 +
100 139 /*
101 - * ql_isr
102 - * Process all INTX intr types.
 140 + * ql_enable_intr
141 + * Enable interrupts.
103 142 *
104 143 * Input:
105 - * arg1: adapter state pointer.
144 + * ha: adapter state pointer.
106 145 *
107 - * Returns:
108 - * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
146 + * Context:
147 + * Interrupt or Kernel context, no mailbox commands allowed.
148 + */
149 +void
150 +ql_enable_intr(ql_adapter_state_t *ha)
151 +{
152 + int i, rval;
153 +
154 + QL_PRINT_10(ha, "started\n");
155 +
156 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
157 + ql_8021_enable_intrs(ha);
158 + } else {
159 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
160 + WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
161 + (void) RD32_IO_REG(ha, ictrl); /* PCI posting */
162 + } else {
163 + WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
164 + (void) RD16_IO_REG(ha, ictrl); /* PCI posting */
165 + }
166 + }
167 + if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
168 + for (i = 0; i < ha->intr_cnt; i++) {
169 + QL_PRINT_10(ha, "intr_clr_mask %d\n", i);
170 + if ((rval = ddi_intr_clr_mask(ha->htable[i])) !=
171 + DDI_SUCCESS) {
172 + EL(ha, "intr_clr_mask status=%xh\n", rval);
173 + }
174 + }
175 + }
176 + ADAPTER_STATE_LOCK(ha);
177 + ha->flags |= INTERRUPTS_ENABLED;
178 + ADAPTER_STATE_UNLOCK(ha);
179 +
180 + QL_PRINT_10(ha, "done\n");
181 +}
182 +
183 +/*
184 + * ql_clr_risc_intr
185 + * Clear firmware interrupt.
109 186 *
187 + * Input:
188 + * ha: adapter state pointer.
189 + *
110 190 * Context:
111 191 * Interrupt or Kernel context, no mailbox commands allowed.
112 192 */
113 -/* ARGSUSED */
114 -uint_t
115 -ql_isr(caddr_t arg1)
193 +static void
194 +ql_clr_risc_intr(ql_adapter_state_t *ha)
116 195 {
117 - return (ql_isr_aif(arg1, 0));
196 + QL_PRINT_3(ha, "started\n");
197 +
198 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
199 + ql_8021_clr_fw_intr(ha);
200 + } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
201 + WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
202 + RD32_IO_REG(ha, hccr); /* PCI posting. */
203 + } else {
204 + WRT16_IO_REG(ha, semaphore, 0);
205 + WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
206 + RD16_IO_REG(ha, hccr); /* PCI posting. */
207 + }
208 +
209 + QL_PRINT_3(ha, "done\n");
118 210 }
119 211
120 212 /*
121 - * ql_isr_default
122 - * Process unknown/unvectored intr types
213 + * ql_isr
214 + * Process all INTX intr types.
123 215 *
124 216 * Input:
125 217 * arg1: adapter state pointer.
126 - * arg2: interrupt vector.
127 218 *
128 219 * Returns:
129 220 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
130 221 *
131 222 * Context:
132 223 * Interrupt or Kernel context, no mailbox commands allowed.
133 224 */
134 225 /* ARGSUSED */
135 226 uint_t
136 -ql_isr_default(caddr_t arg1, caddr_t arg2)
227 +ql_isr(caddr_t arg1)
137 228 {
138 - ql_adapter_state_t *ha = (void *)arg1;
139 -
140 - EL(ha, "isr_default called: idx=%x\n", arg2);
141 - return (ql_isr_aif(arg1, arg2));
229 + return (ql_isr_aif(arg1, 0));
142 230 }
143 231
144 232 /*
145 233 * ql_isr_aif
146 234 * Process mailbox and I/O command completions.
147 235 *
148 236 * Input:
149 237 * arg: adapter state pointer.
150 - * intvec: interrupt vector.
238 + * arg2: interrupt vector.
151 239 *
152 240 * Returns:
153 241 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
154 242 *
155 243 * Context:
156 244 * Interrupt or Kernel context, no mailbox commands allowed.
157 245 */
158 246 /* ARGSUSED */
159 247 uint_t
160 -ql_isr_aif(caddr_t arg, caddr_t intvec)
248 +ql_isr_aif(caddr_t arg, caddr_t arg2)
161 249 {
162 - uint16_t mbx;
163 - uint32_t stat;
250 + uint32_t mbx, stat;
164 251 ql_adapter_state_t *ha = (void *)arg;
165 - uint32_t set_flags = 0;
166 - uint32_t reset_flags = 0;
252 + uint64_t set_flags = 0, reset_flags = 0;
167 253 ql_head_t isr_done_q = {NULL, NULL};
168 254 uint_t rval = DDI_INTR_UNCLAIMED;
169 - int spurious_intr = 0;
170 - boolean_t intr = B_FALSE, daemon = B_FALSE;
171 - int intr_loop = 4;
172 - boolean_t clear_spurious = B_TRUE;
255 + ql_response_q_t *rsp_q = NULL;
256 + int intr, index = (int)((uintptr_t)arg2);
173 257
174 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
258 + QL_PRINT_3(ha, "started, index=%d\n", index);
175 259
176 - QL_PM_LOCK(ha);
260 + /* Exit if not attached. */
261 + if (ha == NULL || ha->intr_pri == NULL) {
262 + EL(ha, "ha=%p, intr_pri=%p not attached\n", (void *)ha,
263 + ha != NULL ? ha->intr_pri : NULL);
264 + return (DDI_INTR_UNCLAIMED);
265 + }
266 +
267 + /* Exit if chip not powered up. */
177 268 if (ha->power_level != PM_LEVEL_D0) {
178 - /*
179 - * Looks like we are about to go down soon, exit early.
180 - */
181 - QL_PM_UNLOCK(ha);
182 - QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
269 + EL(ha, "power down exit\n");
183 270 return (DDI_INTR_UNCLAIMED);
184 271 }
185 - ha->busy++;
272 + QL_PM_LOCK(ha);
273 + ha->pm_busy++;
186 274 QL_PM_UNLOCK(ha);
187 275
188 276 /* Acquire interrupt lock. */
189 - INTR_LOCK(ha);
277 + if (index > ha->rsp_queues_cnt) {
278 + intr = index = 0;
279 + } else if (index) {
280 + intr = index - 1;
281 + } else {
282 + intr = 0;
283 + }
284 + INDX_INTR_LOCK(ha, intr);
190 285
191 - if (CFG_IST(ha, CFG_CTRL_2200)) {
192 - while (RD16_IO_REG(ha, istatus) & RISC_INT) {
193 - /* Reset idle timer. */
194 - ha->idle_timer = 0;
195 - rval = DDI_INTR_CLAIMED;
196 - if (intr_loop) {
197 - intr_loop--;
198 - }
286 + if (index && ha->flags & NO_INTR_HANDSHAKE) {
287 + QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE, index=%xh\n", index);
288 + index--;
289 + if (index < ha->rsp_queues_cnt) {
290 + rsp_q = ha->rsp_queues[index];
291 + }
292 + if (rsp_q == NULL) {
293 + EL(ha, "unsupported MULTI_Q_RSP_UPDATE, index=%d\n",
294 + index);
295 + rsp_q = ha->rsp_queues[0];
296 + }
199 297
200 - /* Special Fast Post 2200. */
201 - stat = 0;
202 - if (ha->task_daemon_flags & FIRMWARE_LOADED &&
203 - ha->flags & ONLINE) {
204 - ql_srb_t *sp;
298 + if (ha->flags & QUEUE_SHADOW_PTRS) {
299 + (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
300 + (off_t)rsp_q->rsp_in_shadow_ofst,
301 + SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
302 + mbx = ddi_get32(rsp_q->rsp_ring.acc_handle,
303 + rsp_q->rsp_in_shadow_ptr);
304 + } else {
305 + mbx = RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
306 + }
205 307
206 - mbx = RD16_IO_REG(ha, mailbox_out[23]);
308 + if (mbx != rsp_q->rsp_ring_index) {
309 + rsp_q->isp_rsp_index = (uint16_t)mbx;
310 + ql_response_pkt(ha, rsp_q, &isr_done_q,
311 + &set_flags, &reset_flags);
312 + /* PCI posting */
313 + (void) RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
314 + } else if (ha->flags & INTERRUPTS_ENABLED) {
315 + /*EMPTY*/
316 + QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbar_rsp_in "
317 + "same as before\n");
318 + }
207 319
208 - if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
209 - /* Release mailbox registers. */
210 - WRT16_IO_REG(ha, semaphore, 0);
320 + /* Set interrupt claimed status. */
321 + rval = DDI_INTR_CLAIMED;
211 322
212 - if (intr_loop) {
213 - WRT16_IO_REG(ha, hccr,
214 - HC_CLR_RISC_INT);
215 - }
323 + } else if (CFG_IST(ha, CFG_CTRL_22XX)) {
324 + rsp_q = ha->rsp_queues[0];
325 + if (RD16_IO_REG(ha, istatus) & RISC_INT) {
326 + rval = DDI_INTR_CLAIMED;
216 327
217 - /* Get handle. */
218 - mbx >>= 4;
219 - stat = mbx & OSC_INDEX_MASK;
220 -
221 - /* Validate handle. */
222 - sp = stat < MAX_OUTSTANDING_COMMANDS ?
223 - ha->outstanding_cmds[stat] : NULL;
224 -
225 - if (sp != NULL && (sp->handle & 0xfff)
226 - == mbx) {
227 - ha->outstanding_cmds[stat] =
228 - NULL;
229 - sp->handle = 0;
230 - sp->flags &=
231 - ~SRB_IN_TOKEN_ARRAY;
232 -
233 - /* Set completed status. */
234 - sp->flags |= SRB_ISP_COMPLETED;
235 -
236 - /* Set completion status */
237 - sp->pkt->pkt_reason =
238 - CS_COMPLETE;
239 -
240 - ql_fast_fcp_post(sp);
241 - } else if (mbx !=
242 - (QL_FCA_BRAND & 0xfff)) {
243 - if (sp == NULL) {
244 - EL(ha, "unknown IOCB"
245 - " handle=%xh\n",
246 - mbx);
247 - } else {
248 - EL(ha, "mismatch IOCB"
249 - " handle pkt=%xh, "
250 - "sp=%xh\n", mbx,
251 - sp->handle & 0xfff);
252 - }
253 -
254 - (void) ql_binary_fw_dump(ha,
255 - FALSE);
256 -
257 - if (!(ha->task_daemon_flags &
258 - (ISP_ABORT_NEEDED |
259 - ABORT_ISP_ACTIVE))) {
260 - EL(ha, "ISP Invalid "
261 - "handle, "
262 - "isp_abort_needed"
263 - "\n");
264 - set_flags |=
265 - ISP_ABORT_NEEDED;
266 - }
267 - }
328 + /* Check for mailbox interrupt. */
329 + stat = RD16_IO_REG(ha, semaphore);
330 + if (stat & BIT_0) {
331 + /* Get mailbox data. */
332 + mbx = RD16_IO_REG(ha, mailbox_out[0]);
333 + if (mbx > 0x3fff && mbx < 0x8000) {
334 + ql_mbx_completion(ha, mbx,
335 + &set_flags, &reset_flags);
336 + } else if (mbx > 0x7fff && mbx < 0xc000) {
337 + ql_async_event(ha, rsp_q, mbx,
338 + &isr_done_q, &set_flags,
339 + &reset_flags);
340 + } else {
341 + EL(ha, "22XX unknown interrupt type\n");
268 342 }
269 - }
270 -
271 - if (stat == 0) {
272 - /* Check for mailbox interrupt. */
273 - mbx = RD16_IO_REG(ha, semaphore);
274 - if (mbx & BIT_0) {
275 - /* Release mailbox registers. */
276 - WRT16_IO_REG(ha, semaphore, 0);
277 -
278 - /* Get mailbox data. */
279 - mbx = RD16_IO_REG(ha, mailbox_out[0]);
280 - if (mbx > 0x3fff && mbx < 0x8000) {
281 - ql_mbx_completion(ha, mbx,
282 - &set_flags, &reset_flags,
283 - intr_loop);
284 - } else if (mbx > 0x7fff &&
285 - mbx < 0xc000) {
286 - ql_async_event(ha, mbx,
287 - &isr_done_q, &set_flags,
288 - &reset_flags, intr_loop);
289 - } else {
290 - EL(ha, "UNKNOWN interrupt "
291 - "type\n");
292 - intr = B_TRUE;
293 - }
343 + } else {
344 + rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
345 + if (rsp_q->isp_rsp_index !=
346 + rsp_q->rsp_ring_index) {
347 + ql_response_pkt(ha, rsp_q,
348 + &isr_done_q, &set_flags,
349 + &reset_flags);
294 350 } else {
295 - ha->isp_rsp_index = RD16_IO_REG(ha,
296 - resp_in);
297 -
298 - if (ha->isp_rsp_index !=
299 - ha->rsp_ring_index) {
300 - ql_response_pkt(ha,
301 - &isr_done_q, &set_flags,
302 - &reset_flags, intr_loop);
303 - } else if (++spurious_intr ==
304 - MAX_SPURIOUS_INTR) {
305 - /*
306 - * Process excessive
307 - * spurious intrrupts
308 - */
309 - ql_spurious_intr(ha,
310 - intr_loop);
311 - EL(ha, "excessive spurious "
312 - "interrupts, "
313 - "isp_abort_needed\n");
314 - set_flags |= ISP_ABORT_NEEDED;
315 - } else {
316 - intr = B_TRUE;
317 - }
351 + /*EMPTY*/
352 + QL_PRINT_10(ha, "22XX isp_rsp_index "
353 + "same as before\n");
318 354 }
319 355 }
320 -
321 356 /* Clear RISC interrupt */
322 - if (intr || intr_loop == 0) {
323 - intr = B_FALSE;
324 - WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
325 - }
326 -
327 - if (set_flags != 0 || reset_flags != 0) {
328 - TASK_DAEMON_LOCK(ha);
329 - ha->task_daemon_flags |= set_flags;
330 - ha->task_daemon_flags &= ~reset_flags;
331 - TASK_DAEMON_UNLOCK(ha);
332 - set_flags = 0;
333 - reset_flags = 0;
334 - daemon = B_TRUE;
335 - }
357 + ql_clr_risc_intr(ha);
336 358 }
337 359 } else {
338 - uint32_t ql_max_intr_loop_cnt = 0;
339 -
340 - if (CFG_IST(ha, CFG_CTRL_8021)) {
360 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
341 361 ql_8021_clr_hw_intr(ha);
342 - intr_loop = 1;
343 362 }
344 - while (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) &&
345 - (++ql_max_intr_loop_cnt < ql_max_intr_loop)) {
346 363
347 - clear_spurious = B_TRUE; /* assume ok */
364 + if (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) == 0) {
365 + QL_PRINT_10(ha, "done, index=%d, no interrupt "
366 + "stat=%xh\n", index, stat);
367 + rval = DDI_INTR_UNCLAIMED;
368 + } else if (ha->ql_dump_state & QL_DUMPING) {
369 + EL(ha, "fw_dump, index=%d, active stat=%xh\n",
370 + index, stat);
371 + rval = DDI_INTR_CLAIMED;
372 + } else if (CFG_IST(ha, CFG_CTRL_82XX) &&
373 + RD32_IO_REG(ha, nx_risc_int) == 0) {
374 + QL_PRINT_10(ha, "done, index=%d, no nx_risc_int "
375 + "stat=%xh\n", index, stat);
376 + rval = DDI_INTR_UNCLAIMED;
377 + } else {
378 + rval = DDI_INTR_CLAIMED;
379 + QL_PRINT_3(ha, "index=%d, interrupt stat=%xh\n",
380 + index, stat);
348 381
349 382 /* Capture FW defined interrupt info */
350 383 mbx = MSW(stat);
351 384
352 - /* Reset idle timer. */
353 - ha->idle_timer = 0;
354 - rval = DDI_INTR_CLAIMED;
355 -
356 - if (CFG_IST(ha, CFG_CTRL_8021) &&
357 - (RD32_IO_REG(ha, nx_risc_int) == 0 ||
358 - intr_loop == 0)) {
359 - break;
385 + if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
386 + != DDI_FM_OK) {
387 + qlc_fm_report_err_impact(ha,
388 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
360 389 }
361 390
362 - if (intr_loop) {
363 - intr_loop--;
364 - }
365 -
366 391 switch (stat & 0x1ff) {
367 392 case ROM_MBX_SUCCESS:
368 393 case ROM_MBX_ERR:
369 394 ql_mbx_completion(ha, mbx, &set_flags,
370 - &reset_flags, intr_loop);
371 -
372 - /* Release mailbox registers. */
373 - if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
374 - WRT16_IO_REG(ha, semaphore, 0);
375 - }
395 + &reset_flags);
376 396 break;
377 397
378 398 case MBX_SUCCESS:
379 399 case MBX_ERR:
380 - /* Sun FW, Release mailbox registers. */
381 - if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
382 - WRT16_IO_REG(ha, semaphore, 0);
383 - }
384 400 ql_mbx_completion(ha, mbx, &set_flags,
385 - &reset_flags, intr_loop);
401 + &reset_flags);
386 402 break;
387 403
388 404 case ASYNC_EVENT:
389 - /* Sun FW, Release mailbox registers. */
390 - if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
391 - WRT16_IO_REG(ha, semaphore, 0);
392 - }
393 - ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
394 - &set_flags, &reset_flags, intr_loop);
405 + ql_async_event(ha, ha->rsp_queues[0],
406 + (uint32_t)mbx, &isr_done_q,
407 + &set_flags, &reset_flags);
395 408 break;
396 409
410 + case MULTI_Q_RSP_UPDATE:
411 + QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbx=%xh\n",
412 + mbx);
413 + if (mbx < ha->rsp_queues_cnt) {
414 + rsp_q = ha->rsp_queues[mbx];
415 + }
416 + if (rsp_q == NULL) {
417 + EL(ha, "unsupported MULTI_Q_RSP_UPDATE"
418 + " mbx=%d\n", mbx);
419 + rsp_q = ha->rsp_queues[0];
420 + }
421 + if (ha->flags & QUEUE_SHADOW_PTRS) {
422 + (void) ddi_dma_sync(
423 + rsp_q->rsp_ring.dma_handle,
424 + (off_t)rsp_q->rsp_in_shadow_ofst,
425 + SHADOW_ENTRY_SIZE,
426 + DDI_DMA_SYNC_FORCPU);
427 + mbx = ddi_get32(
428 + rsp_q->rsp_ring.acc_handle,
429 + rsp_q->rsp_in_shadow_ptr);
430 + } else {
431 + mbx = RD32_MBAR_REG(ha,
432 + rsp_q->mbar_rsp_in);
433 + }
434 + /* FALLTHRU */
435 +
397 436 case RESP_UPDATE:
398 - if (mbx != ha->rsp_ring_index) {
399 - ha->isp_rsp_index = mbx;
400 - ql_response_pkt(ha, &isr_done_q,
401 - &set_flags, &reset_flags,
402 - intr_loop);
403 - } else if (++spurious_intr ==
404 - ql_spurious_cnt) {
405 - /* Process excessive spurious intr. */
406 - ql_spurious_intr(ha, intr_loop);
407 - EL(ha, "excessive spurious "
408 - "interrupts, isp_abort_needed\n");
409 - set_flags |= ISP_ABORT_NEEDED;
410 - clear_spurious = B_FALSE;
437 + /* Clear RISC interrupt */
438 + ql_clr_risc_intr(ha);
439 +
440 + if (rsp_q == NULL) {
441 + rsp_q = ha->rsp_queues[0];
442 + }
443 + if (mbx != rsp_q->rsp_ring_index) {
444 + rsp_q->isp_rsp_index = (uint16_t)mbx;
445 + ql_response_pkt(ha, rsp_q, &isr_done_q,
446 + &set_flags, &reset_flags);
411 447 } else {
412 - QL_PRINT_10(CE_CONT, "(%d): response "
413 - "ring index same as before\n",
414 - ha->instance);
415 - intr = B_TRUE;
416 - clear_spurious = B_FALSE;
448 + /*EMPTY*/
449 + QL_PRINT_3(ha, "response "
450 + "ring index same as before\n");
417 451 }
418 452 break;
419 453
420 454 case SCSI_FAST_POST_16:
421 455 stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
422 - ql_async_event(ha, stat, &isr_done_q,
423 - &set_flags, &reset_flags, intr_loop);
456 + ql_async_event(ha, ha->rsp_queues[0],
457 + stat, &isr_done_q, &set_flags,
458 + &reset_flags);
424 459 break;
425 460
426 461 case SCSI_FAST_POST_32:
427 462 stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
428 - ql_async_event(ha, stat, &isr_done_q,
429 - &set_flags, &reset_flags, intr_loop);
463 + ql_async_event(ha, ha->rsp_queues[0],
464 + stat, &isr_done_q, &set_flags,
465 + &reset_flags);
430 466 break;
431 467
432 468 case CTIO_FAST_POST:
433 469 stat = (stat & 0xffff0000) |
434 470 MBA_CTIO_COMPLETION;
435 - ql_async_event(ha, stat, &isr_done_q,
436 - &set_flags, &reset_flags, intr_loop);
471 + ql_async_event(ha, ha->rsp_queues[0],
472 + stat, &isr_done_q, &set_flags,
473 + &reset_flags);
437 474 break;
438 475
439 476 case IP_FAST_POST_XMT:
440 477 stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
441 - ql_async_event(ha, stat, &isr_done_q,
442 - &set_flags, &reset_flags, intr_loop);
478 + ql_async_event(ha, ha->rsp_queues[0],
479 + stat, &isr_done_q, &set_flags,
480 + &reset_flags);
443 481 break;
444 482
445 483 case IP_FAST_POST_RCV:
446 484 stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
447 - ql_async_event(ha, stat, &isr_done_q,
448 - &set_flags, &reset_flags, intr_loop);
485 + ql_async_event(ha, ha->rsp_queues[0],
486 + stat, &isr_done_q, &set_flags,
487 + &reset_flags);
449 488 break;
450 489
451 490 case IP_FAST_POST_BRD:
452 491 stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
453 - ql_async_event(ha, stat, &isr_done_q,
454 - &set_flags, &reset_flags, intr_loop);
492 + ql_async_event(ha, ha->rsp_queues[0],
493 + stat, &isr_done_q, &set_flags,
494 + &reset_flags);
455 495 break;
456 496
457 497 case IP_FAST_POST_RCV_ALN:
458 498 stat = (stat & 0xffff0000) |
459 499 MBA_IP_HDR_DATA_SPLIT;
460 - ql_async_event(ha, stat, &isr_done_q,
461 - &set_flags, &reset_flags, intr_loop);
500 + ql_async_event(ha, ha->rsp_queues[0],
501 + stat, &isr_done_q, &set_flags,
502 + &reset_flags);
462 503 break;
463 504
464 505 case ATIO_UPDATE:
465 506 EL(ha, "unsupported ATIO queue update"
466 507 " interrupt, status=%xh\n", stat);
467 - intr = B_TRUE;
468 508 break;
469 509
470 510 case ATIO_RESP_UPDATE:
471 511 EL(ha, "unsupported ATIO response queue "
472 512 "update interrupt, status=%xh\n", stat);
473 - intr = B_TRUE;
474 513 break;
475 514
476 515 default:
477 - ql_handle_uncommon_risc_intr(ha, stat,
516 + ql_handle_uncommon_risc_intr(ha, intr, stat,
478 517 &set_flags);
479 - intr = B_TRUE;
480 518 break;
481 519 }
520 + }
482 521
483 - /* Clear RISC interrupt */
484 - if (intr || intr_loop == 0) {
485 - intr = B_FALSE;
486 - if (CFG_IST(ha, CFG_CTRL_8021)) {
487 - ql_8021_clr_fw_intr(ha);
488 - } else if (CFG_IST(ha, CFG_CTRL_242581)) {
489 - WRT32_IO_REG(ha, hccr,
490 - HC24_CLR_RISC_INT);
491 - } else {
492 - WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
493 - }
494 - }
522 + /* Clear RISC interrupt */
523 + if (rval == DDI_INTR_CLAIMED && rsp_q == NULL) {
524 + ql_clr_risc_intr(ha);
525 + }
495 526
496 - if (set_flags != 0 || reset_flags != 0) {
497 - TASK_DAEMON_LOCK(ha);
498 - ha->task_daemon_flags |= set_flags;
499 - ha->task_daemon_flags &= ~reset_flags;
500 - TASK_DAEMON_UNLOCK(ha);
501 - set_flags = 0;
502 - reset_flags = 0;
503 - daemon = B_TRUE;
504 - }
505 -
506 - if (ha->flags & PARITY_ERROR) {
507 - EL(ha, "parity/pause exit\n");
508 - mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
509 - break;
510 - }
511 -
512 - if (clear_spurious) {
513 - spurious_intr = 0;
514 - }
527 + /* A0 chip delay */
528 + if (CFG_IST(ha, CFG_CTRL_83XX) && ha->rev_id == 1 &&
529 + ha->iflags & (IFLG_INTR_LEGACY | IFLG_INTR_FIXED)) {
530 + drv_usecwait(4);
515 531 }
516 532 }
517 533
518 534 /* Process claimed interrupts during polls. */
519 535 if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
520 536 ha->intr_claimed = B_FALSE;
521 537 rval = DDI_INTR_CLAIMED;
522 538 }
523 539
524 540 /* Release interrupt lock. */
525 - INTR_UNLOCK(ha);
541 + INDX_INTR_UNLOCK(ha, intr);
526 542
527 - if (daemon) {
528 - ql_awaken_task_daemon(ha, NULL, 0, 0);
543 + if (set_flags || reset_flags) {
544 + ql_awaken_task_daemon(ha, NULL, set_flags, reset_flags);
529 545 }
530 546
531 547 if (isr_done_q.first != NULL) {
532 - ql_done(isr_done_q.first);
548 + ql_done(isr_done_q.first, B_FALSE);
533 549 }
534 550
551 + QL_PM_LOCK(ha);
552 + if (ha->pm_busy) {
553 + ha->pm_busy--;
554 + }
555 + QL_PM_UNLOCK(ha);
556 +
535 557 if (rval == DDI_INTR_CLAIMED) {
536 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
558 + QL_PRINT_3(ha, "done\n");
559 + ha->idle_timer = 0;
537 560 ha->xioctl->TotalInterrupts++;
538 561 } else {
539 562 /*EMPTY*/
540 - QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
541 - ha->instance);
563 + QL_PRINT_10(ha, "interrupt not claimed\n");
542 564 }
543 565
544 - QL_PM_LOCK(ha);
545 - ha->busy--;
546 - QL_PM_UNLOCK(ha);
547 -
548 566 return (rval);
549 567 }
550 568
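A minimal registration sketch for context, assuming the usual DDI MSI-X flow: ha->htable and ha->intr_cnt are the handle array and vector count already used by ql_enable_intr()/ql_disable_intr() above, and the vector number is passed as arg2 so ql_isr_aif() can derive its response-queue index. The exact index-to-queue mapping and error handling here are assumptions, not taken from this diff.

	/* Hypothetical attach-time wiring, one handler per allocated vector. */
	for (i = 0; i < ha->intr_cnt; i++) {
		/* arg2 carries the vector index that ql_isr_aif() decodes. */
		if (ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
		    (caddr_t)ha, (caddr_t)(uintptr_t)i) != DDI_SUCCESS) {
			/* tear down and fall back to a single INTx vector */
			break;
		}
		(void) ddi_intr_enable(ha->htable[i]);
	}
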
551 569 /*
552 570 * ql_handle_uncommon_risc_intr
553 571 * Handle an uncommon RISC interrupt.
554 572 *
555 573 * Input:
556 574 * ha: adapter state pointer.
575 + * intr: interrupt index.
557 576 * stat: interrupt status
577 + * set_flags: task daemon flags to set.
558 578 *
559 579 * Context:
560 580 * Interrupt or Kernel context, no mailbox commands allowed.
561 581 */
562 582 static void
563 -ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
564 - uint32_t *set_flags)
583 +ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, int intr, uint32_t stat,
584 + uint64_t *set_flags)
565 585 {
566 586 uint16_t hccr_reg;
567 587
568 588 hccr_reg = RD16_IO_REG(ha, hccr);
569 589
570 590 if (stat & RH_RISC_PAUSED ||
571 591 (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {
572 592
573 593 ADAPTER_STATE_LOCK(ha);
574 594 ha->flags |= PARITY_ERROR;
575 595 ADAPTER_STATE_UNLOCK(ha);
576 596
577 597 if (ha->parity_pause_errors == 0 ||
578 598 ha->parity_hccr_err != hccr_reg ||
579 599 ha->parity_stat_err != stat) {
580 600 cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
581 601 "Pause Error - hccr=%xh, stat=%xh, count=%d",
582 602 ha->instance, hccr_reg, stat,
583 603 ha->parity_pause_errors);
584 604 ha->parity_hccr_err = hccr_reg;
585 605 ha->parity_stat_err = stat;
586 606 }
587 607
588 608 EL(ha, "parity/pause error, isp_abort_needed\n");
589 609
610 + INDX_INTR_UNLOCK(ha, intr);
590 611 if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
591 612 ql_reset_chip(ha);
592 613 }
614 + INDX_INTR_LOCK(ha, intr);
593 615
594 616 if (ha->parity_pause_errors == 0) {
595 617 ha->log_parity_pause = B_TRUE;
596 618 }
597 619
598 620 if (ha->parity_pause_errors < 0xffffffff) {
599 621 ha->parity_pause_errors++;
600 622 }
601 623
602 624 *set_flags |= ISP_ABORT_NEEDED;
603 625
604 626 /* Disable ISP interrupts. */
605 - CFG_IST(ha, CFG_CTRL_8021) ? ql_8021_disable_intrs(ha) :
606 - WRT16_IO_REG(ha, ictrl, 0);
607 - ADAPTER_STATE_LOCK(ha);
608 - ha->flags &= ~INTERRUPTS_ENABLED;
609 - ADAPTER_STATE_UNLOCK(ha);
627 + ql_disable_intr(ha);
610 628 } else {
611 629 EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
612 630 stat, hccr_reg);
613 631 }
614 632 }
615 633
616 634 /*
617 - * ql_spurious_intr
618 - * Inform Solaris of spurious interrupts.
619 - *
620 - * Input:
621 - * ha: adapter state pointer.
622 - * intr_clr: early interrupt clear
623 - *
624 - * Context:
625 - * Interrupt or Kernel context, no mailbox commands allowed.
626 - */
627 -static void
628 -ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
629 -{
630 - ddi_devstate_t state;
631 -
632 - EL(ha, "Spurious interrupt\n");
633 -
634 - /* Disable ISP interrupts. */
635 - WRT16_IO_REG(ha, ictrl, 0);
636 - ADAPTER_STATE_LOCK(ha);
637 - ha->flags &= ~INTERRUPTS_ENABLED;
638 - ADAPTER_STATE_UNLOCK(ha);
639 -
640 - /* Clear RISC interrupt */
641 - if (intr_clr) {
642 - if (CFG_IST(ha, CFG_CTRL_8021)) {
643 - ql_8021_clr_fw_intr(ha);
644 - } else if (CFG_IST(ha, CFG_CTRL_242581)) {
645 - WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
646 - } else {
647 - WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
648 - }
649 - }
650 -
651 - state = ddi_get_devstate(ha->dip);
652 - if (state == DDI_DEVSTATE_UP) {
653 - /*EMPTY*/
654 - ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
655 - DDI_DEVICE_FAULT, "spurious interrupts");
656 - }
657 -}
658 -
659 -/*
660 635 * ql_mbx_completion
661 636 * Processes mailbox completions.
662 637 *
663 638 * Input:
664 639 * ha: adapter state pointer.
665 640 * mb0: Mailbox 0 contents.
666 641 * set_flags: task daemon flags to set.
667 642 * reset_flags: task daemon flags to reset.
668 - * intr_clr: early interrupt clear
669 643 *
670 644 * Context:
671 645 * Interrupt context.
672 646 */
673 647 /* ARGSUSED */
674 648 static void
675 -ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
676 - uint32_t *reset_flags, int intr_clr)
649 +ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint64_t *set_flags,
650 + uint64_t *reset_flags)
677 651 {
678 652 uint32_t index;
679 653 uint16_t cnt;
680 654
681 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
655 + QL_PRINT_3(ha, "started\n");
682 656
683 657 /* Load return mailbox registers. */
684 658 MBX_REGISTER_LOCK(ha);
685 659
686 660 if (ha->mcp != NULL) {
687 661 ha->mcp->mb[0] = mb0;
688 662 index = ha->mcp->in_mb & ~MBX_0;
689 663
690 664 for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
691 665 index >>= 1;
692 666 if (index & MBX_0) {
693 667 ha->mcp->mb[cnt] = RD16_IO_REG(ha,
694 668 mailbox_out[cnt]);
695 669 }
696 670 }
697 671
698 672 } else {
699 673 EL(ha, "mcp == NULL\n");
700 674 }
701 675
702 - if (intr_clr) {
703 - /* Clear RISC interrupt. */
704 - if (CFG_IST(ha, CFG_CTRL_8021)) {
705 - ql_8021_clr_fw_intr(ha);
706 - } else if (CFG_IST(ha, CFG_CTRL_242581)) {
707 - WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
708 - } else {
709 - WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
710 - }
711 - }
712 -
713 676 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
714 677 if (ha->flags & INTERRUPTS_ENABLED) {
715 678 cv_broadcast(&ha->cv_mbx_intr);
716 679 }
717 680
718 681 MBX_REGISTER_UNLOCK(ha);
719 682
720 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
683 + QL_PRINT_3(ha, "done\n");
721 684 }
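For context, a hedged sketch of the waiter side of this handshake: a mailbox command issuer typically sleeps on cv_mbx_intr until the ISR path above sets MBX_INTERRUPT and broadcasts. The member name mbx_mutex below is a stand-in for whichever kmutex_t MBX_REGISTER_LOCK() wraps; it is an assumption, not taken from this diff.

	MBX_REGISTER_LOCK(ha);
	while (!(ha->mailbox_flags & MBX_INTERRUPT)) {
		/* "mbx_mutex" is hypothetical; use the lock behind MBX_REGISTER_LOCK(). */
		cv_wait(&ha->cv_mbx_intr, &ha->mbx_mutex);
	}
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_INTERRUPT);
	/* ha->mcp->mb[] now holds the returned mailbox registers. */
	MBX_REGISTER_UNLOCK(ha);
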
722 685
723 686 /*
724 687 * ql_async_event
725 688 * Processes asynchronous events.
726 689 *
727 690 * Input:
728 691 * ha: adapter state pointer.
692 + * rsp_q: response queue structure pointer.
729 693 * mbx: Mailbox 0 register.
730 694 * done_q: head pointer to done queue.
731 695 * set_flags: task daemon flags to set.
732 696 * reset_flags: task daemon flags to reset.
733 - * intr_clr: early interrupt clear
734 697 *
735 698 * Context:
736 699 * Interrupt or Kernel context, no mailbox commands allowed.
737 700 */
738 701 static void
739 -ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
740 - uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
702 +ql_async_event(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, uint32_t mbx,
703 + ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
741 704 {
742 - uint32_t handle;
743 - uint32_t index;
744 - uint16_t cnt;
745 - uint16_t mb[MAX_MBOX_COUNT];
705 + uint32_t index, handles[5];
706 + uint16_t cnt, handle_cnt, mb[MAX_MBOX_COUNT];
746 707 ql_srb_t *sp;
747 708 port_id_t s_id;
748 709 ql_tgt_t *tq;
749 - boolean_t intr = B_TRUE;
750 710 ql_adapter_state_t *vha;
751 711
752 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
712 + QL_PRINT_3(ha, "started\n");
753 713
754 714 /* Setup to process fast completion. */
755 715 mb[0] = LSW(mbx);
756 716 switch (mb[0]) {
757 717 case MBA_SCSI_COMPLETION:
758 - handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
718 + handles[0] = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
759 719 RD16_IO_REG(ha, mailbox_out[2]));
720 + handle_cnt = 1;
760 721 break;
761 722
762 723 case MBA_CMPLT_1_16BIT:
763 - handle = MSW(mbx);
724 + handles[0] = MSW(mbx);
725 + handle_cnt = 1;
764 726 mb[0] = MBA_SCSI_COMPLETION;
765 727 break;
766 728
729 + case MBA_CMPLT_2_16BIT:
730 + handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
731 + handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
732 + handle_cnt = 2;
733 + mb[0] = MBA_SCSI_COMPLETION;
734 + break;
735 +
736 + case MBA_CMPLT_3_16BIT:
737 + handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
738 + handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
739 + handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
740 + handle_cnt = 3;
741 + mb[0] = MBA_SCSI_COMPLETION;
742 + break;
743 +
744 + case MBA_CMPLT_4_16BIT:
745 + handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
746 + handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
747 + handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
748 + handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
749 + handle_cnt = 4;
750 + mb[0] = MBA_SCSI_COMPLETION;
751 + break;
752 +
753 + case MBA_CMPLT_5_16BIT:
754 + handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
755 + handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
756 + handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
757 + handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
758 + handles[4] = (uint32_t)RD16_IO_REG(ha, mailbox_out[7]);
759 + handle_cnt = 5;
760 + mb[0] = MBA_SCSI_COMPLETION;
761 + break;
762 +
767 763 case MBA_CMPLT_1_32BIT:
768 - handle = SHORT_TO_LONG(MSW(mbx),
764 + handles[0] = SHORT_TO_LONG(MSW(mbx),
769 765 RD16_IO_REG(ha, mailbox_out[2]));
766 + handle_cnt = 1;
770 767 mb[0] = MBA_SCSI_COMPLETION;
771 768 break;
772 769
770 + case MBA_CMPLT_2_32BIT:
771 + handles[0] = SHORT_TO_LONG(
772 + RD16_IO_REG(ha, mailbox_out[1]),
773 + RD16_IO_REG(ha, mailbox_out[2]));
774 + handles[1] = SHORT_TO_LONG(
775 + RD16_IO_REG(ha, mailbox_out[6]),
776 + RD16_IO_REG(ha, mailbox_out[7]));
777 + handle_cnt = 2;
778 + mb[0] = MBA_SCSI_COMPLETION;
779 + break;
780 +
773 781 case MBA_CTIO_COMPLETION:
774 782 case MBA_IP_COMPLETION:
775 - handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
783 + handles[0] = CFG_IST(ha, CFG_CTRL_22XX) ? SHORT_TO_LONG(
776 784 RD16_IO_REG(ha, mailbox_out[1]),
777 785 RD16_IO_REG(ha, mailbox_out[2])) :
778 786 SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2]));
787 + handle_cnt = 1;
779 788 mb[0] = MBA_SCSI_COMPLETION;
780 789 break;
781 790
782 791 default:
783 792 break;
784 793 }
785 794
786 795 /* Handle asynchronous event */
787 796 switch (mb[0]) {
788 797 case MBA_SCSI_COMPLETION:
789 - QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
790 - ha->instance);
798 + QL_PRINT_5(ha, "Fast post completion\n");
791 799
792 - if (intr_clr) {
793 - /* Clear RISC interrupt */
794 - if (CFG_IST(ha, CFG_CTRL_8021)) {
795 - ql_8021_clr_fw_intr(ha);
796 - } else if (CFG_IST(ha, CFG_CTRL_242581)) {
797 - WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
798 - } else {
799 - WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
800 - }
801 - intr = B_FALSE;
802 - }
803 -
804 800 if ((ha->flags & ONLINE) == 0) {
805 801 break;
806 802 }
807 803
808 - /* Get handle. */
809 - index = handle & OSC_INDEX_MASK;
804 + for (cnt = 0; cnt < handle_cnt; cnt++) {
805 + QL_PRINT_5(ha, "Fast post completion, handle=%xh\n",
806 + handles[cnt]);
810 807
811 - /* Validate handle. */
812 - sp = index < MAX_OUTSTANDING_COMMANDS ?
813 - ha->outstanding_cmds[index] : NULL;
808 + /* Get handle. */
809 + index = handles[cnt] & OSC_INDEX_MASK;
814 810
815 - if (sp != NULL && sp->handle == handle) {
816 - ha->outstanding_cmds[index] = NULL;
817 - sp->handle = 0;
818 - sp->flags &= ~SRB_IN_TOKEN_ARRAY;
811 + /* Validate handle. */
812 + sp = index < ha->osc_max_cnt ?
813 + ha->outstanding_cmds[index] : NULL;
819 814
820 - /* Set completed status. */
821 - sp->flags |= SRB_ISP_COMPLETED;
815 + if (sp == QL_ABORTED_SRB(ha)) {
816 + EL(ha, "QL_ABORTED_SRB handle=%xh\n",
817 + handles[cnt]);
818 + ha->outstanding_cmds[index] = NULL;
819 + continue;
820 + }
821 + if (sp != NULL && sp->handle == handles[cnt]) {
822 + ha->outstanding_cmds[index] = NULL;
823 + sp->handle = 0;
824 + sp->flags &= ~SRB_IN_TOKEN_ARRAY;
822 825
823 - /* Set completion status */
824 - sp->pkt->pkt_reason = CS_COMPLETE;
826 + /* Set completed status. */
827 + sp->flags |= SRB_ISP_COMPLETED;
825 828
826 - if (!(sp->flags & SRB_FCP_CMD_PKT)) {
827 - /* Place block on done queue */
828 - ql_add_link_b(done_q, &sp->cmd);
829 - } else {
830 - ql_fast_fcp_post(sp);
831 - }
832 - } else if (handle != QL_FCA_BRAND) {
833 - if (sp == NULL) {
834 - EL(ha, "%xh unknown IOCB handle=%xh\n",
835 - mb[0], handle);
836 - } else {
837 - EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
838 - "sp=%xh\n", mb[0], handle, sp->handle);
839 - }
829 + /* Set completion status */
830 + sp->pkt->pkt_reason = CS_COMPLETE;
840 831
841 - EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
842 - "mbx6=%xh, mbx7=%xh\n", mb[0],
843 - RD16_IO_REG(ha, mailbox_out[1]),
844 - RD16_IO_REG(ha, mailbox_out[2]),
845 - RD16_IO_REG(ha, mailbox_out[3]),
846 - RD16_IO_REG(ha, mailbox_out[6]),
847 - RD16_IO_REG(ha, mailbox_out[7]));
832 + if (!(sp->flags & SRB_FCP_CMD_PKT)) {
833 + /* Place block on done queue */
834 + ql_add_link_b(done_q, &sp->cmd);
835 + } else {
836 + ql_fast_fcp_post(sp, rsp_q);
837 + }
838 + } else if (handles[cnt] != QL_FCA_BRAND) {
839 + if (sp == NULL) {
840 + EL(ha, "%xh unknown IOCB handle=%xh\n",
841 + mb[0], handles[cnt]);
842 + } else {
843 + EL(ha, "%xh mismatch IOCB handle "
844 + "pkt=%xh, sp=%xh\n", mb[0],
845 + handles[cnt], sp->handle);
846 + }
848 847
849 - (void) ql_binary_fw_dump(ha, FALSE);
848 + EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, "
849 + "mbx3=%xh, mbx6=%xh, mbx7=%xh\n", mb[0],
850 + RD16_IO_REG(ha, mailbox_out[1]),
851 + RD16_IO_REG(ha, mailbox_out[2]),
852 + RD16_IO_REG(ha, mailbox_out[3]),
853 + RD16_IO_REG(ha, mailbox_out[6]),
854 + RD16_IO_REG(ha, mailbox_out[7]));
850 855
851 - if (!(ha->task_daemon_flags &
852 - (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
853 - EL(ha, "%xh ISP Invalid handle, "
854 - "isp_abort_needed\n", mb[0]);
855 - *set_flags |= ISP_ABORT_NEEDED;
856 + ADAPTER_STATE_LOCK(ha);
857 + ha->flags |= FW_DUMP_NEEDED;
858 + ADAPTER_STATE_UNLOCK(ha);
859 +
860 + if (!(ha->task_daemon_flags &
861 + ISP_ABORT_NEEDED)) {
862 + EL(ha, "%xh ISP Invalid handle, "
863 + "isp_abort_needed\n", mb[0]);
864 + *set_flags |= ISP_ABORT_NEEDED;
865 + }
856 866 }
857 867 }
858 868 break;
859 869
860 870 case MBA_RESET: /* Reset */
861 871 EL(ha, "%xh Reset received\n", mb[0]);
862 - *set_flags |= RESET_MARKER_NEEDED;
872 + *set_flags |= MARKER_NEEDED;
863 873 break;
864 874
865 875 case MBA_SYSTEM_ERR: /* System Error */
866 876 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
867 877 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
868 878 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
869 879 mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
870 880
871 881 EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
872 882 "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
873 883 "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
874 884 "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
875 885 RD16_IO_REG(ha, mailbox_out[4]),
876 886 RD16_IO_REG(ha, mailbox_out[5]),
877 887 RD16_IO_REG(ha, mailbox_out[6]), mb[7],
878 888 RD16_IO_REG(ha, mailbox_out[8]),
879 889 RD16_IO_REG(ha, mailbox_out[9]),
880 890 RD16_IO_REG(ha, mailbox_out[10]),
881 891 RD16_IO_REG(ha, mailbox_out[11]),
882 892 RD16_IO_REG(ha, mailbox_out[12]));
883 893
884 894 EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
885 895 "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
886 896 "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
887 897 mb[0], RD16_IO_REG(ha, mailbox_out[13]),
888 898 RD16_IO_REG(ha, mailbox_out[14]),
889 899 RD16_IO_REG(ha, mailbox_out[15]),
890 900 RD16_IO_REG(ha, mailbox_out[16]),
891 901 RD16_IO_REG(ha, mailbox_out[17]),
892 902 RD16_IO_REG(ha, mailbox_out[18]),
893 903 RD16_IO_REG(ha, mailbox_out[19]),
894 904 RD16_IO_REG(ha, mailbox_out[20]),
895 905 RD16_IO_REG(ha, mailbox_out[21]),
896 906 RD16_IO_REG(ha, mailbox_out[22]),
897 907 RD16_IO_REG(ha, mailbox_out[23]));
898 908
899 909 if (ha->reg_off->mbox_cnt > 24) {
900 910 EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
901 911 "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
902 912 "mbx30=%xh, mbx31=%xh\n", mb[0],
903 913 RD16_IO_REG(ha, mailbox_out[24]),
904 914 RD16_IO_REG(ha, mailbox_out[25]),
905 915 RD16_IO_REG(ha, mailbox_out[26]),
906 916 RD16_IO_REG(ha, mailbox_out[27]),
907 917 RD16_IO_REG(ha, mailbox_out[28]),
908 918 RD16_IO_REG(ha, mailbox_out[29]),
909 919 RD16_IO_REG(ha, mailbox_out[30]),
910 920 RD16_IO_REG(ha, mailbox_out[31]));
911 921 }
912 922
913 - (void) ql_binary_fw_dump(ha, FALSE);
923 + ADAPTER_STATE_LOCK(ha);
924 + ha->flags |= FW_DUMP_NEEDED;
925 + ADAPTER_STATE_UNLOCK(ha);
914 926
915 - (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
916 - mb[2], mb[3]);
927 + /* Signal task daemon to store error log. */
928 + if (ha->errlog[0] == 0) {
929 + ha->errlog[3] = mb[3];
930 + ha->errlog[2] = mb[2];
931 + ha->errlog[1] = mb[1];
932 + ha->errlog[0] = FLASH_ERRLOG_AEN_8002;
933 + }
917 934
918 935 if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
919 936 ADAPTER_STATE_LOCK(ha);
920 937 ha->flags |= MPI_RESET_NEEDED;
921 938 ADAPTER_STATE_UNLOCK(ha);
922 939 }
923 940
924 941 *set_flags |= ISP_ABORT_NEEDED;
925 942 ha->xioctl->ControllerErrorCount++;
926 943 break;
927 944
928 945 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
929 946 EL(ha, "%xh Request Transfer Error received, "
930 947 "isp_abort_needed\n", mb[0]);
931 948
932 - (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
933 - RD16_IO_REG(ha, mailbox_out[1]),
934 - RD16_IO_REG(ha, mailbox_out[2]),
935 - RD16_IO_REG(ha, mailbox_out[3]));
949 + /* Signal task daemon to store error log. */
950 + if (ha->errlog[0] == 0) {
951 + ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
952 + ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
953 + ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
954 + ha->errlog[0] = FLASH_ERRLOG_AEN_8003;
955 + }
936 956
937 957 *set_flags |= ISP_ABORT_NEEDED;
938 958 ha->xioctl->ControllerErrorCount++;
959 +
960 + (void) qlc_fm_report_err_impact(ha,
961 + QL_FM_EREPORT_MBA_REQ_TRANSFER_ERR);
962 +
939 963 break;
940 964
941 965 case MBA_RSP_TRANSFER_ERR: /* Response Xfer Err */
942 966 EL(ha, "%xh Response Transfer Error received,"
943 967 " isp_abort_needed\n", mb[0]);
944 968
945 - (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
946 - RD16_IO_REG(ha, mailbox_out[1]),
947 - RD16_IO_REG(ha, mailbox_out[2]),
948 - RD16_IO_REG(ha, mailbox_out[3]));
969 + /* Signal task daemon to store error log. */
970 + if (ha->errlog[0] == 0) {
971 + ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
972 + ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
973 + ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
974 + ha->errlog[0] = FLASH_ERRLOG_AEN_8004;
975 + }
949 976
950 977 *set_flags |= ISP_ABORT_NEEDED;
951 978 ha->xioctl->ControllerErrorCount++;
979 +
980 + (void) qlc_fm_report_err_impact(ha,
981 + QL_FM_EREPORT_MBA_RSP_TRANSFER_ERR);
982 +
952 983 break;
953 984
954 985 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
955 - EL(ha, "%xh Request Queue Wake-up received\n",
956 - mb[0]);
986 + EL(ha, "%xh Request Queue Wake-up "
987 + "received, mbx1=%xh\n", mb[0],
988 + RD16_IO_REG(ha, mailbox_out[1]));
957 989 break;
958 990
959 991 case MBA_MENLO_ALERT: /* Menlo Alert Notification */
960 992 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
961 993 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
962 994 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
963 995
964 996 EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
965 997 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
966 998
967 999 switch (mb[1]) {
968 1000 case MLA_LOGIN_OPERATIONAL_FW:
969 1001 ADAPTER_STATE_LOCK(ha);
970 1002 ha->flags |= MENLO_LOGIN_OPERATIONAL;
971 1003 ADAPTER_STATE_UNLOCK(ha);
972 1004 break;
973 1005 case MLA_PANIC_RECOVERY:
974 1006 case MLA_LOGIN_DIAGNOSTIC_FW:
975 1007 case MLA_LOGIN_GOLDEN_FW:
976 1008 case MLA_REJECT_RESPONSE:
977 1009 default:
978 1010 break;
979 1011 }
980 1012 break;
981 1013
982 1014 case MBA_LIP_F8: /* Received a LIP F8. */
983 1015 case MBA_LIP_RESET: /* LIP reset occurred. */
984 1016 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
985 - if (CFG_IST(ha, CFG_CTRL_8081)) {
1017 + if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
986 1018 EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
987 1019 "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
988 1020 RD16_IO_REG(ha, mailbox_out[2]));
989 1021 } else {
990 1022 EL(ha, "%xh LIP received\n", mb[0]);
991 1023 }
992 1024
993 1025 ADAPTER_STATE_LOCK(ha);
994 1026 ha->flags &= ~POINT_TO_POINT;
995 1027 ADAPTER_STATE_UNLOCK(ha);
996 1028
997 1029 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
998 1030 *set_flags |= LOOP_DOWN;
999 1031 }
1000 1032 ql_port_state(ha, FC_STATE_OFFLINE,
1001 1033 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1002 1034
1003 1035 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1004 1036 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1005 1037 }
1006 1038
1007 1039 ha->adapter_stats->lip_count++;
1008 1040
1009 1041 /* Update AEN queue. */
1010 1042 ha->xioctl->TotalLipResets++;
1011 1043 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1012 1044 ql_enqueue_aen(ha, mb[0], NULL);
1013 1045 }
1014 1046 break;
1015 1047
1016 1048 case MBA_LOOP_UP:
1017 - if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
1018 - CFG_CTRL_24258081))) {
1049 + if (!CFG_IST(ha, CFG_CTRL_22XX)) {
1019 1050 ha->iidma_rate = RD16_IO_REG(ha, mailbox_out[1]);
1020 1051 if (ha->iidma_rate == IIDMA_RATE_1GB) {
1021 1052 ha->state = FC_PORT_STATE_MASK(
1022 1053 ha->state) | FC_STATE_1GBIT_SPEED;
1023 1054 index = 1;
1024 1055 } else if (ha->iidma_rate == IIDMA_RATE_2GB) {
1025 1056 ha->state = FC_PORT_STATE_MASK(
1026 1057 ha->state) | FC_STATE_2GBIT_SPEED;
1027 1058 index = 2;
1028 1059 } else if (ha->iidma_rate == IIDMA_RATE_4GB) {
1029 1060 ha->state = FC_PORT_STATE_MASK(
1030 1061 ha->state) | FC_STATE_4GBIT_SPEED;
1031 1062 index = 4;
1032 1063 } else if (ha->iidma_rate == IIDMA_RATE_8GB) {
1033 1064 ha->state = FC_PORT_STATE_MASK(
1034 1065 ha->state) | FC_STATE_8GBIT_SPEED;
1035 1066 index = 8;
1036 1067 } else if (ha->iidma_rate == IIDMA_RATE_10GB) {
1037 1068 ha->state = FC_PORT_STATE_MASK(
1038 1069 ha->state) | FC_STATE_10GBIT_SPEED;
1039 1070 index = 10;
1071 + } else if (ha->iidma_rate == IIDMA_RATE_16GB) {
1072 + ha->state = FC_PORT_STATE_MASK(
1073 + ha->state) | FC_STATE_16GBIT_SPEED;
1074 + index = 16;
1075 + } else if (ha->iidma_rate == IIDMA_RATE_32GB) {
1076 + ha->state = FC_PORT_STATE_MASK(
1077 + ha->state) | FC_STATE_32GBIT_SPEED;
1078 + index = 32;
1040 1079 } else {
1041 1080 ha->state = FC_PORT_STATE_MASK(
1042 1081 ha->state);
1043 1082 index = 0;
1044 1083 }
1045 1084 } else {
1046 1085 ha->iidma_rate = IIDMA_RATE_1GB;
1047 1086 ha->state = FC_PORT_STATE_MASK(ha->state) |
1048 1087 FC_STATE_FULL_SPEED;
1049 1088 index = 1;
1050 1089 }
1051 1090
1052 1091 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1053 1092 vha->state = FC_PORT_STATE_MASK(vha->state) |
1054 1093 FC_PORT_SPEED_MASK(ha->state);
1055 1094 }
1056 1095 EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
1057 1096
1058 1097 /* Update AEN queue. */
1059 1098 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1060 1099 ql_enqueue_aen(ha, mb[0], NULL);
1061 1100 }
1062 1101 break;
1063 1102
1064 1103 case MBA_LOOP_DOWN:
1065 1104 EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1066 1105 "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1067 1106 RD16_IO_REG(ha, mailbox_out[2]),
1068 1107 RD16_IO_REG(ha, mailbox_out[3]),
1069 1108 RD16_IO_REG(ha, mailbox_out[4]));
1070 1109
1071 1110 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1072 1111 *set_flags |= LOOP_DOWN;
1073 1112 }
1074 1113 ql_port_state(ha, FC_STATE_OFFLINE,
1075 1114 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1076 1115
1077 1116 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1078 1117 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1079 1118 }
1080 1119
1081 - if (CFG_IST(ha, CFG_CTRL_258081)) {
1120 + if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1082 1121 ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]);
1083 1122 }
1084 1123
1085 1124 /* Update AEN queue. */
1086 1125 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1087 1126 ql_enqueue_aen(ha, mb[0], NULL);
1088 1127 }
1089 1128 break;
1090 1129
1091 1130 case MBA_PORT_UPDATE:
1092 1131 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1093 1132 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1094 1133 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1095 1134 RD16_IO_REG(ha, mailbox_out[3]) : 0);
1096 1135
1097 1136 /* Locate port state structure. */
1098 1137 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1099 1138 if (vha->vp_index == LSB(mb[3])) {
1100 1139 break;
1101 1140 }
1102 1141 }
1103 1142 if (vha == NULL) {
1104 1143 break;
1105 1144 }
1106 1145
1107 - if (CFG_IST(ha, CFG_CTRL_8081) && mb[1] == 0xffff &&
1146 + if (mb[1] == 0xffff &&
1108 1147 mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a ||
1109 1148 MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d ||
1110 1149 MSB(mb[3]) == 0x1e)) {
1150 + EL(ha, "%xh Port Database Update, Loop down "
1151 + "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1152 + mb[0], mb[1], mb[2], mb[3]);
1111 1153 /*
1112 1154 * received FLOGI reject
1113 1155 * received FLOGO
1114 1156 * FCF configuration changed
1115 1157 * FIP Clear Virtual Link received
1116 - * FKA timeout
1158 + * FCF timeout
1117 1159 */
1118 1160 if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1119 1161 *set_flags |= LOOP_DOWN;
1120 1162 }
1121 1163 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE |
1122 1164 COMMAND_WAIT_NEEDED | LOOP_DOWN);
1123 1165 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1124 1166 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1125 1167 }
1126 1168 /*
1127 1169 * In N port 2 N port topology the FW provides a port
1128 1170 * database entry at loop_id 0x7fe which we use to
1129 1171 * acquire the Ports WWPN.
1130 1172 */
1131 1173 } else if ((mb[1] != 0x7fe) &&
1132 1174 ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1133 - (CFG_IST(ha, CFG_CTRL_24258081) &&
1134 - (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1175 + (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
1176 + (mb[2] != 6 || mb[3] != 0))))) {
1135 1177 EL(ha, "%xh Port Database Update, Login/Logout "
1136 1178 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1137 1179 mb[0], mb[1], mb[2], mb[3]);
1138 1180 } else {
1139 1181 EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1140 1182 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1141 1183 mb[3]);
1142 1184 *set_flags |= LOOP_RESYNC_NEEDED;
1143 1185 *set_flags &= ~LOOP_DOWN;
1144 1186 *reset_flags |= LOOP_DOWN;
1145 1187 *reset_flags &= ~LOOP_RESYNC_NEEDED;
1146 1188 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1147 1189 TASK_DAEMON_LOCK(ha);
1148 1190 vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1149 1191 vha->task_daemon_flags &= ~LOOP_DOWN;
1150 1192 TASK_DAEMON_UNLOCK(ha);
1151 1193 ADAPTER_STATE_LOCK(ha);
1152 1194 vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1153 1195 ADAPTER_STATE_UNLOCK(ha);
1154 1196 }
1155 1197
1156 1198 /* Update AEN queue. */
1157 1199 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1158 1200 ql_enqueue_aen(ha, mb[0], NULL);
1159 1201 }
1160 1202 break;
1161 1203
1162 1204 case MBA_RSCN_UPDATE:
1163 1205 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1164 1206 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1165 1207 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1166 1208 RD16_IO_REG(ha, mailbox_out[3]) : 0);
1167 1209
1168 1210 /* Locate port state structure. */
1169 1211 for (vha = ha; vha != NULL; vha = vha->vp_next) {
1170 1212 if (vha->vp_index == LSB(mb[3])) {
1171 1213 break;
1172 1214 }
1173 1215 }
1174 1216
1175 1217 if (vha == NULL) {
1176 1218 break;
1177 1219 }
1178 1220
1179 1221 if (LSB(mb[1]) == vha->d_id.b.domain &&
1180 1222 MSB(mb[2]) == vha->d_id.b.area &&
1181 1223 LSB(mb[2]) == vha->d_id.b.al_pa) {
1182 1224 EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1183 1225 "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1184 1226 } else {
1185 1227 EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1186 1228 "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1187 1229 if (FC_PORT_STATE_MASK(vha->state) !=
1188 1230 FC_STATE_OFFLINE) {
1189 1231 ql_rcv_rscn_els(vha, &mb[0], done_q);
1190 1232 TASK_DAEMON_LOCK(ha);
1191 1233 vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1192 1234 TASK_DAEMON_UNLOCK(ha);
1193 1235 *set_flags |= RSCN_UPDATE_NEEDED;
1194 1236 }
1195 1237 }
1196 1238
1197 1239 /* Update AEN queue. */
1198 1240 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1199 1241 ql_enqueue_aen(ha, mb[0], NULL);
1200 1242 }
1201 1243 break;
1202 1244
1203 1245 case MBA_LIP_ERROR: /* Loop initialization errors. */
1204 1246 EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1205 1247 RD16_IO_REG(ha, mailbox_out[1]));
1206 1248 break;
1207 1249
1208 1250 case MBA_IP_RECEIVE:
1209 1251 case MBA_IP_BROADCAST:
1210 1252 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1211 1253 mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1212 1254 mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1213 1255
1214 1256 EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1215 1257 "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1216 1258
1217 1259 /* Locate device queue. */
1218 1260 s_id.b.al_pa = LSB(mb[2]);
1219 1261 s_id.b.area = MSB(mb[2]);
1220 1262 s_id.b.domain = LSB(mb[1]);
1221 1263 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1222 1264 EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1223 1265 break;
1224 1266 }
1225 1267
1226 - cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1268 + cnt = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1227 1269 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1228 1270 ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1229 1271 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1230 1272 ha->ip_init_ctrl_blk.cb.buf_size[1]));
1231 1273
1232 1274 tq->ub_sequence_length = mb[3];
1233 1275 tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1234 1276 if (mb[3] % cnt) {
1235 1277 tq->ub_total_seg_cnt++;
1236 1278 }
1237 1279 cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1238 1280
1239 1281 for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1240 1282 index++) {
1241 1283 mb[index] = RD16_IO_REG(ha, mailbox_out[index]);
1242 1284 }
1243 1285
1244 1286 tq->ub_seq_id = ++ha->ub_seq_id;
1245 1287 tq->ub_seq_cnt = 0;
1246 1288 tq->ub_frame_ro = 0;
1247 1289 tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1248 - (CFG_IST(ha, CFG_CTRL_24258081) ? BROADCAST_24XX_HDL :
1290 + (CFG_IST(ha, CFG_CTRL_24XX) ? BROADCAST_24XX_HDL :
1249 1291 IP_BROADCAST_LOOP_ID) : tq->loop_id);
1250 1292 ha->rcv_dev_q = tq;
1251 1293
1252 1294 for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1253 1295 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1254 1296 if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1255 1297 QL_SUCCESS) {
1256 1298 EL(ha, "ql_ub_frame_hdr failed, "
1257 1299 "isp_abort_needed\n");
1258 1300 *set_flags |= ISP_ABORT_NEEDED;
1259 1301 break;
1260 1302 }
1261 1303 }
1262 1304 break;
1263 1305
1264 1306 case MBA_IP_LOW_WATER_MARK:
1265 1307 case MBA_IP_RCV_BUFFER_EMPTY:
1266 1308 EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1267 1309 mb[0]);
1268 1310 *set_flags |= NEED_UNSOLICITED_BUFFERS;
1269 1311 break;
1270 1312
1271 1313 case MBA_IP_HDR_DATA_SPLIT:
1272 1314 EL(ha, "%xh IP HDR data split received\n", mb[0]);
1273 1315 break;
1274 1316
1275 1317 case MBA_ERROR_LOGGING_DISABLED:
1276 1318 EL(ha, "%xh error logging disabled received, "
1277 1319 "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1278 1320 break;
1279 1321
1280 1322 case MBA_POINT_TO_POINT:
1281 1323 /* case MBA_DCBX_COMPLETED: */
1282 - if (CFG_IST(ha, CFG_CTRL_8081)) {
1324 + if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1283 1325 EL(ha, "%xh DCBX completed received\n", mb[0]);
1284 1326 } else {
1285 1327 EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1286 1328 }
1287 1329 ADAPTER_STATE_LOCK(ha);
1288 1330 ha->flags |= POINT_TO_POINT;
1289 1331 ADAPTER_STATE_UNLOCK(ha);
1332 + if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1333 + *set_flags |= LOOP_DOWN;
1334 + }
1335 + if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1336 + ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1337 + }
1338 + ql_port_state(ha, FC_STATE_OFFLINE,
1339 + FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1290 1340 break;
1291 1341
1292 1342 case MBA_FCF_CONFIG_ERROR:
1293 1343 EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1294 1344 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1295 1345 break;
1296 1346
1297 1347 case MBA_DCBX_PARAM_CHANGED:
1298 1348 EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1299 1349 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1300 1350 break;
1301 1351
1302 1352 case MBA_CHG_IN_CONNECTION:
1303 1353 mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1304 1354 if (mb[1] == 2) {
1305 1355 EL(ha, "%xh Change In Connection received, "
1306 - "mbx1=%xh\n", mb[0], mb[1]);
1356 + "mbx1=%xh\n", mb[0], mb[1]);
1307 1357 ADAPTER_STATE_LOCK(ha);
1308 1358 ha->flags &= ~POINT_TO_POINT;
1309 1359 ADAPTER_STATE_UNLOCK(ha);
1310 1360 if (ha->topology & QL_N_PORT) {
1311 1361 ha->topology = (uint8_t)(ha->topology &
1312 1362 ~QL_N_PORT);
1313 1363 ha->topology = (uint8_t)(ha->topology |
1314 1364 QL_NL_PORT);
1315 1365 }
1316 1366 } else {
1317 1367 EL(ha, "%xh Change In Connection received, "
1318 1368 "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1319 1369 *set_flags |= ISP_ABORT_NEEDED;
1320 1370 }
1321 1371 break;
1322 1372
1323 1373 case MBA_ZIO_UPDATE:
1324 1374 EL(ha, "%xh ZIO response received\n", mb[0]);
1325 1375
1326 - ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1327 - ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1328 - intr = B_FALSE;
1376 + rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1377 + ql_response_pkt(ha, rsp_q, done_q, set_flags, reset_flags);
1329 1378 break;
1330 1379
1331 1380 case MBA_PORT_BYPASS_CHANGED:
1332 1381 EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1333 1382 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1334 1383 /*
1335 1384 * Event generated when there is a transition on
1336 1385 * port bypass of crystal+.
1337 1386 * Mailbox 1: Bit 0 - External.
1338 1387 * Bit 2 - Internal.
1339 1388 * When the bit is 0, the port is bypassed.
1340 1389 *
1341 1390 * For now we will generate a LIP for all cases.
1342 1391 */
1343 1392 *set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1344 1393 break;
1345 1394
1346 1395 case MBA_RECEIVE_ERROR:
1347 1396 EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1348 1397 mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1349 1398 RD16_IO_REG(ha, mailbox_out[2]));
1350 1399 break;
1351 1400
1352 1401 case MBA_LS_RJT_SENT:
1353 1402 EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1354 1403 RD16_IO_REG(ha, mailbox_out[1]));
1355 1404 break;
1356 1405
1357 1406 case MBA_FW_RESTART_COMP:
1358 1407 EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1359 1408 mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1360 1409 break;
1361 1410
1362 1411 /*
1363 1412 * MBA_IDC_COMPLETE & MBA_IDC_NOTIFICATION: We won't get another
1364 1413 * IDC async event until we ACK the current one.
1365 1414 */
1366 1415 case MBA_IDC_COMPLETE:
1367 - ha->idc_mb[0] = mb[0];
1368 - ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1369 - ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1370 - ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1371 - ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]);
1372 - ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]);
1373 - ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]);
1374 - ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
1375 - EL(ha, "%xh Inter-driver communication complete received, "
1376 - " mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh,"
1377 - " mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1378 - ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1379 - ha->idc_mb[6], ha->idc_mb[7]);
1380 - *set_flags |= IDC_EVENT;
1416 + mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1417 + EL(ha, "%xh MBA_IDC_COMPLETE received, mbx2=%xh\n", mb[0],
1418 + mb[2]);
1419 + switch (mb[2]) {
1420 + case IDC_OPC_FLASH_ACC:
1421 + case IDC_OPC_RESTART_MPI:
1422 + case IDC_OPC_PORT_RESET_MBC:
1423 + case IDC_OPC_SET_PORT_CONFIG_MBC:
1424 + ADAPTER_STATE_LOCK(ha);
1425 + ha->flags |= IDC_RESTART_NEEDED;
1426 + ADAPTER_STATE_UNLOCK(ha);
1427 + break;
1428 + default:
1429 + EL(ha, "unknown IDC completion opcode=%xh\n", mb[2]);
1430 + break;
1431 + }
1381 1432 break;
1382 1433
1383 1434 case MBA_IDC_NOTIFICATION:
1384 - ha->idc_mb[0] = mb[0];
1385 - ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1386 - ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1387 - ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1388 - ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]);
1389 - ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]);
1390 - ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]);
1391 - ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
1392 - EL(ha, "%xh Inter-driver communication request notification "
1393 - "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1394 - "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1395 - ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1396 - ha->idc_mb[6], ha->idc_mb[7]);
1397 - *set_flags |= IDC_EVENT;
1435 + for (cnt = 1; cnt < 8; cnt++) {
1436 + ha->idc_mb[cnt] = RD16_IO_REG(ha, mailbox_out[cnt]);
1437 + }
1438 + EL(ha, "%xh MBA_IDC_REQ_NOTIFICATION received, mbx1=%xh, "
1439 + "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh, "
1440 + "mbx7=%xh\n", mb[0], ha->idc_mb[1], ha->idc_mb[2],
1441 + ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5], ha->idc_mb[6],
1442 + ha->idc_mb[7]);
1443 +
1444 + ADAPTER_STATE_LOCK(ha);
1445 + switch (ha->idc_mb[2]) {
1446 + case IDC_OPC_DRV_START:
1447 + ha->flags |= IDC_RESTART_NEEDED;
1448 + break;
1449 + case IDC_OPC_FLASH_ACC:
1450 + case IDC_OPC_RESTART_MPI:
1451 + case IDC_OPC_PORT_RESET_MBC:
1452 + case IDC_OPC_SET_PORT_CONFIG_MBC:
1453 + ha->flags |= IDC_STALL_NEEDED;
1454 + break;
1455 + default:
1456 + EL(ha, "unknown IDC request opcode=%xh\n",
1457 + ha->idc_mb[2]);
1458 + break;
1459 + }
1460 + /*
1461 + * If there is a timeout value associated with this IDC
1462 + * notification then there is an implied requirement
1463 + * that we return an ACK.
1464 + */
1465 + if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
1466 + ha->flags |= IDC_ACK_NEEDED;
1467 + }
1468 + ADAPTER_STATE_UNLOCK(ha);
1469 +
1470 + ql_awaken_task_daemon(ha, NULL, 0, 0);
1398 1471 break;
1399 1472
1400 1473 case MBA_IDC_TIME_EXTENDED:
1401 - EL(ha, "%xh Inter-driver communication time extended received,"
1402 - " mbx1=%xh, mbx2=%xh\n", mb[0],
1403 - RD16_IO_REG(ha, mailbox_out[1]),
1404 - RD16_IO_REG(ha, mailbox_out[2]));
1474 + EL(ha, "%xh MBA_IDC_TIME_EXTENDED received, mbx2=%xh\n",
1475 + mb[0], RD16_IO_REG(ha, mailbox_out[2]));
1405 1476 break;
1406 1477
1407 1478 default:
1408 1479 EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1409 1480 "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1410 1481 RD16_IO_REG(ha, mailbox_out[2]),
1411 1482 RD16_IO_REG(ha, mailbox_out[3]));
1412 1483 break;
1413 1484 }
1414 1485
1415 - /* Clear RISC interrupt */
1416 - if (intr && intr_clr) {
1417 - if (CFG_IST(ha, CFG_CTRL_8021)) {
1418 - ql_8021_clr_fw_intr(ha);
1419 - } else if (CFG_IST(ha, CFG_CTRL_242581)) {
1420 - WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1421 - } else {
1422 - WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1423 - }
1424 - }
1425 -
1426 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1486 + QL_PRINT_3(ha, "done\n");
1427 1487 }
1428 1488
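Reviewer note: the reworked MBA_IDC_NOTIFICATION handling above maps the inter-driver-communication opcode in mailbox 2 onto adapter flags and only requests an ACK when the notification carries a timeout. The following is a minimal standalone sketch of that decision, not driver code; the IDC_* opcode values and flag bits are placeholders, since the real definitions live in the qlc headers.

/*
 * Illustrative sketch only -- opcode values and flag bits are assumed,
 * not the driver's definitions.
 */
#include <stdint.h>

#define	IDC_OPC_DRV_START	0x01	/* assumed opcode values */
#define	IDC_OPC_FLASH_ACC	0x02
#define	IDC_OPC_RESTART_MPI	0x03
#define	IDC_TIMEOUT_MASK	0x0f00	/* assumed timeout field in mbx1 */

#define	IDC_RESTART_NEEDED	(1u << 0)	/* assumed flag bits */
#define	IDC_STALL_NEEDED	(1u << 1)
#define	IDC_ACK_NEEDED		(1u << 2)

static uint32_t
idc_notification_flags(uint16_t mbx1, uint16_t mbx2)
{
	uint32_t	flags = 0;

	switch (mbx2) {
	case IDC_OPC_DRV_START:
		flags |= IDC_RESTART_NEEDED;	/* resume I/O later */
		break;
	case IDC_OPC_FLASH_ACC:
	case IDC_OPC_RESTART_MPI:
		flags |= IDC_STALL_NEEDED;	/* quiesce while peer works */
		break;
	default:
		break;				/* unknown opcode: log only */
	}

	/* A non-zero timeout field implies the firmware expects an ACK. */
	if (mbx1 & IDC_TIMEOUT_MASK) {
		flags |= IDC_ACK_NEEDED;
	}
	return (flags);
}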
1429 1489 /*
1430 1490 * ql_fast_fcp_post
1431 1491 * Fast path for good SCSI I/O completion.
1432 1492 *
1433 1493 * Input:
1434 1494 * sp: SRB pointer.
1495 + * rsp_q: response queue structure pointer.
1435 1496 *
1436 1497 * Context:
1437 1498 * Interrupt or Kernel context, no mailbox commands allowed.
1438 1499 */
1439 1500 static void
1440 -ql_fast_fcp_post(ql_srb_t *sp)
1501 +ql_fast_fcp_post(ql_srb_t *sp, ql_response_q_t *rsp_q)
1441 1502 {
1442 1503 ql_adapter_state_t *ha = sp->ha;
1443 1504 ql_lun_t *lq = sp->lun_queue;
1444 1505 ql_tgt_t *tq = lq->target_queue;
1445 1506
1446 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1507 + QL_PRINT_3(ha, "started\n");
1447 1508
1448 1509 /* Acquire device queue lock. */
1449 1510 DEVICE_QUEUE_LOCK(tq);
1450 1511
1451 1512 /* Decrement outstanding commands on device. */
1452 1513 if (tq->outcnt != 0) {
1453 1514 tq->outcnt--;
1454 1515 }
1455 1516
1456 1517 if (sp->flags & SRB_FCP_CMD_PKT) {
1457 1518 if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
1458 1519 /*
1459 1520 * Clear the flag for this LUN so that
1460 1521 * untagged commands can be submitted
1461 1522 * for it.
1462 1523 */
1463 1524 lq->flags &= ~LQF_UNTAGGED_PENDING;
1464 1525 }
1465 1526
1466 1527 if (lq->lun_outcnt != 0) {
1467 1528 lq->lun_outcnt--;
1468 1529 }
1469 1530 }
1470 1531
1471 1532 /* Reset port down retry count on good completion. */
1472 1533 tq->port_down_retry_count = ha->port_down_retry_count;
1473 1534 tq->qfull_retry_count = ha->qfull_retry_count;
1474 1535 ha->pha->timeout_cnt = 0;
1475 1536
1476 1537 /* Remove command from watchdog queue. */
1477 1538 if (sp->flags & SRB_WATCHDOG_ENABLED) {
1478 1539 ql_remove_link(&tq->wdg, &sp->wdg);
1479 1540 sp->flags &= ~SRB_WATCHDOG_ENABLED;
1480 1541 }
1481 1542
1482 1543 if (lq->cmd.first != NULL) {
1483 1544 ql_next(ha, lq);
1484 1545 } else {
1485 1546 /* Release LU queue specific lock. */
1486 1547 DEVICE_QUEUE_UNLOCK(tq);
1487 1548 if (ha->pha->pending_cmds.first != NULL) {
1488 1549 ql_start_iocb(ha, NULL);
1489 1550 }
1490 1551 }
1491 1552
1492 1553 /* Sync buffers if required. */
1493 1554 if (sp->flags & SRB_MS_PKT) {
1494 1555 (void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
1495 1556 DDI_DMA_SYNC_FORCPU);
1496 1557 }
1497 1558
1498 1559 /* Map ISP completion codes. */
1499 1560 sp->pkt->pkt_expln = FC_EXPLN_NONE;
1500 1561 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
1501 1562 sp->pkt->pkt_state = FC_PKT_SUCCESS;
1502 1563
1564 + (void) qlc_fm_check_pkt_dma_handle(ha, sp);
1565 +
1503 1566 /* Now call the pkt completion callback */
1504 1567 if (sp->flags & SRB_POLL) {
1505 1568 sp->flags &= ~SRB_POLL;
1506 - } else if (sp->pkt->pkt_comp) {
1507 - INTR_UNLOCK(ha);
1569 + } else if (ha->completion_thds == 1 && sp->pkt->pkt_comp &&
1570 + !(ha->flags & POLL_INTR)) {
1571 + INDX_INTR_UNLOCK(ha, rsp_q->rsp_q_number);
1508 1572 (*sp->pkt->pkt_comp)(sp->pkt);
1509 - INTR_LOCK(ha);
1573 + INDX_INTR_LOCK(ha, rsp_q->rsp_q_number);
1574 + } else {
1575 + ql_io_comp(sp);
1510 1576 }
1511 1577
1512 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1578 + if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1579 + != DDI_FM_OK) {
1580 + qlc_fm_report_err_impact(ha,
1581 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
1582 + }
1583 +
1584 + QL_PRINT_3(ha, "done\n");
1513 1585 }
1514 1586
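Reviewer note: ql_fast_fcp_post now calls the upper-layer completion routine directly only when there is a single completion thread and the adapter is not polling, and it drops the per-response-queue interrupt lock around that call so the callback can start new I/O without deadlocking; everything else is handed to ql_io_comp(). A sketch of the drop-lock-around-callback pattern, using a pthreads mutex in place of the driver's INDX_INTR_LOCK/INDX_INTR_UNLOCK macros and a hypothetical deferred_complete() hook:

#include <pthread.h>
#include <stddef.h>

typedef void (*comp_cb_t)(void *pkt);

/* Hypothetical deferred-completion hook standing in for ql_io_comp(). */
extern void deferred_complete(void *pkt);

static void
complete_fast_path(pthread_mutex_t *intr_lock, comp_cb_t cb, void *pkt,
    int direct_callback_ok)
{
	if (direct_callback_ok && cb != NULL) {
		/*
		 * Release the interrupt lock so the callback may submit
		 * new I/O (which takes the same lock) without deadlock.
		 */
		(void) pthread_mutex_unlock(intr_lock);
		cb(pkt);
		(void) pthread_mutex_lock(intr_lock);
	} else {
		/* Otherwise hand the packet to the common path. */
		deferred_complete(pkt);
	}
}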
1515 1587 /*
1516 1588 * ql_response_pkt
1517 1589 * Processes response entry.
1518 1590 *
1519 1591 * Input:
1520 1592 * ha: adapter state pointer.
1593 + * rsp_q: response queue structure pointer.
1521 1594 * done_q: head pointer to done queue.
1522 1595 * set_flags: task daemon flags to set.
1523 1596 * reset_flags: task daemon flags to reset.
1524 - * intr_clr: early interrupt clear
1525 1597 *
1526 1598 * Context:
1527 1599 * Interrupt or Kernel context, no mailbox commands allowed.
1528 1600 */
1529 1601 static void
1530 -ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1531 - uint32_t *reset_flags, int intr_clr)
1602 +ql_response_pkt(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
1603 + ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
1532 1604 {
1533 1605 response_t *pkt;
1534 1606 uint32_t dma_sync_size_1 = 0;
1535 1607 uint32_t dma_sync_size_2 = 0;
1536 1608 int status = 0;
1537 1609
1538 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1610 + QL_PRINT_3(ha, "started\n");
1539 1611
1540 - /* Clear RISC interrupt */
1541 - if (intr_clr) {
1542 - if (CFG_IST(ha, CFG_CTRL_8021)) {
1543 - ql_8021_clr_fw_intr(ha);
1544 - } else if (CFG_IST(ha, CFG_CTRL_242581)) {
1545 - WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1546 - } else {
1547 - WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1548 - }
1549 - }
1550 -
1551 - if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1612 + if (rsp_q->isp_rsp_index >= rsp_q->rsp_entry_cnt) {
1552 1613 EL(ha, "index error = %xh, isp_abort_needed",
1553 - ha->isp_rsp_index);
1614 + rsp_q->isp_rsp_index);
1554 1615 *set_flags |= ISP_ABORT_NEEDED;
1555 1616 return;
1556 1617 }
1557 1618
1558 1619 if ((ha->flags & ONLINE) == 0) {
1559 - QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance);
 1620 +		QL_PRINT_10(ha, "not online, done\n");
1560 1621 return;
1561 1622 }
1562 1623
1563 1624 /* Calculate size of response queue entries to sync. */
1564 - if (ha->isp_rsp_index > ha->rsp_ring_index) {
1625 + if (rsp_q->isp_rsp_index > rsp_q->rsp_ring_index) {
1565 1626 dma_sync_size_1 = (uint32_t)
1566 - ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1627 + ((uint32_t)(rsp_q->isp_rsp_index - rsp_q->rsp_ring_index) *
1567 1628 RESPONSE_ENTRY_SIZE);
1568 - } else if (ha->isp_rsp_index == 0) {
1629 + } else if (rsp_q->isp_rsp_index == 0) {
1569 1630 dma_sync_size_1 = (uint32_t)
1570 - ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1631 + ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
1571 1632 RESPONSE_ENTRY_SIZE);
1572 1633 } else {
1573 1634 /* Responses wrap around the Q */
1574 1635 dma_sync_size_1 = (uint32_t)
1575 - ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1636 + ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
1576 1637 RESPONSE_ENTRY_SIZE);
1577 1638 dma_sync_size_2 = (uint32_t)
1578 - (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1639 + (rsp_q->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1579 1640 }
1580 1641
1581 1642 /* Sync DMA buffer. */
1582 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
1583 - (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1584 - RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1585 - DDI_DMA_SYNC_FORKERNEL);
1643 + (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
1644 + (off_t)(rsp_q->rsp_ring_index * RESPONSE_ENTRY_SIZE),
1645 + dma_sync_size_1, DDI_DMA_SYNC_FORCPU);
1586 1646 if (dma_sync_size_2) {
1587 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
1588 - RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1589 - DDI_DMA_SYNC_FORKERNEL);
1647 + (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle, 0,
1648 + dma_sync_size_2, DDI_DMA_SYNC_FORCPU);
1590 1649 }
1591 1650
1592 - while (ha->rsp_ring_index != ha->isp_rsp_index) {
1593 - pkt = ha->response_ring_ptr;
1651 + if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1652 + != DDI_FM_OK) {
1653 + qlc_fm_report_err_impact(ha,
1654 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
1655 + }
1594 1656
1595 - QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1596 - ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1597 - QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1657 + while (rsp_q->rsp_ring_index != rsp_q->isp_rsp_index) {
1658 + pkt = rsp_q->rsp_ring_ptr;
1659 +
1660 + QL_PRINT_5(ha, "ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1661 + rsp_q->rsp_ring_index, rsp_q->isp_rsp_index);
1662 + QL_DUMP_5((uint8_t *)rsp_q->rsp_ring_ptr, 8,
1598 1663 RESPONSE_ENTRY_SIZE);
1599 1664
1600 1665 /* Adjust ring index. */
1601 - ha->rsp_ring_index++;
1602 - if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1603 - ha->rsp_ring_index = 0;
1604 - ha->response_ring_ptr = ha->response_ring_bp;
1666 + rsp_q->rsp_ring_index++;
1667 + if (rsp_q->rsp_ring_index == rsp_q->rsp_entry_cnt) {
1668 + rsp_q->rsp_ring_index = 0;
1669 + rsp_q->rsp_ring_ptr = rsp_q->rsp_ring.bp;
1605 1670 } else {
1606 - ha->response_ring_ptr++;
1671 + rsp_q->rsp_ring_ptr++;
1607 1672 }
1608 1673
1609 1674 /* Process packet. */
1610 - if (ha->status_srb != NULL && pkt->entry_type !=
1611 - STATUS_CONT_TYPE) {
1612 - ql_add_link_b(done_q, &ha->status_srb->cmd);
1613 - ha->status_srb = NULL;
1675 + if (rsp_q->status_srb != NULL &&
1676 + pkt->entry_type != STATUS_CONT_TYPE) {
1677 + ql_add_link_b(done_q, &rsp_q->status_srb->cmd);
1678 + rsp_q->status_srb = NULL;
1614 1679 }
1615 1680
1616 - pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1681 + pkt->entry_status = (uint8_t)
1682 + (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1617 1683 pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1618 1684
1619 - if (pkt->entry_status != 0) {
1620 - ql_error_entry(ha, pkt, done_q, set_flags,
1621 - reset_flags);
1685 + if (pkt->entry_status != 0 ||
1686 + pkt->entry_type == ABORTED_ENTRY_TYPE) {
1687 + ql_error_entry(ha, rsp_q,
1688 + pkt, done_q,
1689 + set_flags, reset_flags);
1622 1690 } else {
1623 1691 switch (pkt->entry_type) {
1624 1692 case STATUS_TYPE:
1625 - status |= CFG_IST(ha, CFG_CTRL_24258081) ?
1626 - ql_24xx_status_entry(ha,
1627 - (sts_24xx_entry_t *)pkt, done_q, set_flags,
1628 - reset_flags) :
1629 - ql_status_entry(ha, (sts_entry_t *)pkt,
1693 + status |= CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1694 + ql_24xx_status_entry(ha, rsp_q,
1695 + (sts_24xx_entry_t *)pkt, done_q,
1696 + set_flags, reset_flags) :
1697 + ql_status_entry(ha, rsp_q,
1698 + (sts_entry_t *)pkt,
1630 1699 done_q, set_flags, reset_flags);
1631 1700 break;
1632 1701 case STATUS_CONT_TYPE:
1633 - ql_status_cont_entry(ha,
1634 - (sts_cont_entry_t *)pkt, done_q, set_flags,
1635 - reset_flags);
1702 + ql_status_cont_entry(ha, rsp_q,
1703 + (sts_cont_entry_t *)pkt, done_q,
1704 + set_flags, reset_flags);
1636 1705 break;
1637 1706 case IP_TYPE:
1638 1707 case IP_A64_TYPE:
1639 1708 case IP_CMD_TYPE:
1640 - ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1709 + ql_ip_entry(ha, rsp_q,
1710 + (ip_entry_t *)pkt, done_q,
1641 1711 set_flags, reset_flags);
1642 1712 break;
1643 1713 case IP_RECEIVE_TYPE:
1644 - ql_ip_rcv_entry(ha,
1645 - (ip_rcv_entry_t *)pkt, done_q, set_flags,
1646 - reset_flags);
1714 + ql_ip_rcv_entry(ha, rsp_q,
1715 + (ip_rcv_entry_t *)pkt, done_q,
1716 + set_flags, reset_flags);
1647 1717 break;
1648 1718 case IP_RECEIVE_CONT_TYPE:
1649 - ql_ip_rcv_cont_entry(ha,
1719 + ql_ip_rcv_cont_entry(ha, rsp_q,
1650 1720 (ip_rcv_cont_entry_t *)pkt, done_q,
1651 1721 set_flags, reset_flags);
1652 1722 break;
1653 1723 case IP_24XX_RECEIVE_TYPE:
1654 - ql_ip_24xx_rcv_entry(ha,
1724 + ql_ip_24xx_rcv_entry(ha, rsp_q,
1655 1725 (ip_rcv_24xx_entry_t *)pkt, done_q,
1656 1726 set_flags, reset_flags);
1657 1727 break;
1658 1728 case MS_TYPE:
1659 - ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1729 + ql_ms_entry(ha, rsp_q,
1730 + (ms_entry_t *)pkt, done_q,
1660 1731 set_flags, reset_flags);
1661 1732 break;
1662 1733 case REPORT_ID_TYPE:
1663 - ql_report_id_entry(ha, (report_id_1_t *)pkt,
1664 - done_q, set_flags, reset_flags);
1734 + ql_report_id_entry(ha, rsp_q,
1735 + (report_id_acq_t *)pkt, done_q,
1736 + set_flags, reset_flags);
1665 1737 break;
1666 1738 case ELS_PASSTHRU_TYPE:
1667 - ql_els_passthru_entry(ha,
1668 - (els_passthru_entry_rsp_t *)pkt,
1669 - done_q, set_flags, reset_flags);
1739 + ql_els_passthru_entry(ha, rsp_q,
1740 + (els_passthru_entry_rsp_t *)pkt, done_q,
1741 + set_flags, reset_flags);
1670 1742 break;
1671 1743 case IP_BUF_POOL_TYPE:
1672 1744 case MARKER_TYPE:
1673 1745 case VP_MODIFY_TYPE:
1674 1746 case VP_CONTROL_TYPE:
1675 1747 break;
1676 1748 default:
1677 1749 EL(ha, "Unknown IOCB entry type=%xh\n",
1678 1750 pkt->entry_type);
1679 1751 break;
1680 1752 }
1681 1753 }
1682 1754 }
1683 1755
1684 1756 /* Inform RISC of processed responses. */
1685 - WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1686 1757
1758 + if (ha->flags & MULTI_QUEUE) {
1759 + WR32_MBAR_REG(ha, rsp_q->mbar_rsp_out, rsp_q->rsp_ring_index);
1760 + } else {
1761 + WRT16_IO_REG(ha, resp_out, rsp_q->rsp_ring_index);
1762 + }
1763 +
1764 + if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1765 + != DDI_FM_OK) {
1766 + qlc_fm_report_err_impact(ha,
1767 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
1768 + }
1769 +
1687 1770 /* RESET packet received delay for possible async event. */
1688 1771 if (status & BIT_0) {
1689 1772 drv_usecwait(500000);
1690 1773 }
1691 1774
1692 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1775 + QL_PRINT_3(ha, "done\n");
1693 1776 }
1694 1777
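Reviewer note: the sync-size arithmetic at the top of ql_response_pkt covers three ring states: new entries are contiguous, the firmware index stopped exactly at the wrap point, or the new entries wrap past the end of the queue. A standalone sketch of that arithmetic with a worked example; the entry size is an assumed constant, not taken from the driver headers.

#include <stdint.h>

#define	RESPONSE_ENTRY_SIZE	64u	/* assumed IOCB size */

static void
ring_sync_sizes(uint32_t entry_cnt, uint32_t driver_idx, uint32_t isp_idx,
    uint32_t *size1, uint32_t *size2)
{
	if (isp_idx > driver_idx) {
		/* New entries are contiguous. */
		*size1 = (isp_idx - driver_idx) * RESPONSE_ENTRY_SIZE;
		*size2 = 0;
	} else if (isp_idx == 0) {
		/* Firmware stopped exactly at the wrap point. */
		*size1 = (entry_cnt - driver_idx) * RESPONSE_ENTRY_SIZE;
		*size2 = 0;
	} else {
		/* Entries wrap: sync the tail of the ring, then the head. */
		*size1 = (entry_cnt - driver_idx) * RESPONSE_ENTRY_SIZE;
		*size2 = isp_idx * RESPONSE_ENTRY_SIZE;
	}
}

/*
 * Example: entry_cnt = 512, driver_idx = 510, isp_idx = 3 gives
 * size1 = 2 * 64 (entries 510-511) and size2 = 3 * 64 (entries 0-2).
 */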
1695 1778 /*
1696 1779 * ql_error_entry
1697 1780 * Processes error entry.
1698 1781 *
1699 1782 * Input:
1700 - * ha = adapter state pointer.
1701 - * pkt = entry pointer.
1702 - * done_q = head pointer to done queue.
1703 - * set_flags = task daemon flags to set.
1704 - * reset_flags = task daemon flags to reset.
1783 + * ha: adapter state pointer.
1784 + * rsp_q: response queue structure pointer.
1785 + * pkt: entry pointer.
1786 + * done_q: head pointer to done queue.
1787 + * set_flags: task daemon flags to set.
1788 + * reset_flags: task daemon flags to reset.
1705 1789 *
1706 1790 * Context:
1707 1791 * Interrupt or Kernel context, no mailbox commands allowed.
1708 1792 */
1709 1793 /* ARGSUSED */
1710 1794 static void
1711 -ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1712 - uint32_t *set_flags, uint32_t *reset_flags)
1795 +ql_error_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, response_t *pkt,
1796 + ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
1713 1797 {
1714 - ql_srb_t *sp;
1798 + ql_srb_t *sp = NULL;
1715 1799 uint32_t index, resp_identifier;
1716 1800
1717 - if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1718 - EL(ha, "Aborted command\n");
1801 + if (pkt->entry_type == ABORTED_ENTRY_TYPE) {
1802 + resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
1803 + &pkt->handle);
1804 + index = resp_identifier & OSC_INDEX_MASK;
1805 + if (index < ha->osc_max_cnt) {
1806 + if (ha->outstanding_cmds[index] ==
1807 + QL_ABORTED_SRB(ha)) {
1808 + EL(ha, "Aborted command sp=QL_ABORTED_SRB, "
1809 + "handle=%xh\n", resp_identifier);
1810 + ha->outstanding_cmds[index] = NULL;
1811 + } else {
1812 + EL(ha, "Aborted command sp=%ph, handle=%xh\n",
1813 + (void *) ha->outstanding_cmds[index],
1814 + resp_identifier);
1815 + }
1816 + } else {
1817 + EL(ha, "Aborted command handle=%xh, out of range "
1818 + "index=%xh\n", resp_identifier, index);
1819 + }
1719 1820 return;
1720 1821 }
1721 1822
1722 - QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1823 + QL_PRINT_2(ha, "started, packet:\n");
1723 1824 QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1724 1825
1725 1826 if (pkt->entry_status & BIT_6) {
1726 1827 EL(ha, "Request Queue DMA error\n");
1727 1828 } else if (pkt->entry_status & BIT_5) {
1728 1829 EL(ha, "Invalid Entry Order\n");
1729 1830 } else if (pkt->entry_status & BIT_4) {
1730 1831 EL(ha, "Invalid Entry Count\n");
1731 1832 } else if (pkt->entry_status & BIT_3) {
1732 1833 EL(ha, "Invalid Entry Parameter\n");
1733 1834 } else if (pkt->entry_status & BIT_2) {
1734 1835 EL(ha, "Invalid Entry Type\n");
1735 1836 } else if (pkt->entry_status & BIT_1) {
1736 1837 EL(ha, "Busy\n");
1737 1838 } else {
1738 1839 EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1739 1840 }
1740 1841
1741 1842 /* Validate the response entry handle. */
1742 - resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1843 + resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
1743 1844 index = resp_identifier & OSC_INDEX_MASK;
1744 - if (index < MAX_OUTSTANDING_COMMANDS) {
1845 + if (index < ha->osc_max_cnt) {
1745 1846 /* the index seems reasonable */
1746 - sp = ha->outstanding_cmds[index];
1847 + if ((sp = ha->outstanding_cmds[index]) == NULL) {
1848 + sp = ql_verify_preprocessed_cmd(ha, rsp_q,
1849 + (uint32_t *)&pkt->handle,
1850 + (uint32_t *)&resp_identifier, set_flags,
1851 + reset_flags);
1852 + }
1747 1853 if (sp != NULL) {
1748 - if (sp->handle == resp_identifier) {
1854 + if (sp == QL_ABORTED_SRB(ha)) {
1855 + EL(ha, "QL_ABORTED_SRB handle=%xh\n",
1856 + resp_identifier);
1857 + sp = NULL;
1858 + ha->outstanding_cmds[index] = NULL;
1859 + } else if (sp->handle == resp_identifier) {
1749 1860 /* Neo, you're the one... */
1750 1861 ha->outstanding_cmds[index] = NULL;
1751 1862 sp->handle = 0;
1752 1863 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1753 1864 } else {
1754 1865 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1755 1866 resp_identifier, sp->handle);
1756 1867 sp = NULL;
1757 1868 ql_signal_abort(ha, set_flags);
1758 1869 }
1759 - } else {
1760 - sp = ql_verify_preprocessed_cmd(ha,
1761 - (uint32_t *)&pkt->handle, set_flags, reset_flags);
1762 1870 }
1763 1871 } else {
1764 1872 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1765 1873 index, resp_identifier);
1766 1874 ql_signal_abort(ha, set_flags);
1767 1875 }
1768 1876
1769 1877 if (sp != NULL) {
1770 1878 /* Bad payload or header */
1771 1879 if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1772 1880 /* Bad payload or header, set error status. */
1773 1881 sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1774 1882 } else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1775 1883 sp->pkt->pkt_reason = CS_QUEUE_FULL;
1776 1884 } else {
1777 1885 /* Set error status. */
1778 1886 sp->pkt->pkt_reason = CS_UNKNOWN;
1779 1887 }
1780 1888
1781 1889 /* Set completed status. */
1782 1890 sp->flags |= SRB_ISP_COMPLETED;
1783 1891
1784 1892 /* Place command on done queue. */
1785 1893 ql_add_link_b(done_q, &sp->cmd);
1786 1894
1787 1895 }
1788 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1896 + QL_PRINT_3(ha, "done\n");
1789 1897 }
1790 1898
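Reviewer note: ql_error_entry, ql_status_entry and ql_24xx_status_entry now validate the 32-bit response handle the same way: mask off the low bits to get an outstanding-command index, range-check it against osc_max_cnt, and treat the QL_ABORTED_SRB sentinel as an already-reaped command. A compact sketch of that lookup; the mask value and sentinel handling are simplified assumptions.

#include <stdint.h>
#include <stddef.h>

#define	OSC_INDEX_MASK	0x0fffu		/* assumed low-bits index mask */

struct srb;				/* opaque command token */

static struct srb *
lookup_outstanding(struct srb **osc, uint32_t osc_max_cnt,
    uint32_t resp_identifier, struct srb *aborted_sentinel)
{
	uint32_t	index = resp_identifier & OSC_INDEX_MASK;
	struct srb	*sp;

	if (index >= osc_max_cnt) {
		return (NULL);		/* out of range: signal adapter abort */
	}
	sp = osc[index];
	if (sp == aborted_sentinel) {
		osc[index] = NULL;	/* command was already aborted */
		return (NULL);
	}
	return (sp);			/* caller still compares sp->handle */
}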
1791 1899 /*
1792 1900 * ql_status_entry
1793 1901 * Processes received ISP2200-2300 status entry.
1794 1902 *
1795 1903 * Input:
1796 1904 * ha: adapter state pointer.
1905 + * rsp_q: response queue structure pointer.
1797 1906 * pkt: entry pointer.
1798 1907 * done_q: done queue pointer.
1799 1908 * set_flags: task daemon flags to set.
1800 1909 * reset_flags: task daemon flags to reset.
1801 1910 *
1802 1911 * Returns:
1803 1912 * BIT_0 = CS_RESET status received.
1804 1913 *
1805 1914 * Context:
1806 1915 * Interrupt or Kernel context, no mailbox commands allowed.
1807 1916 */
1808 1917 /* ARGSUSED */
1809 1918 static int
1810 -ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1811 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1919 +ql_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
1920 + sts_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
1921 + uint64_t *reset_flags)
1812 1922 {
1813 - ql_srb_t *sp;
1923 + ql_srb_t *sp = NULL;
1814 1924 uint32_t index, resp_identifier;
1815 1925 uint16_t comp_status;
1816 1926 int rval = 0;
1817 1927
1818 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1928 + QL_PRINT_3(ha, "started\n");
1819 1929
1820 1930 /* Validate the response entry handle. */
1821 - resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1931 + resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
1822 1932 index = resp_identifier & OSC_INDEX_MASK;
1823 - if (index < MAX_OUTSTANDING_COMMANDS) {
1933 + if (index < ha->osc_max_cnt) {
1824 1934 /* the index seems reasonable */
1825 - sp = ha->outstanding_cmds[index];
1935 + if ((sp = ha->outstanding_cmds[index]) == NULL) {
1936 + sp = ql_verify_preprocessed_cmd(ha, rsp_q,
1937 + (uint32_t *)&pkt->handle,
1938 + (uint32_t *)&resp_identifier, set_flags,
1939 + reset_flags);
1940 + }
1826 1941 if (sp != NULL) {
1827 - if (sp->handle == resp_identifier) {
1942 + if (sp == QL_ABORTED_SRB(ha)) {
1943 + EL(ha, "QL_ABORTED_SRB handle=%xh\n",
1944 + resp_identifier);
1945 + sp = NULL;
1946 + ha->outstanding_cmds[index] = NULL;
1947 + } else if (sp->handle == resp_identifier) {
1828 1948 /* Neo, you're the one... */
1829 1949 ha->outstanding_cmds[index] = NULL;
1830 1950 sp->handle = 0;
1831 1951 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1832 1952 } else {
1833 1953 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1834 1954 resp_identifier, sp->handle);
1835 1955 sp = NULL;
1836 1956 ql_signal_abort(ha, set_flags);
1837 1957 }
1838 - } else {
1839 - sp = ql_verify_preprocessed_cmd(ha,
1840 - (uint32_t *)&pkt->handle, set_flags, reset_flags);
1841 1958 }
1842 1959 } else {
1843 1960 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1844 1961 index, resp_identifier);
1845 1962 ql_signal_abort(ha, set_flags);
1846 1963 }
1847 1964
1848 1965 if (sp != NULL) {
1849 - comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1966 + comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
1850 1967 &pkt->comp_status);
1851 1968
1852 1969 /*
1853 1970 * We dont care about SCSI QFULLs.
1854 1971 */
1855 1972 if (comp_status == CS_QUEUE_FULL) {
1856 1973 EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1857 1974 sp->lun_queue->target_queue->d_id.b24,
1858 1975 sp->lun_queue->lun_no);
1859 1976 comp_status = CS_COMPLETE;
1860 1977 }
1861 1978
1862 1979 /*
1863 1980 * 2300 firmware marks completion status as data underrun
1864 1981 * for scsi qfulls. Make it transport complete.
1865 1982 */
1866 - if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1867 - (comp_status == CS_DATA_UNDERRUN) &&
1868 - (pkt->scsi_status_l != 0)) {
1983 + if (CFG_IST(ha, CFG_CTRL_2363) &&
1984 + comp_status == CS_DATA_UNDERRUN &&
1985 + pkt->scsi_status_l != STATUS_GOOD) {
1869 1986 comp_status = CS_COMPLETE;
1870 1987 }
1871 1988
1872 1989 /*
1873 1990 * Workaround T3 issue where we do not get any data xferred
1874 1991 * but get back a good status.
1875 1992 */
1876 1993 if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1877 1994 comp_status == CS_COMPLETE &&
1878 - pkt->scsi_status_l == 0 &&
1995 + pkt->scsi_status_l == STATUS_GOOD &&
1879 1996 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1880 1997 pkt->residual_length == 0 &&
1881 1998 sp->fcp &&
1882 1999 sp->fcp->fcp_data_len != 0 &&
1883 2000 (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1884 2001 SF_DATA_OUT) {
1885 2002 comp_status = CS_ABORTED;
1886 2003 }
1887 2004
1888 2005 if (sp->flags & SRB_MS_PKT) {
1889 2006 /*
1890 2007 * Ideally it should never be true. But there
1891 2008 * is a bug in FW which upon receiving invalid
1892 2009 * parameters in MS IOCB returns it as
1893 2010 * status entry and not as ms entry type.
1894 2011 */
1895 - ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
2012 + ql_ms_entry(ha, rsp_q, (ms_entry_t *)pkt, done_q,
1896 2013 set_flags, reset_flags);
1897 - QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1898 - ha->instance);
2014 + QL_PRINT_3(ha, "ql_ms_entry done\n");
1899 2015 return (0);
1900 2016 }
1901 2017
1902 2018 /*
1903 2019 * Fast path to good SCSI I/O completion
1904 2020 */
1905 - if ((comp_status == CS_COMPLETE) &
1906 - (!pkt->scsi_status_l) &
1907 - (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
2021 + if (comp_status == CS_COMPLETE &&
2022 + pkt->scsi_status_l == STATUS_GOOD &&
2023 + (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
1908 2024 /* Set completed status. */
1909 2025 sp->flags |= SRB_ISP_COMPLETED;
1910 2026 sp->pkt->pkt_reason = comp_status;
1911 - ql_fast_fcp_post(sp);
1912 - QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1913 - ha->instance);
2027 + ql_fast_fcp_post(sp, rsp_q);
2028 + QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
1914 2029 return (0);
1915 2030 }
1916 - rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
2031 + rval = ql_status_error(ha, rsp_q, sp, pkt, done_q, set_flags,
1917 2032 reset_flags);
1918 2033 }
1919 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2034 + QL_PRINT_3(ha, "done\n");
1920 2035
1921 2036 return (rval);
1922 2037 }
1923 2038
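Reviewer note: besides switching to the per-queue access handle, the fast-path test above changes from bitwise '&' on comparison results to logical '&&' with explicit STATUS_GOOD comparisons. Both forms yield the same truth value for these operands, but the new form short-circuits and reads as intended. A side-by-side sketch; the FCP_RSP_MASK value here is assumed for illustration.

#include <stdint.h>

#define	CS_COMPLETE	0x0
#define	STATUS_GOOD	0x0
#define	FCP_RSP_MASK	0xf5	/* assumed mask of FCP response bits */

static int
fast_path_old(uint16_t cs, uint8_t ssl, uint8_t ssh)
{
	/* Bitwise '&' of int comparison results: no short-circuit. */
	return ((cs == CS_COMPLETE) & (!ssl) & (!(ssh & FCP_RSP_MASK)));
}

static int
fast_path_new(uint16_t cs, uint8_t ssl, uint8_t ssh)
{
	/* Logical '&&' with explicit comparisons, as in the new code. */
	return (cs == CS_COMPLETE && ssl == STATUS_GOOD &&
	    (ssh & FCP_RSP_MASK) == 0);
}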
1924 2039 /*
1925 2040 * ql_24xx_status_entry
1926 2041 * Processes received ISP24xx status entry.
1927 2042 *
1928 2043 * Input:
1929 2044 * ha: adapter state pointer.
2045 + * rsp_q: response queue structure pointer.
1930 2046 * pkt: entry pointer.
1931 2047 * done_q: done queue pointer.
1932 2048 * set_flags: task daemon flags to set.
1933 2049 * reset_flags: task daemon flags to reset.
1934 2050 *
1935 2051 * Returns:
1936 2052 * BIT_0 = CS_RESET status received.
1937 2053 *
1938 2054 * Context:
1939 2055 * Interrupt or Kernel context, no mailbox commands allowed.
1940 2056 */
1941 2057 /* ARGSUSED */
1942 2058 static int
1943 -ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1944 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2059 +ql_24xx_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2060 + sts_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2061 + uint64_t *reset_flags)
1945 2062 {
1946 2063 ql_srb_t *sp = NULL;
1947 2064 uint16_t comp_status;
1948 2065 uint32_t index, resp_identifier;
1949 2066 int rval = 0;
1950 2067
1951 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2068 + QL_PRINT_3(ha, "started\n");
1952 2069
1953 2070 /* Validate the response entry handle. */
1954 - resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
2071 + resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
1955 2072 index = resp_identifier & OSC_INDEX_MASK;
1956 - if (index < MAX_OUTSTANDING_COMMANDS) {
2073 + if (index < ha->osc_max_cnt) {
1957 2074 /* the index seems reasonable */
1958 - sp = ha->outstanding_cmds[index];
2075 + if ((sp = ha->outstanding_cmds[index]) == NULL) {
2076 + sp = ql_verify_preprocessed_cmd(ha, rsp_q,
2077 + (uint32_t *)&pkt->handle,
2078 + (uint32_t *)&resp_identifier, set_flags,
2079 + reset_flags);
2080 + }
1959 2081 if (sp != NULL) {
1960 - if (sp->handle == resp_identifier) {
2082 + if (sp == QL_ABORTED_SRB(ha)) {
2083 + EL(ha, "QL_ABORTED_SRB handle=%xh\n",
2084 + resp_identifier);
2085 + sp = NULL;
2086 + ha->outstanding_cmds[index] = NULL;
2087 + } else if (sp->handle == resp_identifier) {
1961 2088 /* Neo, you're the one... */
1962 2089 ha->outstanding_cmds[index] = NULL;
1963 2090 sp->handle = 0;
1964 2091 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1965 2092 } else {
1966 2093 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1967 2094 resp_identifier, sp->handle);
1968 2095 sp = NULL;
1969 2096 ql_signal_abort(ha, set_flags);
1970 2097 }
1971 - } else {
1972 - sp = ql_verify_preprocessed_cmd(ha,
1973 - (uint32_t *)&pkt->handle, set_flags, reset_flags);
1974 2098 }
1975 2099 } else {
1976 2100 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1977 2101 index, resp_identifier);
1978 2102 ql_signal_abort(ha, set_flags);
1979 2103 }
1980 2104
1981 2105 if (sp != NULL) {
1982 - comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2106 + comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
1983 2107 &pkt->comp_status);
1984 2108
1985 2109 /* We dont care about SCSI QFULLs. */
1986 2110 if (comp_status == CS_QUEUE_FULL) {
1987 2111 EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1988 2112 sp->lun_queue->target_queue->d_id.b24,
1989 2113 sp->lun_queue->lun_no);
1990 2114 comp_status = CS_COMPLETE;
1991 2115 }
1992 2116
1993 2117 /*
1994 2118 * 2300 firmware marks completion status as data underrun
1995 2119 * for scsi qfulls. Make it transport complete.
1996 2120 */
1997 - if ((comp_status == CS_DATA_UNDERRUN) &&
1998 - (pkt->scsi_status_l != 0)) {
2121 + if (comp_status == CS_DATA_UNDERRUN &&
2122 + pkt->scsi_status_l != STATUS_GOOD) {
1999 2123 comp_status = CS_COMPLETE;
2000 2124 }
2001 2125
2002 2126 /*
2003 2127 * Workaround T3 issue where we do not get any data xferred
2004 2128 * but get back a good status.
2005 2129 */
2006 2130 if (comp_status == CS_COMPLETE &&
2007 - pkt->scsi_status_l == 0 &&
2131 + pkt->scsi_status_l == STATUS_GOOD &&
2008 2132 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
2009 2133 pkt->residual_length != 0 &&
2010 2134 sp->fcp &&
2011 2135 sp->fcp->fcp_data_len != 0 &&
2012 2136 sp->fcp->fcp_cntl.cntl_write_data) {
2013 2137 comp_status = CS_ABORTED;
2014 2138 }
2015 2139
2016 2140 /*
2017 2141 * Fast path to good SCSI I/O completion
2018 2142 */
2019 - if ((comp_status == CS_COMPLETE) &
2020 - (!pkt->scsi_status_l) &
2021 - (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
2143 + if (comp_status == CS_COMPLETE &&
2144 + pkt->scsi_status_l == STATUS_GOOD &&
2145 + (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
2022 2146 /* Set completed status. */
2023 2147 sp->flags |= SRB_ISP_COMPLETED;
2024 2148 sp->pkt->pkt_reason = comp_status;
2025 - ql_fast_fcp_post(sp);
2026 - QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
2027 - ha->instance);
2149 + ql_fast_fcp_post(sp, rsp_q);
2150 + QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
2028 2151 return (0);
2029 2152 }
2030 - rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
2031 - set_flags, reset_flags);
2153 + rval = ql_status_error(ha, rsp_q, sp, (sts_entry_t *)pkt,
2154 + done_q, set_flags, reset_flags);
2032 2155 }
2033 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2156 + QL_PRINT_3(ha, "done\n");
2034 2157
2035 2158 return (rval);
2036 2159 }
2037 2160
2038 2161 /*
2039 2162 * ql_verify_preprocessed_cmd
2040 2163 * Handles preprocessed cmds..
2041 2164 *
2042 2165 * Input:
2043 - * ha: adapter state pointer.
2044 - * pkt_handle: handle pointer.
2045 - * set_flags: task daemon flags to set.
2046 - * reset_flags: task daemon flags to reset.
2166 + * ha: adapter state pointer.
2167 + * rsp_q: response queue structure pointer.
2168 + * pkt_handle: handle pointer.
2169 + * resp_identifier: resp_identifier pointer.
2170 + * set_flags: task daemon flags to set.
2171 + * reset_flags: task daemon flags to reset.
2047 2172 *
2048 2173 * Returns:
2049 2174 * srb pointer or NULL
2050 2175 *
2051 2176 * Context:
2052 2177 * Interrupt or Kernel context, no mailbox commands allowed.
2053 2178 */
2054 2179 /* ARGSUSED */
2055 2180 ql_srb_t *
2056 -ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, uint32_t *pkt_handle,
2057 - uint32_t *set_flags, uint32_t *reset_flags)
2181 +ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2182 + uint32_t *pkt_handle, uint32_t *resp_identifier, uint64_t *set_flags,
2183 + uint64_t *reset_flags)
2058 2184 {
2059 2185 ql_srb_t *sp = NULL;
2060 - uint32_t index, resp_identifier;
2186 + uint32_t index;
2061 2187 uint32_t get_handle = 10;
2062 2188
2063 2189 while (get_handle) {
2064 2190 /* Get handle. */
2065 - resp_identifier = ddi_get32(ha->hba_buf.acc_handle, pkt_handle);
2066 - index = resp_identifier & OSC_INDEX_MASK;
2191 + *resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
2192 + pkt_handle);
2193 + index = *resp_identifier & OSC_INDEX_MASK;
2067 2194 /* Validate handle. */
2068 - if (index < MAX_OUTSTANDING_COMMANDS) {
2195 + if (index < ha->osc_max_cnt) {
2069 2196 sp = ha->outstanding_cmds[index];
2070 2197 }
2071 2198
2072 2199 if (sp != NULL) {
2073 2200 EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
2074 - resp_identifier, get_handle, index);
2201 + *resp_identifier, get_handle, index);
2075 2202 break;
2076 2203 } else {
2077 2204 get_handle -= 1;
2078 2205 drv_usecwait(10000);
2079 - if (get_handle == 1) {
2206 + if (get_handle == 1 && rsp_q->rsp_ring.dma_handle) {
2080 2207 /* Last chance, Sync whole DMA buffer. */
2081 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
2082 - RESPONSE_Q_BUFFER_OFFSET,
2083 - RESPONSE_QUEUE_SIZE,
2084 - DDI_DMA_SYNC_FORKERNEL);
2208 + (void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
2209 + 0, 0, DDI_DMA_SYNC_FORCPU);
2085 2210 EL(ha, "last chance DMA sync, index=%xh\n",
2086 2211 index);
2087 2212 }
2088 2213 }
2089 2214 }
2090 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2215 + QL_PRINT_3(ha, "done\n");
2091 2216
2092 2217 return (sp);
2093 2218 }
2094 2219
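Reviewer note: ql_verify_preprocessed_cmd polls the handle up to ten times, ten milliseconds apart, and re-syncs the whole response ring once before the last attempt. A user-space sketch of that bounded retry; lookup_by_handle() and sync_ring() are hypothetical stand-ins for the outstanding-command lookup and the ddi_dma_sync() call.

#include <stdint.h>
#include <stddef.h>
#include <unistd.h>

struct srb;

/* Hypothetical helpers standing in for driver internals. */
extern struct srb *lookup_by_handle(uint32_t handle);
extern void sync_ring(void);

static struct srb *
wait_for_preprocessed_cmd(volatile uint32_t *handle_slot)
{
	int		tries = 10;
	struct srb	*sp = NULL;

	while (tries > 0) {
		if ((sp = lookup_by_handle(*handle_slot)) != NULL) {
			break;
		}
		tries--;
		(void) usleep(10000);	/* driver uses drv_usecwait(10000) */
		if (tries == 1) {
			sync_ring();	/* last chance: full ring DMA sync */
		}
	}
	return (sp);
}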
2095 2220
2096 2221 /*
2097 2222 * ql_status_error
2098 2223 * Processes received ISP status entry error.
2099 2224 *
2100 2225 * Input:
2101 2226 * ha: adapter state pointer.
2227 + * rsp_q: response queue structure pointer.
2102 2228 * sp: SRB pointer.
2103 2229 * pkt: entry pointer.
2104 2230 * done_q: done queue pointer.
2105 2231 * set_flags: task daemon flags to set.
2106 2232 * reset_flags: task daemon flags to reset.
2107 2233 *
2108 2234 * Returns:
2109 2235 * BIT_0 = CS_RESET status received.
2110 2236 *
2111 2237 * Context:
2112 2238 * Interrupt or Kernel context, no mailbox commands allowed.
2113 2239 */
2114 2240 /* ARGSUSED */
2115 2241 static int
2116 -ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
2117 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2242 +ql_status_error(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ql_srb_t *sp,
2243 + sts_entry_t *pkt23, ql_head_t *done_q, uint64_t *set_flags,
2244 + uint64_t *reset_flags)
2118 2245 {
2119 2246 uint32_t sense_sz = 0;
2120 2247 uint32_t cnt;
2121 2248 ql_tgt_t *tq;
2122 2249 fcp_rsp_t *fcpr;
2123 2250 struct fcp_rsp_info *rsp;
2124 2251 int rval = 0;
2125 2252
2126 2253 struct {
2127 2254 uint8_t *rsp_info;
2128 2255 uint8_t *req_sense_data;
2129 2256 uint32_t residual_length;
2130 2257 uint32_t fcp_residual_length;
2131 2258 uint32_t rsp_info_length;
2132 2259 uint32_t req_sense_length;
2133 2260 uint16_t comp_status;
2134 2261 uint8_t state_flags_l;
2135 2262 uint8_t state_flags_h;
2136 2263 uint8_t scsi_status_l;
2137 2264 uint8_t scsi_status_h;
2138 2265 } sts;
2139 2266
2140 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2267 + QL_PRINT_3(ha, "started\n");
2141 2268
2142 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
2269 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2143 2270 sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
2144 2271
2145 2272 /* Setup status. */
2146 - sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2147 - &pkt24->comp_status);
2273 + sts.comp_status = (uint16_t)ddi_get16(
2274 + rsp_q->rsp_ring.acc_handle, &pkt24->comp_status);
2148 2275 sts.scsi_status_l = pkt24->scsi_status_l;
2149 2276 sts.scsi_status_h = pkt24->scsi_status_h;
2150 2277
2151 2278 /* Setup firmware residuals. */
2152 2279 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2153 - ddi_get32(ha->hba_buf.acc_handle,
2280 + ddi_get32(rsp_q->rsp_ring.acc_handle,
2154 2281 (uint32_t *)&pkt24->residual_length) : 0;
2155 2282
2156 2283 /* Setup FCP residuals. */
2157 2284 sts.fcp_residual_length = sts.scsi_status_h &
2158 2285 (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2159 - ddi_get32(ha->hba_buf.acc_handle,
2286 + ddi_get32(rsp_q->rsp_ring.acc_handle,
2160 2287 (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
2161 2288
2162 2289 if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2163 2290 (sts.scsi_status_h & FCP_RESID_UNDER) &&
2164 2291 (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
2165 2292
2166 2293 EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
2167 2294 sts.residual_length,
2168 2295 pkt24->fcp_rsp_residual_count);
2169 2296 sts.scsi_status_h = (uint8_t)
2170 2297 (sts.scsi_status_h & ~FCP_RESID_UNDER);
2171 2298 }
2172 2299
2173 2300 /* Setup state flags. */
2174 2301 sts.state_flags_l = pkt24->state_flags_l;
2175 2302 sts.state_flags_h = pkt24->state_flags_h;
2176 2303
2177 2304 if (sp->fcp->fcp_data_len &&
2178 2305 (sts.comp_status != CS_DATA_UNDERRUN ||
2179 2306 sts.residual_length != sp->fcp->fcp_data_len)) {
2180 2307 sts.state_flags_h = (uint8_t)
2181 2308 (sts.state_flags_h | SF_GOT_BUS |
2182 2309 SF_GOT_TARGET | SF_SENT_CMD |
2183 2310 SF_XFERRED_DATA | SF_GOT_STATUS);
2184 2311 } else {
2185 2312 sts.state_flags_h = (uint8_t)
2186 2313 (sts.state_flags_h | SF_GOT_BUS |
2187 2314 SF_GOT_TARGET | SF_SENT_CMD |
2188 2315 SF_GOT_STATUS);
2189 2316 }
2190 2317 if (sp->fcp->fcp_cntl.cntl_write_data) {
2191 2318 sts.state_flags_l = (uint8_t)
2192 2319 (sts.state_flags_l | SF_DATA_OUT);
2193 2320 } else if (sp->fcp->fcp_cntl.cntl_read_data) {
2194 2321 sts.state_flags_l = (uint8_t)
2195 2322 (sts.state_flags_l | SF_DATA_IN);
2196 2323 }
2197 2324 if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2198 2325 sts.state_flags_l = (uint8_t)
2199 2326 (sts.state_flags_l | SF_HEAD_OF_Q);
2200 2327 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2201 2328 sts.state_flags_l = (uint8_t)
2202 2329 (sts.state_flags_l | SF_ORDERED_Q);
2203 2330 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2204 2331 sts.state_flags_l = (uint8_t)
2205 2332 (sts.state_flags_l | SF_SIMPLE_Q);
2206 2333 }
2207 2334
2208 2335 /* Setup FCP response info. */
2209 2336 sts.rsp_info = &pkt24->rsp_sense_data[0];
2210 2337 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2211 - sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
2338 + sts.rsp_info_length = ddi_get32(
2339 + rsp_q->rsp_ring.acc_handle,
2212 2340 (uint32_t *)&pkt24->fcp_rsp_data_length);
2213 2341 if (sts.rsp_info_length >
2214 2342 sizeof (struct fcp_rsp_info)) {
2215 2343 sts.rsp_info_length =
2216 2344 sizeof (struct fcp_rsp_info);
2217 2345 }
2218 2346 for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2219 2347 ql_chg_endian(sts.rsp_info + cnt, 4);
2220 2348 }
2221 2349 } else {
2222 2350 sts.rsp_info_length = 0;
2223 2351 }
2224 2352
2225 2353 /* Setup sense data. */
2226 2354 sts.req_sense_data =
2227 2355 &pkt24->rsp_sense_data[sts.rsp_info_length];
2228 2356 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2229 2357 sts.req_sense_length =
2230 - ddi_get32(ha->hba_buf.acc_handle,
2358 + ddi_get32(rsp_q->rsp_ring.acc_handle,
2231 2359 (uint32_t *)&pkt24->fcp_sense_length);
2232 2360 sts.state_flags_h = (uint8_t)
2233 2361 (sts.state_flags_h | SF_ARQ_DONE);
2234 2362 sense_sz = (uint32_t)
2235 2363 (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2236 2364 (uintptr_t)sts.req_sense_data);
2237 2365 for (cnt = 0; cnt < sense_sz; cnt += 4) {
2238 2366 ql_chg_endian(sts.req_sense_data + cnt, 4);
2239 2367 }
2240 2368 } else {
2241 2369 sts.req_sense_length = 0;
2242 2370 }
2243 2371 } else {
2244 2372 /* Setup status. */
2245 2373 sts.comp_status = (uint16_t)ddi_get16(
2246 - ha->hba_buf.acc_handle, &pkt23->comp_status);
2374 + rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
2247 2375 sts.scsi_status_l = pkt23->scsi_status_l;
2248 2376 sts.scsi_status_h = pkt23->scsi_status_h;
2249 2377
2250 2378 /* Setup firmware residuals. */
2251 2379 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2252 - ddi_get32(ha->hba_buf.acc_handle,
2380 + ddi_get32(rsp_q->rsp_ring.acc_handle,
2253 2381 (uint32_t *)&pkt23->residual_length) : 0;
2254 2382
2255 2383 /* Setup FCP residuals. */
2256 2384 sts.fcp_residual_length = sts.scsi_status_h &
2257 2385 (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2258 2386 sts.residual_length : 0;
2259 2387
2260 2388 /* Setup state flags. */
2261 2389 sts.state_flags_l = pkt23->state_flags_l;
2262 2390 sts.state_flags_h = pkt23->state_flags_h;
2263 2391
2264 2392 /* Setup FCP response info. */
2265 2393 sts.rsp_info = &pkt23->rsp_info[0];
2266 2394 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2267 2395 sts.rsp_info_length = ddi_get16(
2268 - ha->hba_buf.acc_handle,
2396 + rsp_q->rsp_ring.acc_handle,
2269 2397 (uint16_t *)&pkt23->rsp_info_length);
2270 2398 if (sts.rsp_info_length >
2271 2399 sizeof (struct fcp_rsp_info)) {
2272 2400 sts.rsp_info_length =
2273 2401 sizeof (struct fcp_rsp_info);
2274 2402 }
2275 2403 } else {
2276 2404 sts.rsp_info_length = 0;
2277 2405 }
2278 2406
2279 2407 /* Setup sense data. */
2280 2408 sts.req_sense_data = &pkt23->req_sense_data[0];
2281 2409 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2282 - ddi_get16(ha->hba_buf.acc_handle,
2410 + ddi_get16(rsp_q->rsp_ring.acc_handle,
2283 2411 (uint16_t *)&pkt23->req_sense_length) : 0;
2284 2412 }
2285 2413
2286 2414 bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2287 2415
2288 2416 fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2289 2417 rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2290 2418 sizeof (fcp_rsp_t));
2291 2419
2292 2420 tq = sp->lun_queue->target_queue;
2293 2421
2294 2422 fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2295 2423 if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2296 2424 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2297 2425 }
2298 2426 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2299 2427 fcpr->fcp_u.fcp_status.sense_len_set = 1;
2300 2428 }
2301 2429 if (sts.scsi_status_h & FCP_RESID_OVER) {
2302 2430 fcpr->fcp_u.fcp_status.resid_over = 1;
2303 2431 }
2304 2432 if (sts.scsi_status_h & FCP_RESID_UNDER) {
2305 2433 fcpr->fcp_u.fcp_status.resid_under = 1;
2306 2434 }
2307 2435 fcpr->fcp_u.fcp_status.reserved_1 = 0;
2308 2436
2309 2437 /* Set ISP completion status */
2310 2438 sp->pkt->pkt_reason = sts.comp_status;
2311 2439
2312 2440 /* Update statistics. */
2313 2441 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2314 2442 (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2315 2443
2316 2444 sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2317 2445 if (sense_sz > sts.rsp_info_length) {
2318 2446 sense_sz = sts.rsp_info_length;
2319 2447 }
2320 2448
2321 2449 /* copy response information data. */
2322 2450 if (sense_sz) {
2323 - ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
2324 - sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
2451 + ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2452 + (uint8_t *)rsp, sts.rsp_info, sense_sz,
2453 + DDI_DEV_AUTOINCR);
2325 2454 }
2326 2455 fcpr->fcp_response_len = sense_sz;
2327 2456
2328 2457 rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2329 2458 fcpr->fcp_response_len);
2330 2459
2331 2460 switch (*(sts.rsp_info + 3)) {
2332 2461 case FCP_NO_FAILURE:
2333 2462 break;
2334 2463 case FCP_DL_LEN_MISMATCH:
2335 2464 ha->adapter_stats->d_stats[lobyte(
2336 2465 tq->loop_id)].dl_len_mismatches++;
2337 2466 break;
2338 2467 case FCP_CMND_INVALID:
2339 2468 break;
2340 2469 case FCP_DATA_RO_MISMATCH:
2341 2470 ha->adapter_stats->d_stats[lobyte(
2342 2471 tq->loop_id)].data_ro_mismatches++;
2343 2472 break;
2344 2473 case FCP_TASK_MGMT_NOT_SUPPTD:
2345 2474 break;
2346 2475 case FCP_TASK_MGMT_FAILED:
2347 2476 ha->adapter_stats->d_stats[lobyte(
2348 2477 tq->loop_id)].task_mgmt_failures++;
2349 2478 break;
2350 2479 default:
2351 2480 break;
2352 2481 }
2353 2482 } else {
2354 2483 /*
2355 2484 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2356 2485 * sts.scsi_status_h, sp->pkt->pkt_rsplen);
2357 2486 */
2358 2487 fcpr->fcp_response_len = 0;
2359 2488 }
2360 2489
2361 2490 /* Set reset status received. */
2362 2491 if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2492 + *set_flags |= MARKER_NEEDED;
2363 2493 rval |= BIT_0;
2364 2494 }
2365 2495
2366 2496 if (!(tq->flags & TQF_TAPE_DEVICE) &&
2367 2497 (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2368 2498 ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2369 2499 ha->task_daemon_flags & LOOP_DOWN) {
2370 2500 EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2371 2501 tq->d_id.b24, sp->lun_queue->lun_no);
2372 2502
2373 2503 /* Set retry status. */
2374 2504 sp->flags |= SRB_RETRY;
2375 2505 } else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2376 2506 tq->port_down_retry_count != 0 &&
2377 2507 (sts.comp_status == CS_INCOMPLETE ||
2378 2508 sts.comp_status == CS_PORT_UNAVAILABLE ||
2379 2509 sts.comp_status == CS_PORT_LOGGED_OUT ||
2380 2510 sts.comp_status == CS_PORT_CONFIG_CHG ||
2381 2511 sts.comp_status == CS_PORT_BUSY)) {
2382 2512 EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2383 2513 "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
2384 2514 tq->port_down_retry_count);
2385 2515
2386 2516 /* Set retry status. */
2387 2517 sp->flags |= SRB_RETRY;
2388 2518
2389 2519 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2390 2520 /* Acquire device queue lock. */
2391 2521 DEVICE_QUEUE_LOCK(tq);
2392 2522
2393 2523 tq->flags |= TQF_QUEUE_SUSPENDED;
2394 2524
2395 2525 /* Decrement port down count. */
2396 2526 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
2397 2527 tq->port_down_retry_count--;
2398 2528 }
2399 2529
2400 2530 DEVICE_QUEUE_UNLOCK(tq);
2401 2531
2402 2532 if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
2403 2533 == 0 &&
2404 2534 (sts.comp_status == CS_PORT_LOGGED_OUT ||
2405 2535 sts.comp_status == CS_PORT_UNAVAILABLE)) {
2406 2536 sp->ha->adapter_stats->d_stats[lobyte(
2407 2537 tq->loop_id)].logouts_recvd++;
2408 2538 ql_send_logo(sp->ha, tq, done_q);
2409 2539 }
2410 2540
2411 2541 ADAPTER_STATE_LOCK(ha);
2412 2542 if (ha->port_retry_timer == 0) {
2413 2543 if ((ha->port_retry_timer =
2414 2544 ha->port_down_retry_delay) == 0) {
2415 2545 *set_flags |=
2416 2546 PORT_RETRY_NEEDED;
2417 2547 }
2418 2548 }
2419 2549 ADAPTER_STATE_UNLOCK(ha);
2420 2550 }
2421 2551 } else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2422 2552 (sts.comp_status == CS_RESET ||
2423 2553 (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
2424 2554 (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
2425 2555 if (sts.comp_status == CS_RESET) {
2426 2556 EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
2427 2557 tq->d_id.b24, sp->lun_queue->lun_no);
2428 2558 } else if (sts.comp_status == CS_QUEUE_FULL) {
2429 2559 EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
2430 2560 "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
2431 2561 tq->qfull_retry_count);
2432 2562 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2433 2563 tq->flags |= TQF_QUEUE_SUSPENDED;
2434 2564
2435 2565 tq->qfull_retry_count--;
2436 2566
2437 2567 ADAPTER_STATE_LOCK(ha);
2438 2568 if (ha->port_retry_timer == 0) {
2439 2569 if ((ha->port_retry_timer =
2440 2570 ha->qfull_retry_delay) ==
2441 2571 0) {
2442 2572 *set_flags |=
2443 2573 PORT_RETRY_NEEDED;
2444 2574 }
2445 2575 }
2446 2576 ADAPTER_STATE_UNLOCK(ha);
2447 2577 }
2448 2578 } else {
2449 2579 EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2450 2580 tq->d_id.b24, sp->lun_queue->lun_no);
2581 +
2582 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) && LOOP_READY(ha)) {
2583 + *set_flags |= MARKER_NEEDED;
2584 + rval |= BIT_0;
2585 + }
2451 2586 }
2452 2587
2453 2588 /* Set retry status. */
2454 2589 sp->flags |= SRB_RETRY;
2455 2590 } else {
2456 2591 fcpr->fcp_resid =
2457 2592 sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2458 2593 sp->fcp->fcp_data_len : sts.fcp_residual_length;
2459 2594
2460 2595 if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2461 2596 (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2462 2597
2463 2598 if (sts.scsi_status_l == STATUS_CHECK) {
2464 2599 sp->pkt->pkt_reason = CS_COMPLETE;
2465 2600 } else {
2466 2601 EL(ha, "transport error - "
2467 2602 "underrun & invalid resid\n");
2468 2603 EL(ha, "ssh=%xh, ssl=%xh\n",
2469 2604 sts.scsi_status_h, sts.scsi_status_l);
2470 2605 sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2471 2606 }
2472 2607 }
2473 2608
2474 2609 /* Ignore firmware underrun error. */
2475 2610 if (sts.comp_status == CS_DATA_UNDERRUN &&
2476 2611 (sts.scsi_status_h & FCP_RESID_UNDER ||
2477 2612 (sts.scsi_status_l != STATUS_CHECK &&
2478 2613 sts.scsi_status_l != STATUS_GOOD))) {
2479 2614 sp->pkt->pkt_reason = CS_COMPLETE;
2480 2615 }
2481 2616
2482 2617 if (sp->pkt->pkt_reason != CS_COMPLETE) {
2483 2618 ha->xioctl->DeviceErrorCount++;
2484 - EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
2485 - "\n", sts.comp_status, tq->d_id.b24,
2486 - sp->lun_queue->lun_no);
2619 + EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh,"
2620 + " pkt_reason=%xh, spf=%xh, sp=%ph\n",
2621 + sts.comp_status, tq->d_id.b24,
2622 + sp->lun_queue->lun_no, sp->pkt->pkt_reason,
2623 + sp->flags, sp);
2487 2624 }
2488 2625
2489 2626 /* Set target request sense data. */
2490 2627 if (sts.scsi_status_l == STATUS_CHECK) {
2491 2628 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2492 2629
2493 2630 if (sp->pkt->pkt_reason == CS_COMPLETE &&
2494 2631 sts.req_sense_data[2] != KEY_NO_SENSE &&
2495 2632 sts.req_sense_data[2] !=
2496 2633 KEY_UNIT_ATTENTION) {
2497 2634 ha->xioctl->DeviceErrorCount++;
2498 2635 }
2499 2636
2500 2637 sense_sz = sts.req_sense_length;
2501 2638
2502 2639 /* Insure data does not exceed buf. */
2503 2640 if (sp->pkt->pkt_rsplen <=
2504 2641 (uint32_t)sizeof (fcp_rsp_t) +
2505 2642 fcpr->fcp_response_len) {
2506 2643 sp->request_sense_length = 0;
2507 2644 } else {
2508 2645 sp->request_sense_length = (uint32_t)
2509 2646 (sp->pkt->pkt_rsplen -
2510 2647 sizeof (fcp_rsp_t) -
2511 2648 fcpr->fcp_response_len);
2512 2649 }
2513 2650
2514 2651 if (sense_sz <
2515 2652 sp->request_sense_length) {
2516 2653 sp->request_sense_length =
2517 2654 sense_sz;
2518 2655 }
2519 2656
2520 2657 sp->request_sense_ptr = (caddr_t)rsp;
2521 2658
2522 2659 sense_sz = (uint32_t)
2523 2660 (((uintptr_t)pkt23 +
2524 2661 sizeof (sts_entry_t)) -
2525 2662 (uintptr_t)sts.req_sense_data);
2526 2663 if (sp->request_sense_length <
2527 2664 sense_sz) {
2528 2665 sense_sz =
2529 2666 sp->request_sense_length;
2530 2667 }
2531 2668
2532 2669 fcpr->fcp_sense_len = sense_sz;
2533 2670
2534 2671 /* Move sense data. */
2535 - ddi_rep_get8(ha->hba_buf.acc_handle,
2672 + ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2536 2673 (uint8_t *)sp->request_sense_ptr,
2537 2674 sts.req_sense_data,
2538 2675 (size_t)sense_sz,
2539 2676 DDI_DEV_AUTOINCR);
2540 2677
2541 2678 sp->request_sense_ptr += sense_sz;
2542 2679 sp->request_sense_length -= sense_sz;
2543 2680 if (sp->request_sense_length != 0 &&
2544 - !(CFG_IST(ha, CFG_CTRL_8021))) {
2545 - ha->status_srb = sp;
2681 + !(CFG_IST(ha, CFG_CTRL_82XX))) {
2682 + rsp_q->status_srb = sp;
2546 2683 }
2547 2684 }
2548 2685
2549 2686 if (sense_sz != 0) {
2550 2687 EL(sp->ha, "check condition sense data, "
2551 2688 "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2552 2689 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2553 2690 "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2554 2691 sp->lun_queue->lun_no,
2555 2692 sts.req_sense_data[0],
2556 2693 sts.req_sense_data[1],
2557 2694 sts.req_sense_data[2],
2558 2695 sts.req_sense_data[3],
2559 2696 sts.req_sense_data[4],
2560 2697 sts.req_sense_data[5],
2561 2698 sts.req_sense_data[6],
2562 2699 sts.req_sense_data[7],
2563 2700 sts.req_sense_data[8],
2564 2701 sts.req_sense_data[9],
2565 2702 sts.req_sense_data[10],
2566 2703 sts.req_sense_data[11],
2567 2704 sts.req_sense_data[12],
2568 2705 sts.req_sense_data[13],
2569 2706 sts.req_sense_data[14],
2570 2707 sts.req_sense_data[15],
2571 2708 sts.req_sense_data[16],
2572 2709 sts.req_sense_data[17]);
2573 2710 } else {
2574 2711 EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2575 2712 "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2576 2713 }
2577 2714 }
2578 2715 }
2579 2716
2580 2717 /* Set completed status. */
2581 2718 sp->flags |= SRB_ISP_COMPLETED;
2582 2719
2583 2720 /* Place command on done queue. */
2584 - if (ha->status_srb == NULL) {
2721 + if (rsp_q->status_srb == NULL) {
2585 2722 ql_add_link_b(done_q, &sp->cmd);
2586 2723 }
2587 2724
2588 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2725 + QL_PRINT_3(ha, "done\n");
2589 2726
2590 2727 return (rval);
2591 2728 }
2592 2729
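The underrun handling in ql_status_error() above boils down to one rule: a CS_DATA_UNDERRUN completion is reported to the transport as CS_COMPLETE when the firmware marked the residual as valid (FCP_RESID_UNDER) or when the SCSI status is neither CHECK CONDITION nor GOOD. A minimal standalone sketch of that rule follows; the constant values are illustrative stand-ins, not the driver's headers.

#include <stdint.h>
#include <stdbool.h>

/* Illustrative stand-ins; the real values come from the qlc headers. */
#define	CS_DATA_UNDERRUN	0x15
#define	FCP_RESID_UNDER		0x08
#define	STATUS_GOOD		0x00
#define	STATUS_CHECK		0x02

/*
 * Returns true when a data-underrun completion should be ignored and
 * the command reported as a normal completion.
 */
static bool
underrun_is_benign(uint16_t comp_status, uint8_t scsi_status_h,
    uint8_t scsi_status_l)
{
	if (comp_status != CS_DATA_UNDERRUN) {
		return (false);
	}
	return ((scsi_status_h & FCP_RESID_UNDER) != 0 ||
	    (scsi_status_l != STATUS_CHECK && scsi_status_l != STATUS_GOOD));
}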
2593 2730 /*
2594 2731 * ql_status_cont_entry
2595 2732 * Processes status continuation entry.
2596 2733 *
2597 2734 * Input:
2598 2735 * ha: adapter state pointer.
2736 + * rsp_q: response queue structure pointer.
2599 2737 * pkt: entry pointer.
2600 2738 * done_q: done queue pointer.
2601 2739 * set_flags: task daemon flags to set.
2602 2740 * reset_flags: task daemon flags to reset.
2603 2741 *
2604 2742 * Context:
2605 2743 * Interrupt or Kernel context, no mailbox commands allowed.
2606 2744 */
2607 2745 /* ARGSUSED */
2608 2746 static void
2609 -ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2610 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2747 +ql_status_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2748 + sts_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2749 + uint64_t *reset_flags)
2611 2750 {
2612 2751 uint32_t sense_sz, index;
2613 - ql_srb_t *sp = ha->status_srb;
2752 + ql_srb_t *sp = rsp_q->status_srb;
2614 2753
2615 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2754 + QL_PRINT_3(ha, "started\n");
2616 2755
2617 2756 if (sp != NULL && sp->request_sense_length) {
2618 2757 if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2619 2758 sense_sz = sizeof (pkt->req_sense_data);
2620 2759 } else {
2621 2760 sense_sz = sp->request_sense_length;
2622 2761 }
2623 2762
2624 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
2763 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2625 2764 for (index = 0; index < sense_sz; index += 4) {
2626 2765 ql_chg_endian((uint8_t *)
2627 2766 &pkt->req_sense_data[0] + index, 4);
2628 2767 }
2629 2768 }
2630 2769
2631 2770 /* Move sense data. */
2632 - ddi_rep_get8(ha->hba_buf.acc_handle,
2771 + ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2633 2772 (uint8_t *)sp->request_sense_ptr,
2634 2773 (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2635 2774 DDI_DEV_AUTOINCR);
2636 2775
2637 2776 sp->request_sense_ptr += sense_sz;
2638 2777 sp->request_sense_length -= sense_sz;
2639 2778
2640 2779 /* Place command on done queue. */
2641 2780 if (sp->request_sense_length == 0) {
2642 2781 ql_add_link_b(done_q, &sp->cmd);
2643 - ha->status_srb = NULL;
2782 + rsp_q->status_srb = NULL;
2644 2783 }
2645 2784 }
2646 2785
2647 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2786 + QL_PRINT_3(ha, "done\n");
2648 2787 }
2649 2788
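The hand-off between ql_status_error() and ql_status_cont_entry() is essentially a chunked copy: the status entry carries the first piece of sense data, and while request_sense_length stays non-zero each continuation entry delivers the next piece until the command can be placed on done_q. A simplified model of that loop, with made-up names and sizes:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for the sense bookkeeping kept in ql_srb_t. */
typedef struct {
	uint8_t	*sense_ptr;	/* next byte of the caller's sense buffer */
	size_t	sense_left;	/* bytes still expected */
} sense_state_t;

/*
 * Consume one entry's worth of sense data; returns 1 when the command
 * is complete and may be placed on the done queue.
 */
static int
consume_sense_chunk(sense_state_t *st, const uint8_t *chunk, size_t chunk_len)
{
	size_t n = (st->sense_left < chunk_len) ? st->sense_left : chunk_len;

	(void) memcpy(st->sense_ptr, chunk, n);
	st->sense_ptr += n;
	st->sense_left -= n;

	return (st->sense_left == 0);
}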
2650 2789 /*
2651 2790 * ql_ip_entry
2652 2791 * Processes received ISP IP entry.
2653 2792 *
2654 2793 * Input:
2655 2794 * ha: adapter state pointer.
2795 + * rsp_q: response queue structure pointer.
2656 2796 * pkt: entry pointer.
2657 2797 * done_q: done queue pointer.
2658 2798 * set_flags: task daemon flags to set.
2659 2799 * reset_flags: task daemon flags to reset.
2660 2800 *
2661 2801 * Context:
2662 2802 * Interrupt or Kernel context, no mailbox commands allowed.
2663 2803 */
2664 2804 /* ARGSUSED */
2665 2805 static void
2666 -ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
2667 - uint32_t *set_flags, uint32_t *reset_flags)
2806 +ql_ip_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ip_entry_t *pkt23,
2807 + ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
2668 2808 {
2669 - ql_srb_t *sp;
2809 + ql_srb_t *sp = NULL;
2670 2810 uint32_t index, resp_identifier;
2671 2811 ql_tgt_t *tq;
2672 2812
2673 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2813 + QL_PRINT_3(ha, "started\n");
2674 2814
2675 2815 /* Validate the response entry handle. */
2676 - resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2816 + resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
2817 + &pkt23->handle);
2677 2818 index = resp_identifier & OSC_INDEX_MASK;
2678 - if (index < MAX_OUTSTANDING_COMMANDS) {
2819 + if (index < ha->osc_max_cnt) {
2679 2820 /* the index seems reasonable */
2680 - sp = ha->outstanding_cmds[index];
2821 + if ((sp = ha->outstanding_cmds[index]) == NULL) {
2822 + sp = ql_verify_preprocessed_cmd(ha, rsp_q,
2823 + (uint32_t *)&pkt23->handle,
2824 + (uint32_t *)&resp_identifier, set_flags,
2825 + reset_flags);
2826 + }
2681 2827 if (sp != NULL) {
2682 - if (sp->handle == resp_identifier) {
2828 + if (sp == QL_ABORTED_SRB(ha)) {
2829 + EL(ha, "QL_ABORTED_SRB handle=%xh\n",
2830 + resp_identifier);
2831 + sp = NULL;
2832 + ha->outstanding_cmds[index] = NULL;
2833 + } else if (sp->handle == resp_identifier) {
2683 2834 /* Neo, you're the one... */
2684 2835 ha->outstanding_cmds[index] = NULL;
2685 2836 sp->handle = 0;
2686 2837 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2687 2838 } else {
2688 2839 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2689 2840 resp_identifier, sp->handle);
2690 2841 sp = NULL;
2691 2842 ql_signal_abort(ha, set_flags);
2692 2843 }
2693 - } else {
2694 - sp = ql_verify_preprocessed_cmd(ha,
2695 - (uint32_t *)&pkt23->handle, set_flags, reset_flags);
2696 2844 }
2697 2845 } else {
2698 2846 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2699 2847 index, resp_identifier);
2700 2848 ql_signal_abort(ha, set_flags);
2701 2849 }
2702 2850
2703 2851 if (sp != NULL) {
2704 2852 tq = sp->lun_queue->target_queue;
2705 2853
2706 2854 /* Set ISP completion status */
2707 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
2855 + if (CFG_IST(ha, CFG_CTRL_24XX)) {
2708 2856 ip_cmd_entry_t *pkt24 = (ip_cmd_entry_t *)pkt23;
2709 2857
2710 2858 sp->pkt->pkt_reason = ddi_get16(
2711 - ha->hba_buf.acc_handle, &pkt24->hdl_status);
2859 + rsp_q->rsp_ring.acc_handle, &pkt24->hdl_status);
2712 2860 } else {
2713 2861 sp->pkt->pkt_reason = ddi_get16(
2714 - ha->hba_buf.acc_handle, &pkt23->comp_status);
2862 + rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
2715 2863 }
2716 2864
2717 2865 if (ha->task_daemon_flags & LOOP_DOWN) {
2718 2866 EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
2719 2867 tq->d_id.b24);
2720 2868
2721 2869 /* Set retry status. */
2722 2870 sp->flags |= SRB_RETRY;
2723 2871
2724 2872 } else if (tq->port_down_retry_count &&
2725 2873 (sp->pkt->pkt_reason == CS_INCOMPLETE ||
2726 2874 sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
2727 2875 sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2728 2876 sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2729 2877 sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2730 2878 EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
2731 2879 sp->pkt->pkt_reason, tq->d_id.b24,
2732 2880 tq->port_down_retry_count);
2733 2881
2734 2882 /* Set retry status. */
2735 2883 sp->flags |= SRB_RETRY;
2736 2884
2737 2885 if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2738 2886 sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
2739 2887 ha->adapter_stats->d_stats[lobyte(
2740 2888 tq->loop_id)].logouts_recvd++;
2741 2889 ql_send_logo(ha, tq, done_q);
2742 2890 }
2743 2891
2744 2892 /* Acquire device queue lock. */
2745 2893 DEVICE_QUEUE_LOCK(tq);
2746 2894
2747 2895 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2748 2896 tq->flags |= TQF_QUEUE_SUSPENDED;
2749 2897
2750 2898 tq->port_down_retry_count--;
2751 2899
2752 2900 ADAPTER_STATE_LOCK(ha);
2753 2901 if (ha->port_retry_timer == 0) {
2754 2902 if ((ha->port_retry_timer =
2755 2903 ha->port_down_retry_delay) == 0) {
2756 2904 *set_flags |=
2757 2905 PORT_RETRY_NEEDED;
2758 2906 }
2759 2907 }
2760 2908 ADAPTER_STATE_UNLOCK(ha);
2761 2909 }
2762 2910
2763 2911 /* Release device queue specific lock. */
2764 2912 DEVICE_QUEUE_UNLOCK(tq);
2765 2913
2766 2914 } else if (sp->pkt->pkt_reason == CS_RESET) {
2767 2915 EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);
2768 2916
2769 2917 /* Set retry status. */
2770 2918 sp->flags |= SRB_RETRY;
2771 2919 } else {
2772 2920 if (sp->pkt->pkt_reason != CS_COMPLETE) {
2773 2921 EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
2774 2922 sp->pkt->pkt_reason, tq->d_id.b24);
2775 2923 }
2776 2924 }
2777 2925
2778 2926 /* Set completed status. */
2779 2927 sp->flags |= SRB_ISP_COMPLETED;
2780 2928
2781 2929 ql_add_link_b(done_q, &sp->cmd);
2782 2930
2783 2931 }
2784 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2932 + QL_PRINT_3(ha, "done\n");
2785 2933 }
2786 2934
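The handle validation at the top of ql_ip_entry() (repeated below in the MS and ELS pass-through handlers) follows a single pattern: mask the low bits of the IOCB handle into an index, bound-check it against the outstanding-command array, fall back to ql_verify_preprocessed_cmd() when the slot is empty, and drop the entry when the slot holds the aborted-SRB sentinel or a mismatched handle. The sketch below models that decision tree with stand-in types and values; it is not the driver's code.

#include <stddef.h>
#include <stdint.h>

#define	OSC_INDEX_MASK		0x0fff		/* illustrative mask */
#define	ABORTED_SENTINEL	((srb_t *)-1)	/* stand-in for QL_ABORTED_SRB */

typedef struct srb {
	uint32_t handle;
} srb_t;

/*
 * Resolve a response-entry handle to its command, or NULL when the
 * entry must be ignored.  *need_isp_abort is set for corrupted handles.
 */
static srb_t *
resolve_handle(srb_t **outstanding, uint32_t osc_max_cnt,
    uint32_t resp_identifier, int *need_isp_abort)
{
	uint32_t index = resp_identifier & OSC_INDEX_MASK;
	srb_t *sp;

	if (index >= osc_max_cnt) {
		*need_isp_abort = 1;		/* index out of range */
		return (NULL);
	}
	sp = outstanding[index];
	if (sp == NULL) {
		/* real driver retries via ql_verify_preprocessed_cmd() */
		return (NULL);
	}
	if (sp == ABORTED_SENTINEL) {
		outstanding[index] = NULL;	/* reclaim the aborted slot */
		return (NULL);
	}
	if (sp->handle != resp_identifier) {
		*need_isp_abort = 1;		/* handle mismatch */
		return (NULL);
	}
	outstanding[index] = NULL;		/* claim the command */
	sp->handle = 0;
	return (sp);
}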
2787 2935 /*
2788 2936 * ql_ip_rcv_entry
2789 2937 * Processes received ISP IP buffers entry.
2790 2938 *
2791 2939 * Input:
2792 2940 * ha: adapter state pointer.
2941 + * rsp_q: response queue structure pointer.
2793 2942 * pkt: entry pointer.
2794 2943 * done_q: done queue pointer.
2795 2944 * set_flags: task daemon flags to set.
2796 2945 * reset_flags: task daemon flags to reset.
2797 2946 *
2798 2947 * Context:
2799 2948 * Interrupt or Kernel context, no mailbox commands allowed.
2800 2949 */
2801 2950 /* ARGSUSED */
2802 2951 static void
2803 -ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2804 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2952 +ql_ip_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2953 + ip_rcv_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2954 + uint64_t *reset_flags)
2805 2955 {
2806 2956 port_id_t s_id;
2807 2957 uint16_t index;
2808 2958 uint8_t cnt;
2809 2959 ql_tgt_t *tq;
2810 2960
2811 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2961 + QL_PRINT_3(ha, "started\n");
2812 2962
2813 2963 /* Locate device queue. */
2814 2964 s_id.b.al_pa = pkt->s_id[0];
2815 2965 s_id.b.area = pkt->s_id[1];
2816 2966 s_id.b.domain = pkt->s_id[2];
2817 2967 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2818 2968 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2819 2969 return;
2820 2970 }
2821 2971
2822 - tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2823 - &pkt->seq_length);
2972 + tq->ub_sequence_length = (uint16_t)ddi_get16(
2973 + rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
2824 2974 tq->ub_total_seg_cnt = pkt->segment_count;
2825 2975 tq->ub_seq_id = ++ha->ub_seq_id;
2826 2976 tq->ub_seq_cnt = 0;
2827 2977 tq->ub_frame_ro = 0;
2828 2978 tq->ub_loop_id = pkt->loop_id;
2829 2979 ha->rcv_dev_q = tq;
2830 2980
2831 2981 for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2832 2982 tq->ub_total_seg_cnt; cnt++) {
2833 2983
2834 - index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2984 + index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
2835 2985 &pkt->buffer_handle[cnt]);
2836 2986
2837 2987 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2838 2988 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2839 2989 *set_flags |= ISP_ABORT_NEEDED;
2840 2990 break;
2841 2991 }
2842 2992 }
2843 2993
2844 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2994 + QL_PRINT_3(ha, "done\n");
2845 2995 }
2846 2996
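The three IP receive handlers share the same buffer-posting loop: each buffer handle in the entry is handed to ql_ub_frame_hdr() until the expected number of sequence segments has been assembled, and any failure escalates to an ISP abort. A compact model of that loop, with the frame assembler reduced to a callback; in the driver the sequence count lives on the target queue and is advanced by ql_ub_frame_hdr() itself.

#include <stdint.h>

/*
 * Post receive-buffer handles until the sequence is complete or the
 * assembler fails; returns -1 when the caller should set
 * ISP_ABORT_NEEDED.
 */
static int
post_rcv_buffers(const uint16_t *handles, uint16_t handle_cnt,
    uint16_t *seq_cnt, uint16_t total_seg_cnt,
    int (*frame_hdr)(uint16_t handle))
{
	uint16_t cnt;

	for (cnt = 0; cnt < handle_cnt && *seq_cnt < total_seg_cnt; cnt++) {
		if (frame_hdr(handles[cnt]) != 0) {
			return (-1);
		}
		(*seq_cnt)++;	/* simplified; see the note above */
	}
	return (0);
}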
2847 2997 /*
2848 2998 * ql_ip_rcv_cont_entry
2849 2999 * Processes received ISP IP buffers continuation entry.
2850 3000 *
2851 3001 * Input:
2852 3002 * ha: adapter state pointer.
3003 + * rsp_q: response queue structure pointer.
2853 3004 * pkt: entry pointer.
2854 3005 * done_q: done queue pointer.
2855 3006 * set_flags: task daemon flags to set.
2856 3007 * reset_flags: task daemon flags to reset.
2857 3008 *
2858 3009 * Context:
2859 3010 * Interrupt or Kernel context, no mailbox commands allowed.
2860 3011 */
2861 3012 /* ARGSUSED */
2862 3013 static void
2863 -ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2864 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3014 +ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3015 + ip_rcv_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3016 + uint64_t *reset_flags)
2865 3017 {
2866 3018 uint16_t index;
2867 3019 uint8_t cnt;
2868 3020 ql_tgt_t *tq;
2869 3021
2870 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3022 + QL_PRINT_3(ha, "started\n");
2871 3023
2872 3024 if ((tq = ha->rcv_dev_q) == NULL) {
2873 3025 EL(ha, "No IP receive device\n");
2874 3026 return;
2875 3027 }
2876 3028
2877 3029 for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2878 3030 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2879 3031
2880 - index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
3032 + index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
2881 3033 &pkt->buffer_handle[cnt]);
2882 3034
2883 3035 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2884 3036 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2885 3037 *set_flags |= ISP_ABORT_NEEDED;
2886 3038 break;
2887 3039 }
2888 3040 }
2889 3041
2890 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3042 + QL_PRINT_3(ha, "done\n");
2891 3043 }
2892 3044
2893 3045 /*
2894 3046 * ip_rcv_24xx_entry_t
2895 3047 * Processes received ISP24xx IP buffers entry.
2896 3048 *
2897 3049 * Input:
2898 3050 * ha: adapter state pointer.
3051 + * rsp_q: response queue structure pointer.
2899 3052 * pkt: entry pointer.
2900 3053 * done_q: done queue pointer.
2901 3054 * set_flags: task daemon flags to set.
2902 3055 * reset_flags: task daemon flags to reset.
2903 3056 *
2904 3057 * Context:
2905 3058 * Interrupt or Kernel context, no mailbox commands allowed.
2906 3059 */
2907 3060 /* ARGSUSED */
2908 3061 static void
2909 -ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2910 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3062 +ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3063 + ip_rcv_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3064 + uint64_t *reset_flags)
2911 3065 {
2912 3066 port_id_t s_id;
2913 3067 uint16_t index;
2914 3068 uint8_t cnt;
2915 3069 ql_tgt_t *tq;
2916 3070
2917 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3071 + QL_PRINT_3(ha, "started\n");
2918 3072
2919 3073 /* Locate device queue. */
2920 3074 s_id.b.al_pa = pkt->s_id[0];
2921 3075 s_id.b.area = pkt->s_id[1];
2922 3076 s_id.b.domain = pkt->s_id[2];
2923 3077 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2924 3078 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2925 3079 return;
2926 3080 }
2927 3081
2928 3082 if (tq->ub_total_seg_cnt == 0) {
2929 3083 tq->ub_sequence_length = (uint16_t)ddi_get16(
2930 - ha->hba_buf.acc_handle, &pkt->seq_length);
3084 + rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
2931 3085 tq->ub_total_seg_cnt = pkt->segment_count;
2932 3086 tq->ub_seq_id = ++ha->ub_seq_id;
2933 3087 tq->ub_seq_cnt = 0;
2934 3088 tq->ub_frame_ro = 0;
2935 3089 tq->ub_loop_id = (uint16_t)ddi_get16(
2936 - ha->hba_buf.acc_handle, &pkt->n_port_hdl);
3090 + rsp_q->rsp_ring.acc_handle, &pkt->n_port_hdl);
2937 3091 }
2938 3092
2939 3093 for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2940 3094 tq->ub_total_seg_cnt; cnt++) {
2941 3095
2942 - index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
3096 + index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
2943 3097 &pkt->buffer_handle[cnt]);
2944 3098
2945 3099 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2946 3100 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2947 3101 *set_flags |= ISP_ABORT_NEEDED;
2948 3102 break;
2949 3103 }
2950 3104 }
2951 3105
2952 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3106 + QL_PRINT_3(ha, "done\n");
2953 3107 }
2954 3108
2955 3109 /*
2956 3110 * ql_ms_entry
2957 3111 * Processes received Name/Management/CT Pass-Through entry.
2958 3112 *
2959 3113 * Input:
2960 3114 * ha: adapter state pointer.
3115 + * rsp_q: response queue structure pointer.
2961 3116 * pkt23: entry pointer.
2962 3117 * done_q: done queue pointer.
2963 3118 * set_flags: task daemon flags to set.
2964 3119 * reset_flags: task daemon flags to reset.
2965 3120 *
2966 3121 * Context:
2967 3122 * Interrupt or Kernel context, no mailbox commands allowed.
2968 3123 */
2969 3124 /* ARGSUSED */
2970 3125 static void
2971 -ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
2972 - uint32_t *set_flags, uint32_t *reset_flags)
3126 +ql_ms_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ms_entry_t *pkt23,
3127 + ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
2973 3128 {
2974 - ql_srb_t *sp;
3129 + ql_srb_t *sp = NULL;
2975 3130 uint32_t index, cnt, resp_identifier;
2976 3131 ql_tgt_t *tq;
2977 3132 ct_passthru_entry_t *pkt24 = (ct_passthru_entry_t *)pkt23;
2978 3133
2979 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3134 + QL_PRINT_3(ha, "started\n");
2980 3135
2981 3136 /* Validate the response entry handle. */
2982 - resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
3137 + resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
3138 + &pkt23->handle);
2983 3139 index = resp_identifier & OSC_INDEX_MASK;
2984 - if (index < MAX_OUTSTANDING_COMMANDS) {
3140 + if (index < ha->osc_max_cnt) {
2985 3141 /* the index seems reasonable */
2986 - sp = ha->outstanding_cmds[index];
3142 + if ((sp = ha->outstanding_cmds[index]) == NULL) {
3143 + sp = ql_verify_preprocessed_cmd(ha, rsp_q,
3144 + (uint32_t *)&pkt23->handle,
3145 + (uint32_t *)&resp_identifier, set_flags,
3146 + reset_flags);
3147 + }
2987 3148 if (sp != NULL) {
2988 - if (sp->handle == resp_identifier) {
3149 + if (sp == QL_ABORTED_SRB(ha)) {
3150 + EL(ha, "QL_ABORTED_SRB handle=%xh\n",
3151 + resp_identifier);
3152 + sp = NULL;
3153 + ha->outstanding_cmds[index] = NULL;
3154 + } else if (sp->handle == resp_identifier) {
2989 3155 /* Neo, you're the one... */
2990 3156 ha->outstanding_cmds[index] = NULL;
2991 3157 sp->handle = 0;
2992 3158 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2993 3159 } else {
2994 3160 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2995 3161 resp_identifier, sp->handle);
2996 3162 sp = NULL;
2997 3163 ql_signal_abort(ha, set_flags);
2998 3164 }
2999 - } else {
3000 - sp = ql_verify_preprocessed_cmd(ha,
3001 - (uint32_t *)&pkt23->handle, set_flags, reset_flags);
3002 3165 }
3003 3166 } else {
3004 3167 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3005 3168 index, resp_identifier);
3006 3169 ql_signal_abort(ha, set_flags);
3007 3170 }
3008 3171
3009 3172 if (sp != NULL) {
3010 3173 if (!(sp->flags & SRB_MS_PKT)) {
3011 3174 EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
3012 3175 sp->flags);
3013 3176 *set_flags |= ISP_ABORT_NEEDED;
3014 3177 return;
3015 3178 }
3016 3179
3017 3180 tq = sp->lun_queue->target_queue;
3018 3181
3019 3182 /* Set ISP completion status */
3020 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
3183 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3021 3184 sp->pkt->pkt_reason = ddi_get16(
3022 - ha->hba_buf.acc_handle, &pkt24->status);
3185 + rsp_q->rsp_ring.acc_handle, &pkt24->status);
3023 3186 } else {
3024 3187 sp->pkt->pkt_reason = ddi_get16(
3025 - ha->hba_buf.acc_handle, &pkt23->comp_status);
3188 + rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
3026 3189 }
3027 3190
3028 3191 if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
3029 3192 sp->retry_count) {
3030 3193 EL(ha, "Resouce Unavailable Retry = %d\n",
3031 3194 sp->retry_count);
3032 3195
3033 3196 /* Set retry status. */
3034 3197 sp->retry_count--;
3035 3198 sp->flags |= SRB_RETRY;
3036 3199
3037 3200 /* Acquire device queue lock. */
3038 3201 DEVICE_QUEUE_LOCK(tq);
3039 3202
3040 3203 if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
3041 3204 tq->flags |= TQF_QUEUE_SUSPENDED;
3042 3205
3043 3206 ADAPTER_STATE_LOCK(ha);
3044 3207 if (ha->port_retry_timer == 0) {
3045 3208 ha->port_retry_timer = 2;
3046 3209 }
3047 3210 ADAPTER_STATE_UNLOCK(ha);
3048 3211 }
3049 3212
3050 3213 /* Release device queue specific lock. */
3051 3214 DEVICE_QUEUE_UNLOCK(tq);
3052 3215
3053 3216 } else if (tq->port_down_retry_count &&
3054 3217 (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
3055 3218 sp->pkt->pkt_reason == CS_PORT_BUSY)) {
3056 3219 EL(ha, "Port Down Retry\n");
3057 3220
3058 3221 /* Set retry status. */
3059 3222 sp->flags |= SRB_RETRY;
3060 3223
3061 3224 /* Acquire device queue lock. */
3062 3225 DEVICE_QUEUE_LOCK(tq);
3063 3226
3064 3227 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
3065 3228 tq->flags |= TQF_QUEUE_SUSPENDED;
3066 3229
3067 3230 tq->port_down_retry_count--;
3068 3231
3069 3232 ADAPTER_STATE_LOCK(ha);
3070 3233 if (ha->port_retry_timer == 0) {
3071 3234 if ((ha->port_retry_timer =
3072 3235 ha->port_down_retry_delay) == 0) {
3073 3236 *set_flags |=
3074 3237 PORT_RETRY_NEEDED;
3075 3238 }
3076 3239 }
3077 3240 ADAPTER_STATE_UNLOCK(ha);
3078 3241 }
3079 3242 /* Release device queue specific lock. */
3080 3243 DEVICE_QUEUE_UNLOCK(tq);
3081 3244
3082 3245 } else if (sp->pkt->pkt_reason == CS_RESET) {
3083 3246 EL(ha, "Reset Retry\n");
3084 3247
3085 3248 /* Set retry status. */
3086 3249 sp->flags |= SRB_RETRY;
3087 3250
3088 - } else if (CFG_IST(ha, CFG_CTRL_24258081) &&
3251 + } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
3089 3252 sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
3090 - cnt = ddi_get32(ha->hba_buf.acc_handle,
3253 + cnt = ddi_get32(rsp_q->rsp_ring.acc_handle,
3091 3254 &pkt24->resp_byte_count);
3092 3255 if (cnt < sizeof (fc_ct_header_t)) {
3093 3256 EL(ha, "Data underrun\n");
3094 3257 } else {
3095 3258 sp->pkt->pkt_reason = CS_COMPLETE;
3096 3259 }
3097 3260
3261 + } else if (sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
3262 + sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT) {
3263 + EL(ha, "Port unavailable %xh\n", sp->pkt->pkt_reason);
3264 + DEVICE_QUEUE_LOCK(tq);
3265 + tq->flags |= TQF_LOGIN_NEEDED;
3266 + DEVICE_QUEUE_UNLOCK(tq);
3267 + sp->pkt->pkt_reason = CS_TIMEOUT;
3268 +
3098 3269 } else if (sp->pkt->pkt_reason != CS_COMPLETE) {
3099 3270 EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
3100 3271 }
3101 3272
3102 3273 if (sp->pkt->pkt_reason == CS_COMPLETE) {
3103 3274 /*EMPTY*/
3104 - QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
3105 - ha->instance, sp->pkt->pkt_cmd[8],
3106 - sp->pkt->pkt_cmd[9]);
3275 + QL_PRINT_3(ha, "ct_cmdrsp=%x%02xh resp\n",
3276 + sp->pkt->pkt_cmd[8], sp->pkt->pkt_cmd[9]);
3107 3277 QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
3108 3278 }
3109 3279
3110 3280 /* For nameserver restore command, management change header. */
3111 3281 if ((sp->flags & SRB_RETRY) == 0) {
3112 - tq->d_id.b24 == 0xfffffc ?
3282 + tq->d_id.b24 == FS_NAME_SERVER ?
3113 3283 ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
3114 3284 sp->pkt->pkt_cmd, B_TRUE) :
3115 3285 ql_cthdr_endian(sp->pkt->pkt_resp_acc,
3116 3286 sp->pkt->pkt_resp, B_TRUE);
3117 3287 }
3118 3288
3119 3289 /* Set completed status. */
3120 3290 sp->flags |= SRB_ISP_COMPLETED;
3121 3291
3122 3292 /* Place command on done queue. */
3123 3293 ql_add_link_b(done_q, &sp->cmd);
3124 3294
3125 3295 }
3126 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3296 + QL_PRINT_3(ha, "done\n");
3127 3297 }
3128 3298
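In ql_ms_entry() a data underrun on a CT pass-through is tolerated as long as at least a complete CT response header arrived; anything shorter is left as an error. A small sketch of that check, with an illustrative header size standing in for sizeof (fc_ct_header_t):

#include <stdint.h>

#define	CT_HEADER_SIZE		16	/* illustrative stand-in */
#define	CS_COMPLETE		0x00	/* stand-in completion codes */
#define	CS_DATA_UNDERRUN	0x15

/*
 * Downgrade a CT pass-through underrun to a normal completion when the
 * response still holds a full CT header.
 */
static uint16_t
ct_underrun_status(uint16_t comp_status, uint32_t resp_byte_count)
{
	if (comp_status == CS_DATA_UNDERRUN &&
	    resp_byte_count >= CT_HEADER_SIZE) {
		return (CS_COMPLETE);
	}
	return (comp_status);
}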
3129 3299 /*
3130 3300 * ql_report_id_entry
3131 3301 * Processes received Name/Management/CT Pass-Through entry.
3132 3302 *
3133 3303 * Input:
3134 3304 * ha: adapter state pointer.
3305 + * rsp_q: response queue structure pointer.
3135 3306 * pkt: entry pointer.
3136 3307 * done_q: done queue pointer.
3137 3308 * set_flags: task daemon flags to set.
3138 3309 * reset_flags: task daemon flags to reset.
3139 3310 *
3140 3311 * Context:
3141 3312 * Interrupt or Kernel context, no mailbox commands allowed.
3142 3313 */
3143 3314 /* ARGSUSED */
3144 3315 static void
3145 -ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
3146 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3316 +ql_report_id_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3317 + report_id_acq_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3318 + uint64_t *reset_flags)
3147 3319 {
3148 3320 ql_adapter_state_t *vha;
3149 3321
3150 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3322 + QL_PRINT_3(ha, "started\n");
3151 3323
3152 - EL(ha, "format=%d, vp=%d, status=%d\n",
3153 - pkt->format, pkt->vp_index, pkt->status);
3324 + EL(ha, "format=%d, index=%d, status=%d\n",
3325 + pkt->format, pkt->vp_index, pkt->vp_status);
3154 3326
3155 3327 if (pkt->format == 1) {
3156 3328 /* Locate port state structure. */
3157 3329 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3158 3330 if (vha->vp_index == pkt->vp_index) {
3159 3331 break;
3160 3332 }
3161 3333 }
3162 - if (vha != NULL && vha->vp_index != 0 &&
3163 - (pkt->status == CS_COMPLETE ||
3164 - pkt->status == CS_PORT_ID_CHANGE)) {
3165 - *set_flags |= LOOP_RESYNC_NEEDED;
3166 - *reset_flags &= ~LOOP_RESYNC_NEEDED;
3167 - vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3168 - TASK_DAEMON_LOCK(ha);
3169 - vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3170 - vha->task_daemon_flags &= ~LOOP_DOWN;
3171 - TASK_DAEMON_UNLOCK(ha);
3334 + if (vha != NULL) {
3335 + if (pkt->vp_status == CS_COMPLETE ||
3336 + pkt->vp_status == CS_PORT_ID_CHANGE) {
3337 + if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3338 + vha->fcoe_fcf_idx = pkt->fcf_index;
3339 + }
3340 + if (vha->vp_index != 0) {
3341 + *set_flags |= LOOP_RESYNC_NEEDED;
3342 + *reset_flags &= ~LOOP_RESYNC_NEEDED;
3343 + vha->loop_down_timer =
3344 + LOOP_DOWN_TIMER_OFF;
3345 + TASK_DAEMON_LOCK(ha);
3346 + vha->task_daemon_flags |=
3347 + LOOP_RESYNC_NEEDED;
3348 + vha->task_daemon_flags &= ~LOOP_DOWN;
3349 + TASK_DAEMON_UNLOCK(ha);
3350 + }
3351 + ADAPTER_STATE_LOCK(ha);
3352 + vha->flags &= ~VP_ID_NOT_ACQUIRED;
3353 + ADAPTER_STATE_UNLOCK(ha);
3354 + } else {
3355 + /* FA-WWPN failure. */
3356 + if (pkt->vp_status == CS_INCOMPLETE &&
3357 + pkt->ls_rjt_reason_code == 0xff &&
3358 + pkt->ls_rjt_explanation == 0x44) {
3359 + *set_flags |= ISP_ABORT_NEEDED;
3360 + }
3361 + if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3362 + EL(ha, "sts sc=%d, rjt_rea=%xh, "
3363 + "rjt_exp=%xh, rjt_sc=%xh\n",
3364 + pkt->status_subcode,
3365 + pkt->ls_rjt_reason_code,
3366 + pkt->ls_rjt_explanation,
3367 + pkt->ls_rjt_subcode);
3368 + }
3369 + ADAPTER_STATE_LOCK(ha);
3370 + vha->flags |= VP_ID_NOT_ACQUIRED;
3371 + ADAPTER_STATE_UNLOCK(ha);
3372 + }
3172 3373 }
3173 3374 }
3174 3375
3175 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3376 + QL_PRINT_3(ha, "done\n");
3176 3377 }
3177 3378
3178 3379 /*
3179 3380 * ql_els_entry
3180 3381 * Processes received ELS Pass-Through entry.
3181 3382 *
3182 3383 * Input:
3183 3384 * ha: adapter state pointer.
3385 + * rsp_q: response queue structure pointer.
3184 3386 * pkt23: entry pointer.
3185 3387 * done_q: done queue pointer.
3186 3388 * set_flags: task daemon flags to set.
3187 3389 * reset_flags: task daemon flags to reset.
3188 3390 *
3189 3391 * Context:
3190 3392 * Interrupt or Kernel context, no mailbox commands allowed.
3191 3393 */
3192 3394 /* ARGSUSED */
3193 3395 static void
3194 -ql_els_passthru_entry(ql_adapter_state_t *ha, els_passthru_entry_rsp_t *rsp,
3195 - ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3396 +ql_els_passthru_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3397 + els_passthru_entry_rsp_t *rsp, ql_head_t *done_q, uint64_t *set_flags,
3398 + uint64_t *reset_flags)
3196 3399 {
3197 3400 ql_tgt_t *tq;
3198 - port_id_t d_id, s_id;
3199 - ql_srb_t *srb;
3401 + port_id_t s_id;
3402 + ql_srb_t *srb = NULL;
3200 3403 uint32_t index, resp_identifier;
3201 3404
3202 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3405 + QL_PRINT_3(ha, "started\n");
3203 3406
3204 3407 /* Validate the response entry handle. */
3205 - resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &rsp->handle);
3408 + resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &rsp->handle);
3206 3409 index = resp_identifier & OSC_INDEX_MASK;
3207 - if (index < MAX_OUTSTANDING_COMMANDS) {
3410 + if (index < ha->osc_max_cnt) {
3208 3411 /* the index seems reasonable */
3209 - srb = ha->outstanding_cmds[index];
3412 + if ((srb = ha->outstanding_cmds[index]) == NULL) {
3413 + srb = ql_verify_preprocessed_cmd(ha, rsp_q,
3414 + (uint32_t *)&rsp->handle,
3415 + (uint32_t *)&resp_identifier, set_flags,
3416 + reset_flags);
3417 + }
3210 3418 if (srb != NULL) {
3211 - if (srb->handle == resp_identifier) {
3419 + if (srb == QL_ABORTED_SRB(ha)) {
3420 + EL(ha, "QL_ABORTED_SRB handle=%xh\n",
3421 + resp_identifier);
3422 + srb = NULL;
3423 + ha->outstanding_cmds[index] = NULL;
3424 + } else if (srb->handle == resp_identifier) {
3212 3425 /* Neo, you're the one... */
3213 3426 ha->outstanding_cmds[index] = NULL;
3214 3427 srb->handle = 0;
3215 3428 srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3216 3429 } else {
3217 3430 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3218 3431 resp_identifier, srb->handle);
3219 3432 srb = NULL;
3220 3433 ql_signal_abort(ha, set_flags);
3221 3434 }
3222 - } else {
3223 - srb = ql_verify_preprocessed_cmd(ha,
3224 - (uint32_t *)&rsp->handle, set_flags, reset_flags);
3225 3435 }
3226 3436 } else {
3227 3437 EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3228 3438 index, resp_identifier);
3229 3439 ql_signal_abort(ha, set_flags);
3230 3440 }
3231 3441
3232 3442 if (srb != NULL) {
3233 3443 if (!(srb->flags & SRB_ELS_PKT)) {
3234 - EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed",
3444 + EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed\n",
3235 3445 srb->flags);
3236 3446 *set_flags |= ISP_ABORT_NEEDED;
3237 3447 return;
3238 3448 }
3239 3449
3240 3450 (void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3241 3451 DDI_DMA_SYNC_FORKERNEL);
3242 3452
3243 3453 /* Set ISP completion status */
3244 - srb->pkt->pkt_reason = ddi_get16(
3245 - ha->hba_buf.acc_handle, &rsp->comp_status);
3454 + srb->pkt->pkt_reason = ddi_get16(rsp_q->rsp_ring.acc_handle,
3455 + &rsp->comp_status);
3246 3456
3247 3457 if (srb->pkt->pkt_reason != CS_COMPLETE) {
3248 3458 la_els_rjt_t rjt;
3249 - EL(ha, "status err=%xh\n", srb->pkt->pkt_reason);
3250 3459
3460 + EL(ha, "srb=%ph,status err=%xh\n",
3461 + srb, srb->pkt->pkt_reason);
3462 +
3251 3463 if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3252 3464 EL(ha, "e1=%xh e2=%xh\n",
3253 3465 rsp->error_subcode1, rsp->error_subcode2);
3254 3466 }
3255 3467
3256 3468 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3257 3469
3258 3470 /* Build RJT in the response. */
3259 3471 rjt.ls_code.ls_code = LA_ELS_RJT;
3260 3472 rjt.reason = FC_REASON_NO_CONNECTION;
3261 3473
3262 3474 ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3263 3475 (uint8_t *)srb->pkt->pkt_resp,
3264 3476 sizeof (rjt), DDI_DEV_AUTOINCR);
3265 3477
3266 3478 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3267 3479 srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3268 3480 }
3269 3481
3270 3482 if (srb->pkt->pkt_reason == CS_COMPLETE) {
3271 3483 uint8_t opcode;
3272 3484 uint16_t loop_id;
3273 3485
3274 3486 /* Indicate ISP completion */
3275 3487 srb->flags |= SRB_ISP_COMPLETED;
3276 3488
3277 - loop_id = ddi_get16(ha->hba_buf.acc_handle,
3489 + loop_id = ddi_get16(rsp_q->rsp_ring.acc_handle,
3278 3490 &rsp->n_port_hdl);
3279 3491
3280 - if (ha->topology & QL_N_PORT) {
3281 - /* create a target Q if there isn't one */
3282 - tq = ql_loop_id_to_queue(ha, loop_id);
3283 - if (tq == NULL) {
3284 - d_id.b.al_pa = rsp->d_id_7_0;
3285 - d_id.b.area = rsp->d_id_15_8;
3286 - d_id.b.domain = rsp->d_id_23_16;
3287 - /* Acquire adapter state lock. */
3288 - ADAPTER_STATE_LOCK(ha);
3492 + /* tq is obtained from lun_queue */
3493 + tq = srb->lun_queue->target_queue;
3289 3494
3290 - tq = ql_dev_init(ha, d_id, loop_id);
3291 - EL(ha, " tq = %x\n", tq);
3292 -
3293 - ADAPTER_STATE_UNLOCK(ha);
3294 - }
3295 -
3495 + if (ha->topology & QL_N_PORT) {
3296 3496 /* on plogi success assume the chosen s_id */
3297 - opcode = ddi_get8(ha->hba_buf.acc_handle,
3497 + opcode = ddi_get8(rsp_q->rsp_ring.acc_handle,
3298 3498 &rsp->els_cmd_opcode);
3299 3499
3300 - EL(ha, "els_cmd_opcode=%x srb->pkt=%x\n",
3301 - opcode, srb->pkt);
3500 + EL(ha, "els opcode=%x srb=%ph,pkt=%ph, tq=%ph"
3501 + ", portid=%xh, tqlpid=%xh, loop_id=%xh\n",
3502 + opcode, srb, srb->pkt, tq, tq->d_id.b24,
3503 + tq->loop_id, loop_id);
3302 3504
3303 3505 if (opcode == LA_ELS_PLOGI) {
3304 3506 s_id.b.al_pa = rsp->s_id_7_0;
3305 3507 s_id.b.area = rsp->s_id_15_8;
3306 3508 s_id.b.domain = rsp->s_id_23_16;
3307 3509
3308 3510 ha->d_id.b24 = s_id.b24;
3309 3511 EL(ha, "Set port's source ID %xh\n",
3310 3512 ha->d_id.b24);
3311 3513 }
3312 3514 }
3313 3515 ql_isp_els_handle_rsp_endian(ha, srb);
3314 3516
3315 3517 if (ha != srb->ha) {
3316 3518 EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha);
3317 3519 }
3318 3520
3319 3521 if (tq != NULL) {
3320 3522 tq->logout_sent = 0;
3321 3523 tq->flags &= ~TQF_NEED_AUTHENTICATION;
3322 3524
3323 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
3525 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3324 3526 tq->flags |= TQF_IIDMA_NEEDED;
3325 3527 }
3326 - srb->pkt->pkt_state = FC_PKT_SUCCESS;
3528 + srb->pkt->pkt_state = FC_PKT_SUCCESS;
3327 3529 }
3328 3530 }
3531 +
3532 + /* Remove command from watchdog queue. */
3533 + if (srb->flags & SRB_WATCHDOG_ENABLED) {
3534 + tq = srb->lun_queue->target_queue;
3535 +
3536 + DEVICE_QUEUE_LOCK(tq);
3537 + ql_remove_link(&tq->wdg, &srb->wdg);
3538 + srb->flags &= ~SRB_WATCHDOG_ENABLED;
3539 + DEVICE_QUEUE_UNLOCK(tq);
3540 + }
3541 +
3329 3542 /* invoke the callback */
3330 - ql_awaken_task_daemon(ha, srb, 0, 0);
3543 + ql_io_comp(srb);
3331 3544 }
3332 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3545 + QL_PRINT_3(ha, "done\n");
3333 3546 }
3334 3547
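When an ELS pass-through completes in error, ql_els_passthru_entry() synthesizes an LS_RJT in the caller's response buffer so the upper layer sees an ordinary ELS rejection rather than a bare transport failure. A minimal sketch of that construction; the structure layout and constant values are simplified stand-ins for the la_els_rjt_t definition used above.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-ins for the transport's ELS definitions. */
#define	LA_ELS_RJT		0x01
#define	FC_REASON_NO_CONNECTION	0x0a

typedef struct {
	uint8_t	ls_code;
	uint8_t	reason;
} els_rjt_t;

/*
 * Build an LS_RJT (reason: no connection) in the response buffer.
 */
static void
build_els_reject(uint8_t *resp_buf, size_t resp_len)
{
	els_rjt_t rjt = { LA_ELS_RJT, FC_REASON_NO_CONNECTION };

	if (resp_len >= sizeof (rjt)) {
		(void) memcpy(resp_buf, &rjt, sizeof (rjt));
	}
}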
3335 3548 /*
3336 3549 * ql_signal_abort
3337 3550 * Signal to the task daemon that a condition warranting an
3338 3551 * isp reset has been detected.
3339 3552 *
3340 3553 * Input:
3341 3554 * ha: adapter state pointer.
3342 3555 * set_flags: task daemon flags to set.
3343 3556 *
3344 3557 * Context:
3345 3558 * Interrupt or Kernel context, no mailbox commands allowed.
3346 3559 */
3347 3560 static void
3348 -ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags)
3561 +ql_signal_abort(ql_adapter_state_t *ha, uint64_t *set_flags)
3349 3562 {
3350 - if (!CFG_IST(ha, CFG_CTRL_8021) &&
3563 + if (!CFG_IST(ha, CFG_CTRL_82XX) &&
3351 3564 !(ha->task_daemon_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
3352 3565 *set_flags |= ISP_ABORT_NEEDED;
3353 3566 }
3354 3567 }