1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2010 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27 /*
28 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 * Copyright (c) 2016 by Delphix. All rights reserved.
30 */
31
32 #pragma ident "Copyright 2010 QLogic Corporation; ql_api.c"
33
34 /*
35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36 *
37 * ***********************************************************************
38 * * **
39 * * NOTICE **
40 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
41 * * ALL RIGHTS RESERVED **
42 * * **
43 * ***********************************************************************
44 *
45 */
46
47 #include <ql_apps.h>
48 #include <ql_api.h>
49 #include <ql_debug.h>
50 #include <ql_init.h>
51 #include <ql_iocb.h>
52 #include <ql_ioctl.h>
53 #include <ql_isr.h>
54 #include <ql_mbx.h>
55 #include <ql_nx.h>
56 #include <ql_xioctl.h>
57
58 /*
59 * Solaris external defines.
60 */
61 extern pri_t minclsyspri;
62 extern pri_t maxclsyspri;
63
64 /*
65 * dev_ops functions prototypes
66 */
67 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
68 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
69 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
70 static int ql_power(dev_info_t *, int, int);
71 static int ql_quiesce(dev_info_t *);
72
73 /*
74 * FCA functions prototypes exported by means of the transport table
75 */
76 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
77 fc_fca_bind_info_t *);
78 static void ql_unbind_port(opaque_t);
79 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
80 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
81 static int ql_els_send(opaque_t, fc_packet_t *);
82 static int ql_get_cap(opaque_t, char *, void *);
83 static int ql_set_cap(opaque_t, char *, void *);
84 static int ql_getmap(opaque_t, fc_lilpmap_t *);
85 static int ql_transport(opaque_t, fc_packet_t *);
86 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
87 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
88 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
89 static int ql_abort(opaque_t, fc_packet_t *, int);
90 static int ql_reset(opaque_t, uint32_t);
91 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
92 static opaque_t ql_get_device(opaque_t, fc_portid_t);
93
94 /*
95 * FCA Driver Support Function Prototypes.
96 */
97 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
98 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
99 ql_srb_t *);
100 static void ql_task_daemon(void *);
101 static void ql_task_thread(ql_adapter_state_t *);
102 static void ql_unsol_callback(ql_srb_t *);
103 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
104 fc_unsol_buf_t *);
105 static void ql_timer(void *);
106 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
107 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
108 uint32_t *, uint32_t *);
109 static void ql_halt(ql_adapter_state_t *, int);
110 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
122 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
123 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
124 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
125 static int ql_login_port(ql_adapter_state_t *, port_id_t);
126 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
127 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
128 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
129 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
130 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
131 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
132 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
133 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
134 ql_srb_t *);
135 static int ql_kstat_update(kstat_t *, int);
136 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
137 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
138 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
139 static void ql_rst_aen(ql_adapter_state_t *);
140 static void ql_restart_queues(ql_adapter_state_t *);
141 static void ql_abort_queues(ql_adapter_state_t *);
142 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
143 static void ql_idle_check(ql_adapter_state_t *);
144 static int ql_loop_resync(ql_adapter_state_t *);
145 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
146 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
147 static int ql_save_config_regs(dev_info_t *);
148 static int ql_restore_config_regs(dev_info_t *);
149 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
150 static int ql_handle_rscn_update(ql_adapter_state_t *);
151 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
152 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
153 static int ql_dump_firmware(ql_adapter_state_t *);
154 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
155 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
156 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
157 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
158 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
159 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
160 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
161 void *);
162 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
163 uint8_t);
164 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
165 static int ql_suspend_adapter(ql_adapter_state_t *);
166 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
167 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
168 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
169 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
170 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
171 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
172 static int ql_setup_interrupts(ql_adapter_state_t *);
173 static int ql_setup_msi(ql_adapter_state_t *);
174 static int ql_setup_msix(ql_adapter_state_t *);
175 static int ql_setup_fixed(ql_adapter_state_t *);
176 static void ql_release_intr(ql_adapter_state_t *);
177 static void ql_disable_intr(ql_adapter_state_t *);
178 static int ql_legacy_intr(ql_adapter_state_t *);
179 static int ql_init_mutex(ql_adapter_state_t *);
180 static void ql_destroy_mutex(ql_adapter_state_t *);
181 static void ql_iidma(ql_adapter_state_t *);
182
183 static int ql_n_port_plogi(ql_adapter_state_t *);
184 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
185 els_descriptor_t *);
186 static void ql_isp_els_request_ctor(els_descriptor_t *,
187 els_passthru_entry_t *);
188 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
189 static int ql_wait_for_td_stop(ql_adapter_state_t *);
190 static void ql_process_idc_event(ql_adapter_state_t *);
191
192 /*
193 * Global data
194 */
195 static uint8_t ql_enable_pm = 1;
196 static int ql_flash_sbus_fpga = 0;
197 uint32_t ql_os_release_level;
198 uint32_t ql_disable_aif = 0;
199 uint32_t ql_disable_msi = 0;
200 uint32_t ql_disable_msix = 0;
201 uint32_t ql_enable_ets = 0;
202 uint16_t ql_osc_wait_count = 1000;
203
204 /* Timer routine variables. */
205 static timeout_id_t ql_timer_timeout_id = NULL;
206 static clock_t ql_timer_ticks;
207
208 /* Soft state head pointer. */
209 void *ql_state = NULL;
210
211 /* Head adapter link. */
212 ql_head_t ql_hba = {
213 NULL,
214 NULL
215 };
216
217 /* Global hba index */
218 uint32_t ql_gfru_hba_index = 1;
219
220 /*
221 * Some IP defines and globals
222 */
223 uint32_t ql_ip_buffer_count = 128;
224 uint32_t ql_ip_low_water = 10;
225 uint8_t ql_ip_fast_post_count = 5;
226 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */
227
/*
 * Device AL_PA to Device Head Queue index array.
 *
 * Indexed by the 8-bit arbitrated-loop physical address (AL_PA); yields
 * the per-adapter device head queue slot for that loop device.  Only a
 * subset of 8-bit values are valid AL_PAs; entries for other values are
 * still present so the table can be indexed by any byte.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
257
/*
 * Device loop_id to ALPA array.
 *
 * Indexed by loop ID (0-125); yields the AL_PA assigned to that loop ID.
 * This is the inverse direction of ql_alpa_to_index above, following the
 * FC-AL priority ordering of valid AL_PA values.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
274
/*
 * 2200 register offsets
 *
 * Offsets are relative to the mapped register base.  An entry of 0xff
 * marks a register the 2200 does not implement (n/a).  Note the 2200
 * exposes only 24 mailboxes; the trailing 8 slots are padded with 0xff.
 */
static reg_off_t reg_off_2200 = {
	0x00,	/* flash_address */
	0x02,	/* flash_data */
	0x06,	/* ctrl_status */
	0x08,	/* ictrl */
	0x0a,	/* istatus */
	0x0c,	/* semaphore */
	0x0e,	/* nvram */
	0x18,	/* req_in */
	0x18,	/* req_out */
	0x1a,	/* resp_in */
	0x1a,	/* resp_out */
	0xff,	/* risc2host - n/a */
	24,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	/* Mailbox out register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	0x96,	/* fpm_diag_config */
	0xa4,	/* pcr */
	0xb0,	/* mctr */
	0xb8,	/* fb_cmd */
	0xc0,	/* hccr */
	0xcc,	/* gpiod */
	0xce,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0xff,	/* pri_req_in - n/a */
	0xff,	/* pri_req_out - n/a */
	0xff,	/* atio_req_in - n/a */
	0xff,	/* atio_req_out - n/a */
	0xff,	/* io_base_addr - n/a */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};
321
/*
 * 2300 register offsets
 *
 * Offsets are relative to the mapped register base.  An entry of 0xff
 * marks a register the 2300 does not implement (n/a).
 */
static reg_off_t reg_off_2300 = {
	0x00,	/* flash_address */
	0x02,	/* flash_data */
	0x06,	/* ctrl_status */
	0x08,	/* ictrl */
	0x0a,	/* istatus */
	0x0c,	/* semaphore */
	0x0e,	/* nvram */
	0x10,	/* req_in */
	0x12,	/* req_out */
	0x14,	/* resp_in */
	0x16,	/* resp_out */
	0x18,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	/* Mailbox out register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	0x96,	/* fpm_diag_config */
	0xa4,	/* pcr */
	0xb0,	/* mctr */
	0x80,	/* fb_cmd */
	0xc0,	/* hccr */
	0xcc,	/* gpiod */
	0xce,	/* gpioe */
	0x1c,	/* host_to_host_sema */
	0xff,	/* pri_req_in - n/a */
	0xff,	/* pri_req_out - n/a */
	0xff,	/* atio_req_in - n/a */
	0xff,	/* atio_req_out - n/a */
	0xff,	/* io_base_addr - n/a */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};
366
/*
 * 2400/2500 register offsets
 *
 * Offsets are relative to the mapped register base.  An entry of 0xff
 * marks a register these chips do not implement (n/a).  Non-static:
 * referenced from other translation units of the driver.
 */
reg_off_t reg_off_2400_2500 = {
	0x00,	/* flash_address */
	0x04,	/* flash_data */
	0x08,	/* ctrl_status */
	0x0c,	/* ictrl */
	0x10,	/* istatus */
	0xff,	/* semaphore - n/a */
	0xff,	/* nvram - n/a */
	0x1c,	/* req_in */
	0x20,	/* req_out */
	0x24,	/* resp_in */
	0x28,	/* resp_out */
	0x44,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	/* Mailbox out register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	0xff,	/* fpm_diag_config - n/a */
	0xff,	/* pcr - n/a */
	0xff,	/* mctr - n/a */
	0xff,	/* fb_cmd - n/a */
	0x48,	/* hccr */
	0x4c,	/* gpiod */
	0x50,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0x2c,	/* pri_req_in */
	0x30,	/* pri_req_out */
	0x3c,	/* atio_req_in */
	0x40,	/* atio_req_out */
	0x54,	/* io_base_addr */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};
411
/*
 * P3 (8021, NetXtreme-based converged adapter) register offsets
 *
 * Offsets are relative to the mapped register base.  An entry of 0xff
 * marks a register this chip does not implement (n/a).  Unlike the
 * older parts, mailbox in/out live at 0x300/0x400 and the nx_* doorbell
 * registers are present.
 */
static reg_off_t reg_off_8021 = {
	0x00,	/* flash_address */
	0x04,	/* flash_data */
	0x08,	/* ctrl_status */
	0x0c,	/* ictrl */
	0x10,	/* istatus */
	0xff,	/* semaphore - n/a */
	0xff,	/* nvram - n/a */
	0xff,	/* req_in - n/a */
	0x0,	/* req_out */
	0x100,	/* resp_in */
	0x200,	/* resp_out */
	0x500,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
	0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
	0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
	0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,

	/* Mailbox out register offsets 0 - 31 */
	0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
	0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
	0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
	0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,

	0xff,	/* fpm_diag_config - n/a */
	0xff,	/* pcr - n/a */
	0xff,	/* mctr - n/a */
	0xff,	/* fb_cmd - n/a */
	0x48,	/* hccr */
	0x4c,	/* gpiod */
	0x50,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0x2c,	/* pri_req_in */
	0x30,	/* pri_req_out */
	0x3c,	/* atio_req_in */
	0x40,	/* atio_req_out */
	0x54,	/* io_base_addr */
	0x380,	/* nx_host_int */
	0x504	/* nx_risc_int */
};
456
/* mutex for protecting variables shared by all instances of the driver */
kmutex_t ql_global_mutex;
/* serializes hardware accesses shared across instances */
kmutex_t ql_global_hw_mutex;
/* protects extended-logging (el) trace state -- see ql_el_trace_desc_ctor() */
kmutex_t ql_global_el_mutex;

/*
 * DMA access attribute structure.
 * Device registers/memory are little-endian; strict ordering is required.
 */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * I/O DMA attributes structures.
 * The QL_DMA_* limits are defined in the driver headers; the only
 * difference between the two templates is the high address bound.
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * Load the default dma attributes.
 * These are copied from the 32/64-bit templates above in _init() and
 * then given per-class s/g list lengths (QL_*_SGLLEN).
 */
static ddi_dma_attr_t ql_32fcsm_cmd_dma_attr;
static ddi_dma_attr_t ql_64fcsm_cmd_dma_attr;
static ddi_dma_attr_t ql_32fcsm_rsp_dma_attr;
static ddi_dma_attr_t ql_64fcsm_rsp_dma_attr;
static ddi_dma_attr_t ql_32fcip_cmd_dma_attr;
static ddi_dma_attr_t ql_64fcip_cmd_dma_attr;
static ddi_dma_attr_t ql_32fcip_rsp_dma_attr;
static ddi_dma_attr_t ql_64fcip_rsp_dma_attr;
static ddi_dma_attr_t ql_32fcp_cmd_dma_attr;
static ddi_dma_attr_t ql_64fcp_cmd_dma_attr;
static ddi_dma_attr_t ql_32fcp_rsp_dma_attr;
static ddi_dma_attr_t ql_64fcp_rsp_dma_attr;
static ddi_dma_attr_t ql_32fcp_data_dma_attr;
static ddi_dma_attr_t ql_64fcp_data_dma_attr;
515
/*
 * Static declarations of cb_ops entry point functions...
 * Only open/close/ioctl are implemented (ql_open/ql_close/ql_ioctl are
 * defined in the driver's ioctl translation units); all block and other
 * character entry points are nodev.
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};
537
/*
 * Static declarations of dev_ops entry point functions...
 * Note: fc_fca_init() is allowed to adjust this structure in _init()
 * before mod_install(), so it must not be const.
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};
553
/* ELS command code to text converter (table contents from ql_debug.h). */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string, exported for diagnostic utilities. */
char qlc_driver_version[] = QL_VERSION;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
576
577 /* ************************************************************************ */
578 /* Loadable Module Routines. */
579 /* ************************************************************************ */
580
581 /*
582 * _init
583 * Initializes a loadable module. It is called before any other
584 * routine in a loadable module.
585 *
586 * Returns:
587 * 0 = success
588 *
589 * Context:
590 * Kernel context.
591 */
592 int
593 _init(void)
594 {
595 uint16_t w16;
596 int rval = 0;
597
598 /* Get OS major release level. */
599 for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
600 if (utsname.release[w16] == '.') {
601 w16++;
602 break;
603 }
604 }
605 if (w16 < sizeof (utsname.release)) {
606 (void) ql_bstr_to_dec(&utsname.release[w16],
607 &ql_os_release_level, 0);
608 } else {
609 ql_os_release_level = 0;
610 }
611 if (ql_os_release_level < 6) {
612 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
613 QL_NAME, ql_os_release_level);
614 rval = EINVAL;
615 }
616 if (ql_os_release_level == 6) {
617 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
618 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
619 }
620
621 if (rval == 0) {
622 rval = ddi_soft_state_init(&ql_state,
623 sizeof (ql_adapter_state_t), 0);
624 }
625 if (rval == 0) {
626 /* allow the FC Transport to tweak the dev_ops */
627 fc_fca_init(&ql_devops);
628
629 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
630 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
631 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
632 rval = mod_install(&modlinkage);
633 if (rval != 0) {
634 mutex_destroy(&ql_global_hw_mutex);
635 mutex_destroy(&ql_global_mutex);
636 mutex_destroy(&ql_global_el_mutex);
637 ddi_soft_state_fini(&ql_state);
638 } else {
639 /*EMPTY*/
640 ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
641 ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
642 ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
643 ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
644 ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
645 ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
646 ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
647 ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
648 ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
649 ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
650 ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
651 ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
652 ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
653 ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
654 ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
655 ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
656 QL_FCSM_CMD_SGLLEN;
657 ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
658 ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
659 QL_FCSM_RSP_SGLLEN;
660 ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
661 ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
662 QL_FCIP_CMD_SGLLEN;
663 ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
664 ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
665 QL_FCIP_RSP_SGLLEN;
666 ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
667 ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
668 QL_FCP_CMD_SGLLEN;
669 ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
670 ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
671 QL_FCP_RSP_SGLLEN;
672 }
673 }
674
675 if (rval != 0) {
676 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
677 QL_NAME);
678 }
679
680 return (rval);
681 }
682
683 /*
684 * _fini
685 * Prepares a module for unloading. It is called when the system
686 * wants to unload a module. If the module determines that it can
687 * be unloaded, then _fini() returns the value returned by
688 * mod_remove(). Upon successful return from _fini() no other
689 * routine in the module will be called before _init() is called.
690 *
691 * Returns:
692 * 0 = success
693 *
694 * Context:
695 * Kernel context.
696 */
697 int
698 _fini(void)
699 {
700 int rval;
701
702 rval = mod_remove(&modlinkage);
703 if (rval == 0) {
704 mutex_destroy(&ql_global_hw_mutex);
705 mutex_destroy(&ql_global_mutex);
706 mutex_destroy(&ql_global_el_mutex);
707 ddi_soft_state_fini(&ql_state);
708 }
709
710 return (rval);
711 }
712
713 /*
714 * _info
715 * Returns information about loadable module.
716 *
717 * Input:
718 * modinfo = pointer to module information structure.
719 *
720 * Returns:
721 * Value returned by mod_info().
722 *
723 * Context:
724 * Kernel context.
725 */
726 int
727 _info(struct modinfo *modinfop)
728 {
729 return (mod_info(&modlinkage, modinfop));
730 }
731
732 /* ************************************************************************ */
733 /* dev_ops functions */
734 /* ************************************************************************ */
735
736 /*
737 * ql_getinfo
738 * Returns the pointer associated with arg when cmd is
739 * set to DDI_INFO_DEVT2DEVINFO, or it should return the
740 * instance number associated with arg when cmd is set
741 * to DDI_INFO_DEV2INSTANCE.
742 *
743 * Input:
744 * dip = Do not use.
745 * cmd = command argument.
746 * arg = command specific argument.
747 * resultp = pointer to where request information is stored.
748 *
749 * Returns:
750 * DDI_SUCCESS or DDI_FAILURE.
751 *
752 * Context:
753 * Kernel context.
754 */
755 /* ARGSUSED */
756 static int
757 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
758 {
759 ql_adapter_state_t *ha;
760 int minor;
761 int rval = DDI_FAILURE;
762
763 minor = (int)(getminor((dev_t)arg));
764 ha = ddi_get_soft_state(ql_state, minor);
765 if (ha == NULL) {
766 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
767 getminor((dev_t)arg));
768 *resultp = NULL;
769 return (rval);
770 }
771
772 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
773
774 switch (cmd) {
775 case DDI_INFO_DEVT2DEVINFO:
776 *resultp = ha->dip;
777 rval = DDI_SUCCESS;
778 break;
779 case DDI_INFO_DEVT2INSTANCE:
780 *resultp = (void *)(uintptr_t)(ha->instance);
781 rval = DDI_SUCCESS;
782 break;
783 default:
784 EL(ha, "failed, unsupported cmd=%d\n", cmd);
785 rval = DDI_FAILURE;
786 break;
787 }
788
789 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
790
791 return (rval);
792 }
793
794 /*
795 * ql_attach
796 * Configure and attach an instance of the driver
797 * for a port.
798 *
799 * Input:
800 * dip = pointer to device information structure.
801 * cmd = attach type.
802 *
803 * Returns:
804 * DDI_SUCCESS or DDI_FAILURE.
805 *
806 * Context:
807 * Kernel context.
808 */
809 static int
810 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
811 {
812 off_t regsize;
813 uint32_t size;
814 int rval, *ptr;
815 int instance;
816 uint_t progress = 0;
817 char *buf;
818 ushort_t caps_ptr, cap;
819 fc_fca_tran_t *tran;
820 ql_adapter_state_t *ha = NULL;
821
822 static char *pmcomps[] = {
823 NULL,
824 PM_LEVEL_D3_STR, /* Device OFF */
825 PM_LEVEL_D0_STR, /* Device ON */
826 };
827
828 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
829 ddi_get_instance(dip), cmd);
830
831 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
832
833 switch (cmd) {
834 case DDI_ATTACH:
835 /* first get the instance */
836 instance = ddi_get_instance(dip);
837
838 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
839 QL_NAME, instance, QL_VERSION);
840
841 /* Correct OS version? */
842 if (ql_os_release_level != 11) {
843 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
844 "11", QL_NAME, instance);
845 goto attach_failed;
846 }
847
848 /* Hardware is installed in a DMA-capable slot? */
849 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
850 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
851 instance);
852 goto attach_failed;
853 }
854
855 /* No support for high-level interrupts */
856 if (ddi_intr_hilevel(dip, 0) != 0) {
857 cmn_err(CE_WARN, "%s(%d): High level interrupt"
858 " not supported", QL_NAME, instance);
859 goto attach_failed;
860 }
861
862 /* Allocate our per-device-instance structure */
863 if (ddi_soft_state_zalloc(ql_state,
864 instance) != DDI_SUCCESS) {
865 cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
866 QL_NAME, instance);
867 goto attach_failed;
868 }
869 progress |= QL_SOFT_STATE_ALLOCED;
870
871 ha = ddi_get_soft_state(ql_state, instance);
872 if (ha == NULL) {
873 cmn_err(CE_WARN, "%s(%d): can't get soft state",
874 QL_NAME, instance);
875 goto attach_failed;
876 }
877 ha->dip = dip;
878 ha->instance = instance;
879 ha->hba.base_address = ha;
880 ha->pha = ha;
881
882 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
883 cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
884 QL_NAME, instance);
885 goto attach_failed;
886 }
887
888 /* Get extended logging and dump flags. */
889 ql_common_properties(ha);
890
891 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
892 "sbus") == 0) {
893 EL(ha, "%s SBUS card detected", QL_NAME);
894 ha->cfg_flags |= CFG_SBUS_CARD;
895 }
896
897 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
898 DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
899
900 ha->outstanding_cmds = kmem_zalloc(
901 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
902 KM_SLEEP);
903
904 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
905 QL_UB_LIMIT, KM_SLEEP);
906
907 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
908 KM_SLEEP);
909
910 (void) ddi_pathname(dip, buf);
911 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
912 if (ha->devpath == NULL) {
913 EL(ha, "devpath mem alloc failed\n");
914 } else {
915 (void) strcpy(ha->devpath, buf);
916 EL(ha, "devpath is: %s\n", ha->devpath);
917 }
918
919 if (CFG_IST(ha, CFG_SBUS_CARD)) {
920 /*
921 * For cards where PCI is mapped to sbus e.g. Ivory.
922 *
923 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
924 * : 0x100 - 0x3FF PCI IO space for 2200
925 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
926 * : 0x100 - 0x3FF PCI IO Space for fpga
927 */
928 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
929 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
930 DDI_SUCCESS) {
931 cmn_err(CE_WARN, "%s(%d): Unable to map device"
932 " registers", QL_NAME, instance);
933 goto attach_failed;
934 }
935 if (ddi_regs_map_setup(dip, 1,
936 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
937 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
938 DDI_SUCCESS) {
939 /* We should not fail attach here */
940 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
941 QL_NAME, instance);
942 ha->sbus_fpga_iobase = NULL;
943 }
944 progress |= QL_REGS_MAPPED;
945
946 /*
947 * We should map config space before adding interrupt
948 * So that the chip type (2200 or 2300) can be
949 * determined before the interrupt routine gets a
950 * chance to execute.
951 */
952 if (ddi_regs_map_setup(dip, 0,
953 (caddr_t *)&ha->sbus_config_base, 0, 0x100,
954 &ql_dev_acc_attr, &ha->sbus_config_handle) !=
955 DDI_SUCCESS) {
956 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
957 "config registers", QL_NAME, instance);
958 goto attach_failed;
959 }
960 progress |= QL_CONFIG_SPACE_SETUP;
961 } else {
962 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
963 rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
964 DDI_PROP_DONTPASS, "reg", &ptr, &size);
965 if (rval != DDI_PROP_SUCCESS) {
966 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
967 "address registers", QL_NAME, instance);
968 goto attach_failed;
969 } else {
970 ha->pci_bus_addr = ptr[0];
971 ha->function_number = (uint8_t)
972 (ha->pci_bus_addr >> 8 & 7);
973 ddi_prop_free(ptr);
974 }
975
976 /*
977 * We should map config space before adding interrupt
978 * So that the chip type (2200 or 2300) can be
979 * determined before the interrupt routine gets a
980 * chance to execute.
981 */
982 if (pci_config_setup(ha->dip, &ha->pci_handle) !=
983 DDI_SUCCESS) {
984 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
985 "config space", QL_NAME, instance);
986 goto attach_failed;
987 }
988 progress |= QL_CONFIG_SPACE_SETUP;
989
990 /*
991 * Setup the ISP2200 registers address mapping to be
992 * accessed by this particular driver.
993 * 0x0 Configuration Space
994 * 0x1 I/O Space
995 * 0x2 32-bit Memory Space address
996 * 0x3 64-bit Memory Space address
997 */
998 size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
999 2 : 1;
1000 if (ddi_dev_regsize(dip, size, ®size) !=
1001 DDI_SUCCESS ||
1002 ddi_regs_map_setup(dip, size, &ha->iobase,
1003 0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1004 DDI_SUCCESS) {
1005 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1006 "failed", QL_NAME, instance);
1007 goto attach_failed;
1008 }
1009 progress |= QL_REGS_MAPPED;
1010
1011 /*
1012 * We need I/O space mappings for 23xx HBAs for
1013 * loading flash (FCode). The chip has a bug due to
1014 * which loading flash fails through mem space
1015 * mappings in PCI-X mode.
1016 */
1017 if (size == 1) {
1018 ha->iomap_iobase = ha->iobase;
1019 ha->iomap_dev_handle = ha->dev_handle;
1020 } else {
1021 if (ddi_dev_regsize(dip, 1, ®size) !=
1022 DDI_SUCCESS ||
1023 ddi_regs_map_setup(dip, 1,
1024 &ha->iomap_iobase, 0, regsize,
1025 &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1026 DDI_SUCCESS) {
1027 cmn_err(CE_WARN, "%s(%d): regs_map_"
1028 "setup(I/O) failed", QL_NAME,
1029 instance);
1030 goto attach_failed;
1031 }
1032 progress |= QL_IOMAP_IOBASE_MAPPED;
1033 }
1034 }
1035
1036 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1037 PCI_CONF_SUBSYSID);
1038 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1039 PCI_CONF_SUBVENID);
1040 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1041 PCI_CONF_VENID);
1042 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1043 PCI_CONF_DEVID);
1044 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1045 PCI_CONF_REVID);
1046
1047 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1048 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1049 ha->subven_id, ha->subsys_id);
1050
1051 switch (ha->device_id) {
1052 case 0x2300:
1053 case 0x2312:
1054 case 0x2322:
1055 case 0x6312:
1056 case 0x6322:
1057 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1058 ha->flags |= FUNCTION_1;
1059 }
1060 if ((ha->device_id == 0x6322) ||
1061 (ha->device_id == 0x2322)) {
1062 ha->cfg_flags |= CFG_CTRL_6322;
1063 ha->fw_class = 0x6322;
1064 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1065 } else {
1066 ha->cfg_flags |= CFG_CTRL_2300;
1067 ha->fw_class = 0x2300;
1068 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1069 }
1070 ha->reg_off = ®_off_2300;
1071 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1072 goto attach_failed;
1073 }
1074 ha->fcp_cmd = ql_command_iocb;
1075 ha->ip_cmd = ql_ip_iocb;
1076 ha->ms_cmd = ql_ms_iocb;
1077 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1078 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1079 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1080 } else {
1081 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1082 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1083 }
1084 break;
1085
1086 case 0x2200:
1087 ha->cfg_flags |= CFG_CTRL_2200;
1088 ha->reg_off = ®_off_2200;
1089 ha->fw_class = 0x2200;
1090 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1091 goto attach_failed;
1092 }
1093 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1094 ha->fcp_cmd = ql_command_iocb;
1095 ha->ip_cmd = ql_ip_iocb;
1096 ha->ms_cmd = ql_ms_iocb;
1097 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1098 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1099 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1100 } else {
1101 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1102 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1103 }
1104 break;
1105
1106 case 0x2422:
1107 case 0x2432:
1108 case 0x5422:
1109 case 0x5432:
1110 case 0x8432:
1111 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1112 ha->flags |= FUNCTION_1;
1113 }
1114 ha->cfg_flags |= CFG_CTRL_2422;
1115 if (ha->device_id == 0x8432) {
1116 ha->cfg_flags |= CFG_CTRL_MENLO;
1117 } else {
1118 ha->flags |= VP_ENABLED;
1119 }
1120
1121 ha->reg_off = ®_off_2400_2500;
1122 ha->fw_class = 0x2400;
1123 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1124 goto attach_failed;
1125 }
1126 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1127 ha->fcp_cmd = ql_command_24xx_iocb;
1128 ha->ip_cmd = ql_ip_24xx_iocb;
1129 ha->ms_cmd = ql_ms_24xx_iocb;
1130 ha->els_cmd = ql_els_24xx_iocb;
1131 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1132 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1133 break;
1134
1135 case 0x2522:
1136 case 0x2532:
1137 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1138 ha->flags |= FUNCTION_1;
1139 }
1140 ha->cfg_flags |= CFG_CTRL_25XX;
1141 ha->flags |= VP_ENABLED;
1142 ha->fw_class = 0x2500;
1143 ha->reg_off = ®_off_2400_2500;
1144 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1145 goto attach_failed;
1146 }
1147 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1148 ha->fcp_cmd = ql_command_24xx_iocb;
1149 ha->ip_cmd = ql_ip_24xx_iocb;
1150 ha->ms_cmd = ql_ms_24xx_iocb;
1151 ha->els_cmd = ql_els_24xx_iocb;
1152 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1153 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1154 break;
1155
1156 case 0x8001:
1157 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1158 ha->flags |= FUNCTION_1;
1159 }
1160 ha->cfg_flags |= CFG_CTRL_81XX;
1161 ha->flags |= VP_ENABLED;
1162 ha->fw_class = 0x8100;
1163 ha->reg_off = ®_off_2400_2500;
1164 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1165 goto attach_failed;
1166 }
1167 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1168 ha->fcp_cmd = ql_command_24xx_iocb;
1169 ha->ip_cmd = ql_ip_24xx_iocb;
1170 ha->ms_cmd = ql_ms_24xx_iocb;
1171 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1172 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1173 break;
1174
1175 case 0x8021:
1176 if (ha->function_number & BIT_0) {
1177 ha->flags |= FUNCTION_1;
1178 }
1179 ha->cfg_flags |= CFG_CTRL_8021;
1180 ha->reg_off = ®_off_8021;
1181 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1182 ha->fcp_cmd = ql_command_24xx_iocb;
1183 ha->ms_cmd = ql_ms_24xx_iocb;
1184 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1185 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1186
1187 ha->nx_pcibase = ha->iobase;
1188 ha->iobase += 0xBC000 + (ha->function_number << 11);
1189 ha->iomap_iobase += 0xBC000 +
1190 (ha->function_number << 11);
1191
1192 /* map doorbell */
1193 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS ||
1194 ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1195 0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1196 DDI_SUCCESS) {
1197 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1198 "(doorbell) failed", QL_NAME, instance);
1199 goto attach_failed;
1200 }
1201 progress |= QL_DB_IOBASE_MAPPED;
1202
1203 ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1204 (ha->function_number << 12));
1205 ha->db_read = ha->nx_pcibase + (512 * 1024) +
1206 (ha->function_number * 8);
1207
1208 ql_8021_update_crb_int_ptr(ha);
1209 ql_8021_set_drv_active(ha);
1210 break;
1211
1212 default:
1213 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1214 QL_NAME, instance, ha->device_id);
1215 goto attach_failed;
1216 }
1217
1218 /* Setup hba buffer. */
1219
1220 size = CFG_IST(ha, CFG_CTRL_24258081) ?
1221 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1222 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1223 RCVBUF_QUEUE_SIZE);
1224
1225 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1226 QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1227 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1228 "alloc failed", QL_NAME, instance);
1229 goto attach_failed;
1230 }
1231 progress |= QL_HBA_BUFFER_SETUP;
1232
1233 /* Setup buffer pointers. */
1234 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1235 REQUEST_Q_BUFFER_OFFSET;
1236 ha->request_ring_bp = (struct cmd_entry *)
1237 ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1238
1239 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1240 RESPONSE_Q_BUFFER_OFFSET;
1241 ha->response_ring_bp = (struct sts_entry *)
1242 ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1243
1244 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1245 RCVBUF_Q_BUFFER_OFFSET;
1246 ha->rcvbuf_ring_bp = (struct rcvbuf *)
1247 ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1248
1249 /* Allocate resource for QLogic IOCTL */
1250 (void) ql_alloc_xioctl_resource(ha);
1251
1252 /* Setup interrupts */
1253 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1254 cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1255 "rval=%xh", QL_NAME, instance, rval);
1256 goto attach_failed;
1257 }
1258
1259 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1260
1261 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1262 cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1263 QL_NAME, instance);
1264 goto attach_failed;
1265 }
1266
1267 /*
1268 * Allocate an N Port information structure
1269 * for use when in P2P topology.
1270 */
1271 ha->n_port = (ql_n_port_info_t *)
1272 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1273 if (ha->n_port == NULL) {
1274 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1275 QL_NAME, instance);
1276 goto attach_failed;
1277 }
1278
1279 progress |= QL_N_PORT_INFO_CREATED;
1280
1281 /*
1282 * Determine support for Power Management
1283 */
1284 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1285
1286 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1287 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1288 if (cap == PCI_CAP_ID_PM) {
1289 ha->pm_capable = 1;
1290 break;
1291 }
1292 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1293 PCI_CAP_NEXT_PTR);
1294 }
1295
1296 if (ha->pm_capable) {
1297 /*
1298 * Enable PM for 2200 based HBAs only.
1299 */
1300 if (ha->device_id != 0x2200) {
1301 ha->pm_capable = 0;
1302 }
1303 }
1304
1305 if (ha->pm_capable) {
1306 ha->pm_capable = ql_enable_pm;
1307 }
1308
1309 if (ha->pm_capable) {
1310 /*
1311 * Initialize power management bookkeeping;
1312 * components are created idle.
1313 */
1314 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1315 pmcomps[0] = buf;
1316
1317 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1318 if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1319 dip, "pm-components", pmcomps,
1320 sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1321 DDI_PROP_SUCCESS) {
1322 cmn_err(CE_WARN, "%s(%d): failed to create"
1323 " pm-components property", QL_NAME,
1324 instance);
1325
1326 /* Initialize adapter. */
1327 ha->power_level = PM_LEVEL_D0;
1328 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1329 cmn_err(CE_WARN, "%s(%d): failed to"
1330 " initialize adapter", QL_NAME,
1331 instance);
1332 goto attach_failed;
1333 }
1334 } else {
1335 ha->power_level = PM_LEVEL_D3;
1336 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1337 PM_LEVEL_D0) != DDI_SUCCESS) {
1338 cmn_err(CE_WARN, "%s(%d): failed to"
1339 " raise power or initialize"
1340 " adapter", QL_NAME, instance);
1341 }
1342 }
1343 } else {
1344 /* Initialize adapter. */
1345 ha->power_level = PM_LEVEL_D0;
1346 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1347 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1348 " adapter", QL_NAME, instance);
1349 }
1350 }
1351
1352 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1353 ha->fw_subminor_version == 0) {
1354 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1355 QL_NAME, ha->instance);
1356 } else {
1357 int rval;
1358 char ver_fmt[256];
1359
1360 rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1361 "Firmware version %d.%d.%d", ha->fw_major_version,
1362 ha->fw_minor_version, ha->fw_subminor_version);
1363
1364 if (CFG_IST(ha, CFG_CTRL_81XX)) {
1365 rval = (int)snprintf(ver_fmt + rval,
1366 (size_t)sizeof (ver_fmt),
1367 ", MPI fw version %d.%d.%d",
1368 ha->mpi_fw_major_version,
1369 ha->mpi_fw_minor_version,
1370 ha->mpi_fw_subminor_version);
1371
1372 if (ha->subsys_id == 0x17B ||
1373 ha->subsys_id == 0x17D) {
1374 (void) snprintf(ver_fmt + rval,
1375 (size_t)sizeof (ver_fmt),
1376 ", PHY fw version %d.%d.%d",
1377 ha->phy_fw_major_version,
1378 ha->phy_fw_minor_version,
1379 ha->phy_fw_subminor_version);
1380 }
1381 }
1382 cmn_err(CE_NOTE, "!%s(%d): %s",
1383 QL_NAME, ha->instance, ver_fmt);
1384 }
1385
1386 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1387 "controller", KSTAT_TYPE_RAW,
1388 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1389 if (ha->k_stats == NULL) {
1390 cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1391 QL_NAME, instance);
1392 goto attach_failed;
1393 }
1394 progress |= QL_KSTAT_CREATED;
1395
1396 ha->adapter_stats->version = 1;
1397 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1398 ha->k_stats->ks_private = ha;
1399 ha->k_stats->ks_update = ql_kstat_update;
1400 ha->k_stats->ks_ndata = 1;
1401 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1402 kstat_install(ha->k_stats);
1403
1404 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1405 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1406 cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1407 QL_NAME, instance);
1408 goto attach_failed;
1409 }
1410 progress |= QL_MINOR_NODE_CREATED;
1411
1412 /* Allocate a transport structure for this instance */
1413 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1414 if (tran == NULL) {
1415 cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1416 QL_NAME, instance);
1417 goto attach_failed;
1418 }
1419
1420 progress |= QL_FCA_TRAN_ALLOCED;
1421
1422 /* fill in the structure */
1423 tran->fca_numports = 1;
1424 tran->fca_version = FCTL_FCA_MODREV_5;
1425 if (CFG_IST(ha, CFG_CTRL_2422)) {
1426 tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1427 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1428 tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1429 }
1430 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1431 tran->fca_perm_pwwn.raw_wwn, 8);
1432
1433 EL(ha, "FCA version %d\n", tran->fca_version);
1434
1435 /* Specify the amount of space needed in each packet */
1436 tran->fca_pkt_size = sizeof (ql_srb_t);
1437
1438 /* command limits are usually dictated by hardware */
1439 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1440
1441 /* dmaattr are static, set elsewhere. */
1442 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1443 tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1444 tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1445 tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1446 tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1447 tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1448 tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1449 tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1450 tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1451 } else {
1452 tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1453 tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1454 tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1455 tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1456 tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1457 tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1458 tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1459 tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1460 }
1461
1462 tran->fca_acc_attr = &ql_dev_acc_attr;
1463 tran->fca_iblock = &(ha->iblock_cookie);
1464
1465 /* the remaining values are simply function vectors */
1466 tran->fca_bind_port = ql_bind_port;
1467 tran->fca_unbind_port = ql_unbind_port;
1468 tran->fca_init_pkt = ql_init_pkt;
1469 tran->fca_un_init_pkt = ql_un_init_pkt;
1470 tran->fca_els_send = ql_els_send;
1471 tran->fca_get_cap = ql_get_cap;
1472 tran->fca_set_cap = ql_set_cap;
1473 tran->fca_getmap = ql_getmap;
1474 tran->fca_transport = ql_transport;
1475 tran->fca_ub_alloc = ql_ub_alloc;
1476 tran->fca_ub_free = ql_ub_free;
1477 tran->fca_ub_release = ql_ub_release;
1478 tran->fca_abort = ql_abort;
1479 tran->fca_reset = ql_reset;
1480 tran->fca_port_manage = ql_port_manage;
1481 tran->fca_get_device = ql_get_device;
1482
1483 /* give it to the FC transport */
1484 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1485 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1486 instance);
1487 goto attach_failed;
1488 }
1489 progress |= QL_FCA_ATTACH_DONE;
1490
1491 /* Stash the structure so it can be freed at detach */
1492 ha->tran = tran;
1493
1494 /* Acquire global state lock. */
1495 GLOBAL_STATE_LOCK();
1496
1497 /* Add adapter structure to link list. */
1498 ql_add_link_b(&ql_hba, &ha->hba);
1499
1500 /* Start one second driver timer. */
1501 if (ql_timer_timeout_id == NULL) {
1502 ql_timer_ticks = drv_usectohz(1000000);
1503 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1504 ql_timer_ticks);
1505 }
1506
1507 /* Release global state lock. */
1508 GLOBAL_STATE_UNLOCK();
1509
1510 /* Determine and populate HBA fru info */
1511 ql_setup_fruinfo(ha);
1512
1513 /* Setup task_daemon thread. */
1514 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1515 0, &p0, TS_RUN, minclsyspri);
1516
1517 progress |= QL_TASK_DAEMON_STARTED;
1518
1519 ddi_report_dev(dip);
1520
1521 /* Disable link reset in panic path */
1522 ha->lip_on_panic = 1;
1523
1524 rval = DDI_SUCCESS;
1525 break;
1526
1527 attach_failed:
1528 if (progress & QL_FCA_ATTACH_DONE) {
1529 (void) fc_fca_detach(dip);
1530 progress &= ~QL_FCA_ATTACH_DONE;
1531 }
1532
1533 if (progress & QL_FCA_TRAN_ALLOCED) {
1534 kmem_free(tran, sizeof (fc_fca_tran_t));
1535 progress &= ~QL_FCA_TRAN_ALLOCED;
1536 }
1537
1538 if (progress & QL_MINOR_NODE_CREATED) {
1539 ddi_remove_minor_node(dip, "devctl");
1540 progress &= ~QL_MINOR_NODE_CREATED;
1541 }
1542
1543 if (progress & QL_KSTAT_CREATED) {
1544 kstat_delete(ha->k_stats);
1545 progress &= ~QL_KSTAT_CREATED;
1546 }
1547
1548 if (progress & QL_N_PORT_INFO_CREATED) {
1549 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1550 progress &= ~QL_N_PORT_INFO_CREATED;
1551 }
1552
1553 if (progress & QL_TASK_DAEMON_STARTED) {
1554 TASK_DAEMON_LOCK(ha);
1555
1556 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1557
1558 cv_signal(&ha->cv_task_daemon);
1559
1560 /* Release task daemon lock. */
1561 TASK_DAEMON_UNLOCK(ha);
1562
			/* Wait for task daemon to stop running. */
1564 while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1565 ql_delay(ha, 10000);
1566 }
1567 progress &= ~QL_TASK_DAEMON_STARTED;
1568 }
1569
1570 if (progress & QL_DB_IOBASE_MAPPED) {
1571 ql_8021_clr_drv_active(ha);
1572 ddi_regs_map_free(&ha->db_dev_handle);
1573 progress &= ~QL_DB_IOBASE_MAPPED;
1574 }
1575 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1576 ddi_regs_map_free(&ha->iomap_dev_handle);
1577 progress &= ~QL_IOMAP_IOBASE_MAPPED;
1578 }
1579
1580 if (progress & QL_CONFIG_SPACE_SETUP) {
1581 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1582 ddi_regs_map_free(&ha->sbus_config_handle);
1583 } else {
1584 pci_config_teardown(&ha->pci_handle);
1585 }
1586 progress &= ~QL_CONFIG_SPACE_SETUP;
1587 }
1588
1589 if (progress & QL_INTR_ADDED) {
1590 ql_disable_intr(ha);
1591 ql_release_intr(ha);
1592 progress &= ~QL_INTR_ADDED;
1593 }
1594
1595 if (progress & QL_MUTEX_CV_INITED) {
1596 ql_destroy_mutex(ha);
1597 progress &= ~QL_MUTEX_CV_INITED;
1598 }
1599
1600 if (progress & QL_HBA_BUFFER_SETUP) {
1601 ql_free_phys(ha, &ha->hba_buf);
1602 progress &= ~QL_HBA_BUFFER_SETUP;
1603 }
1604
1605 if (progress & QL_REGS_MAPPED) {
1606 ddi_regs_map_free(&ha->dev_handle);
1607 if (ha->sbus_fpga_iobase != NULL) {
1608 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1609 }
1610 progress &= ~QL_REGS_MAPPED;
1611 }
1612
1613 if (progress & QL_SOFT_STATE_ALLOCED) {
1614
1615 ql_fcache_rel(ha->fcache);
1616
1617 kmem_free(ha->adapter_stats,
1618 sizeof (*ha->adapter_stats));
1619
1620 kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1621 QL_UB_LIMIT);
1622
1623 kmem_free(ha->outstanding_cmds,
1624 sizeof (*ha->outstanding_cmds) *
1625 MAX_OUTSTANDING_COMMANDS);
1626
1627 if (ha->devpath != NULL) {
1628 kmem_free(ha->devpath,
1629 strlen(ha->devpath) + 1);
1630 }
1631
1632 kmem_free(ha->dev, sizeof (*ha->dev) *
1633 DEVICE_HEAD_LIST_SIZE);
1634
1635 if (ha->xioctl != NULL) {
1636 ql_free_xioctl_resource(ha);
1637 }
1638
1639 if (ha->fw_module != NULL) {
1640 (void) ddi_modclose(ha->fw_module);
1641 }
1642 (void) ql_el_trace_desc_dtor(ha);
1643 (void) ql_nvram_cache_desc_dtor(ha);
1644
1645 ddi_soft_state_free(ql_state, instance);
1646 progress &= ~QL_SOFT_STATE_ALLOCED;
1647 }
1648
1649 ddi_prop_remove_all(dip);
1650 rval = DDI_FAILURE;
1651 break;
1652
1653 case DDI_RESUME:
1654 rval = DDI_FAILURE;
1655
1656 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1657 if (ha == NULL) {
1658 cmn_err(CE_WARN, "%s(%d): can't get soft state",
1659 QL_NAME, instance);
1660 break;
1661 }
1662
1663 ha->power_level = PM_LEVEL_D3;
1664 if (ha->pm_capable) {
1665 /*
1666 * Get ql_power to do power on initialization
1667 */
1668 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1669 PM_LEVEL_D0) != DDI_SUCCESS) {
1670 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1671 " power", QL_NAME, instance);
1672 }
1673 }
1674
1675 /*
1676 * There is a bug in DR that prevents PM framework
1677 * from calling ql_power.
1678 */
1679 if (ha->power_level == PM_LEVEL_D3) {
1680 ha->power_level = PM_LEVEL_D0;
1681
1682 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1683 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1684 " adapter", QL_NAME, instance);
1685 }
1686
1687 /* Wake up task_daemon. */
1688 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1689 0);
1690 }
1691
1692 /* Acquire global state lock. */
1693 GLOBAL_STATE_LOCK();
1694
1695 /* Restart driver timer. */
1696 if (ql_timer_timeout_id == NULL) {
1697 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1698 ql_timer_ticks);
1699 }
1700
1701 /* Release global state lock. */
1702 GLOBAL_STATE_UNLOCK();
1703
1704 /* Wake up command start routine. */
1705 ADAPTER_STATE_LOCK(ha);
1706 ha->flags &= ~ADAPTER_SUSPENDED;
1707 ADAPTER_STATE_UNLOCK(ha);
1708
1709 /*
1710 * Transport doesn't make FC discovery in polled
1711 * mode; So we need the daemon thread's services
1712 * right here.
1713 */
1714 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1715
1716 rval = DDI_SUCCESS;
1717
1718 /* Restart IP if it was running. */
1719 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1720 (void) ql_initialize_ip(ha);
1721 ql_isp_rcvbuf(ha);
1722 }
1723 break;
1724
1725 default:
1726 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1727 " %x", QL_NAME, ddi_get_instance(dip), cmd);
1728 rval = DDI_FAILURE;
1729 break;
1730 }
1731
1732 kmem_free(buf, MAXPATHLEN);
1733
1734 if (rval != DDI_SUCCESS) {
1735 /*EMPTY*/
1736 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1737 ddi_get_instance(dip), rval);
1738 } else {
1739 /*EMPTY*/
1740 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1741 }
1742
1743 return (rval);
1744 }
1745
/*
 * ql_detach
 *	Used to remove all the state associated with a given
 *	instance of a device node prior to the removal of that
 *	instance from the system.
 *
 * Input:
 *	dip = pointer to device information structure.
 *	cmd = type of detach.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
1762 static int
1763 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1764 {
1765 ql_adapter_state_t *ha, *vha;
1766 ql_tgt_t *tq;
1767 int delay_cnt;
1768 uint16_t index;
1769 ql_link_t *link;
1770 char *buf;
1771 timeout_id_t timer_id = NULL;
1772 int suspend, rval = DDI_SUCCESS;
1773
1774 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1775 if (ha == NULL) {
1776 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1777 ddi_get_instance(dip));
1778 return (DDI_FAILURE);
1779 }
1780
1781 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1782
1783 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1784
1785 switch (cmd) {
1786 case DDI_DETACH:
1787 ADAPTER_STATE_LOCK(ha);
1788 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1789 ADAPTER_STATE_UNLOCK(ha);
1790
1791 TASK_DAEMON_LOCK(ha);
1792
1793 if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1794 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1795 cv_signal(&ha->cv_task_daemon);
1796
1797 TASK_DAEMON_UNLOCK(ha);
1798
1799 (void) ql_wait_for_td_stop(ha);
1800
1801 TASK_DAEMON_LOCK(ha);
1802 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1803 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1804 EL(ha, "failed, could not stop task daemon\n");
1805 }
1806 }
1807 TASK_DAEMON_UNLOCK(ha);
1808
1809 GLOBAL_STATE_LOCK();
1810
1811 /* Disable driver timer if no adapters. */
1812 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1813 ql_hba.last == &ha->hba) {
1814 timer_id = ql_timer_timeout_id;
1815 ql_timer_timeout_id = NULL;
1816 }
1817 ql_remove_link(&ql_hba, &ha->hba);
1818
1819 GLOBAL_STATE_UNLOCK();
1820
1821 if (timer_id) {
1822 (void) untimeout(timer_id);
1823 }
1824
1825 if (ha->pm_capable) {
1826 if (pm_lower_power(dip, QL_POWER_COMPONENT,
1827 PM_LEVEL_D3) != DDI_SUCCESS) {
1828 cmn_err(CE_WARN, "%s(%d): failed to lower the"
1829 " power", QL_NAME, ha->instance);
1830 }
1831 }
1832
1833 /*
1834 * If pm_lower_power shutdown the adapter, there
1835 * isn't much else to do
1836 */
1837 if (ha->power_level != PM_LEVEL_D3) {
1838 ql_halt(ha, PM_LEVEL_D3);
1839 }
1840
1841 /* Remove virtual ports. */
1842 while ((vha = ha->vp_next) != NULL) {
1843 ql_vport_destroy(vha);
1844 }
1845
1846 /* Free target queues. */
1847 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1848 link = ha->dev[index].first;
1849 while (link != NULL) {
1850 tq = link->base_address;
1851 link = link->next;
1852 ql_dev_free(ha, tq);
1853 }
1854 }
1855
1856 /*
1857 * Free unsolicited buffers.
1858 * If we are here then there are no ULPs still
1859 * alive that wish to talk to ql so free up
1860 * any SRB_IP_UB_UNUSED buffers that are
1861 * lingering around
1862 */
1863 QL_UB_LOCK(ha);
1864 for (index = 0; index < QL_UB_LIMIT; index++) {
1865 fc_unsol_buf_t *ubp = ha->ub_array[index];
1866
1867 if (ubp != NULL) {
1868 ql_srb_t *sp = ubp->ub_fca_private;
1869
1870 sp->flags |= SRB_UB_FREE_REQUESTED;
1871
1872 while (!(sp->flags & SRB_UB_IN_FCA) ||
1873 (sp->flags & (SRB_UB_CALLBACK |
1874 SRB_UB_ACQUIRED))) {
1875 QL_UB_UNLOCK(ha);
1876 delay(drv_usectohz(100000));
1877 QL_UB_LOCK(ha);
1878 }
1879 ha->ub_array[index] = NULL;
1880
1881 QL_UB_UNLOCK(ha);
1882 ql_free_unsolicited_buffer(ha, ubp);
1883 QL_UB_LOCK(ha);
1884 }
1885 }
1886 QL_UB_UNLOCK(ha);
1887
1888 /* Free any saved RISC code. */
1889 if (ha->risc_code != NULL) {
1890 kmem_free(ha->risc_code, ha->risc_code_size);
1891 ha->risc_code = NULL;
1892 ha->risc_code_size = 0;
1893 }
1894
1895 if (ha->fw_module != NULL) {
1896 (void) ddi_modclose(ha->fw_module);
1897 ha->fw_module = NULL;
1898 }
1899
1900 /* Free resources. */
1901 ddi_prop_remove_all(dip);
1902 (void) fc_fca_detach(dip);
1903 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1904 ddi_remove_minor_node(dip, "devctl");
1905 if (ha->k_stats != NULL) {
1906 kstat_delete(ha->k_stats);
1907 }
1908
1909 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1910 ddi_regs_map_free(&ha->sbus_config_handle);
1911 } else {
1912 if (CFG_IST(ha, CFG_CTRL_8021)) {
1913 ql_8021_clr_drv_active(ha);
1914 ddi_regs_map_free(&ha->db_dev_handle);
1915 }
1916 if (ha->iomap_dev_handle != ha->dev_handle) {
1917 ddi_regs_map_free(&ha->iomap_dev_handle);
1918 }
1919 pci_config_teardown(&ha->pci_handle);
1920 }
1921
1922 ql_disable_intr(ha);
1923 ql_release_intr(ha);
1924
1925 ql_free_xioctl_resource(ha);
1926
1927 ql_destroy_mutex(ha);
1928
1929 ql_free_phys(ha, &ha->hba_buf);
1930 ql_free_phys(ha, &ha->fwexttracebuf);
1931 ql_free_phys(ha, &ha->fwfcetracebuf);
1932
1933 ddi_regs_map_free(&ha->dev_handle);
1934 if (ha->sbus_fpga_iobase != NULL) {
1935 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1936 }
1937
1938 ql_fcache_rel(ha->fcache);
1939 if (ha->vcache != NULL) {
1940 kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1941 }
1942
1943 if (ha->pi_attrs != NULL) {
1944 kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1945 }
1946
1947 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1948
1949 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1950
1951 kmem_free(ha->outstanding_cmds,
1952 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1953
1954 if (ha->n_port != NULL) {
1955 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1956 }
1957
1958 if (ha->devpath != NULL) {
1959 kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1960 }
1961
1962 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1963
1964 EL(ha, "detached\n");
1965
1966 ddi_soft_state_free(ql_state, (int)ha->instance);
1967
1968 break;
1969
1970 case DDI_SUSPEND:
1971 ADAPTER_STATE_LOCK(ha);
1972
1973 delay_cnt = 0;
1974 ha->flags |= ADAPTER_SUSPENDED;
1975 while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1976 ADAPTER_STATE_UNLOCK(ha);
1977 delay(drv_usectohz(1000000));
1978 ADAPTER_STATE_LOCK(ha);
1979 }
1980 if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1981 ha->flags &= ~ADAPTER_SUSPENDED;
1982 ADAPTER_STATE_UNLOCK(ha);
1983 rval = DDI_FAILURE;
1984 cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1985 " busy %xh flags %xh", QL_NAME, ha->instance,
1986 ha->busy, ha->flags);
1987 break;
1988 }
1989
1990 ADAPTER_STATE_UNLOCK(ha);
1991
1992 if (ha->flags & IP_INITIALIZED) {
1993 (void) ql_shutdown_ip(ha);
1994 }
1995
1996 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1997 ADAPTER_STATE_LOCK(ha);
1998 ha->flags &= ~ADAPTER_SUSPENDED;
1999 ADAPTER_STATE_UNLOCK(ha);
2000 cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2001 QL_NAME, ha->instance, suspend);
2002
2003 /* Restart IP if it was running. */
2004 if (ha->flags & IP_ENABLED &&
2005 !(ha->flags & IP_INITIALIZED)) {
2006 (void) ql_initialize_ip(ha);
2007 ql_isp_rcvbuf(ha);
2008 }
2009 rval = DDI_FAILURE;
2010 break;
2011 }
2012
2013 /* Acquire global state lock. */
2014 GLOBAL_STATE_LOCK();
2015
2016 /* Disable driver timer if last adapter. */
2017 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2018 ql_hba.last == &ha->hba) {
2019 timer_id = ql_timer_timeout_id;
2020 ql_timer_timeout_id = NULL;
2021 }
2022 GLOBAL_STATE_UNLOCK();
2023
2024 if (timer_id) {
2025 (void) untimeout(timer_id);
2026 }
2027
2028 EL(ha, "suspended\n");
2029
2030 break;
2031
2032 default:
2033 rval = DDI_FAILURE;
2034 break;
2035 }
2036
2037 kmem_free(buf, MAXPATHLEN);
2038
2039 if (rval != DDI_SUCCESS) {
2040 if (ha != NULL) {
2041 EL(ha, "failed, rval = %xh\n", rval);
2042 } else {
2043 /*EMPTY*/
2044 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2045 ddi_get_instance(dip), rval);
2046 }
2047 } else {
2048 /*EMPTY*/
2049 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2050 }
2051
2052 return (rval);
2053 }
2054
2055
2056 /*
2057 * ql_power
2058 * Power a device attached to the system.
2059 *
2060 * Input:
2061 * dip = pointer to device information structure.
2062 * component = device.
2063 * level = power level.
2064 *
2065 * Returns:
2066 * DDI_SUCCESS or DDI_FAILURE.
2067 *
2068 * Context:
2069 * Kernel context.
2070 */
2071 /* ARGSUSED */
2072 static int
2073 ql_power(dev_info_t *dip, int component, int level)
2074 {
2075 int rval = DDI_FAILURE;
2076 off_t csr;
2077 uint8_t saved_pm_val;
2078 ql_adapter_state_t *ha;
2079 char *buf;
2080 char *path;
2081
2082 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2083 if (ha == NULL || ha->pm_capable == 0) {
2084 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2085 ddi_get_instance(dip));
2086 return (rval);
2087 }
2088
2089 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2090
2091 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2092 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2093
2094 if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2095 level != PM_LEVEL_D3)) {
2096 EL(ha, "invalid, component=%xh or level=%xh\n",
2097 component, level);
2098 return (rval);
2099 }
2100
2101 GLOBAL_HW_LOCK();
2102 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2103 GLOBAL_HW_UNLOCK();
2104
2105 (void) snprintf(buf, sizeof (buf),
2106 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2107 ddi_pathname(dip, path));
2108
2109 switch (level) {
2110 case PM_LEVEL_D0: /* power up to D0 state - fully on */
2111
2112 QL_PM_LOCK(ha);
2113 if (ha->power_level == PM_LEVEL_D0) {
2114 QL_PM_UNLOCK(ha);
2115 rval = DDI_SUCCESS;
2116 break;
2117 }
2118
2119 /*
2120 * Enable interrupts now
2121 */
2122 saved_pm_val = ha->power_level;
2123 ha->power_level = PM_LEVEL_D0;
2124 QL_PM_UNLOCK(ha);
2125
2126 GLOBAL_HW_LOCK();
2127
2128 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2129
2130 /*
2131 * Delay after reset, for chip to recover.
2132 * Otherwise causes system PANIC
2133 */
2134 drv_usecwait(200000);
2135
2136 GLOBAL_HW_UNLOCK();
2137
2138 if (ha->config_saved) {
2139 ha->config_saved = 0;
2140 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2141 QL_PM_LOCK(ha);
2142 ha->power_level = saved_pm_val;
2143 QL_PM_UNLOCK(ha);
2144 cmn_err(CE_WARN, "%s failed to restore "
2145 "config regs", buf);
2146 break;
2147 }
2148 }
2149
2150 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2151 cmn_err(CE_WARN, "%s adapter initialization failed",
2152 buf);
2153 }
2154
2155 /* Wake up task_daemon. */
2156 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2157 TASK_DAEMON_SLEEPING_FLG, 0);
2158
2159 /* Restart IP if it was running. */
2160 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2161 (void) ql_initialize_ip(ha);
2162 ql_isp_rcvbuf(ha);
2163 }
2164
2165 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2166 ha->instance, QL_NAME);
2167
2168 rval = DDI_SUCCESS;
2169 break;
2170
2171 case PM_LEVEL_D3: /* power down to D3 state - off */
2172
2173 QL_PM_LOCK(ha);
2174
2175 if (ha->busy || ((ha->task_daemon_flags &
2176 TASK_DAEMON_SLEEPING_FLG) == 0)) {
2177 QL_PM_UNLOCK(ha);
2178 break;
2179 }
2180
2181 if (ha->power_level == PM_LEVEL_D3) {
2182 rval = DDI_SUCCESS;
2183 QL_PM_UNLOCK(ha);
2184 break;
2185 }
2186 QL_PM_UNLOCK(ha);
2187
2188 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2189 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2190 " config regs", QL_NAME, ha->instance, buf);
2191 break;
2192 }
2193 ha->config_saved = 1;
2194
2195 /*
2196 * Don't enable interrupts. Running mailbox commands with
2197 * interrupts enabled could cause hangs since pm_run_scan()
2198 * runs out of a callout thread and on single cpu systems
2199 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2200 * would not get to run.
2201 */
2202 TASK_DAEMON_LOCK(ha);
2203 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2204 TASK_DAEMON_UNLOCK(ha);
2205
2206 ql_halt(ha, PM_LEVEL_D3);
2207
2208 /*
2209 * Setup ql_intr to ignore interrupts from here on.
2210 */
2211 QL_PM_LOCK(ha);
2212 ha->power_level = PM_LEVEL_D3;
2213 QL_PM_UNLOCK(ha);
2214
2215 /*
2216 * Wait for ISR to complete.
2217 */
2218 INTR_LOCK(ha);
2219 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2220 INTR_UNLOCK(ha);
2221
2222 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2223 ha->instance, QL_NAME);
2224
2225 rval = DDI_SUCCESS;
2226 break;
2227 }
2228
2229 kmem_free(buf, MAXPATHLEN);
2230 kmem_free(path, MAXPATHLEN);
2231
2232 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2233
2234 return (rval);
2235 }
2236
2237 /*
2238 * ql_quiesce
2239 * quiesce a device attached to the system.
2240 *
2241 * Input:
2242 * dip = pointer to device information structure.
2243 *
2244 * Returns:
2245 * DDI_SUCCESS
2246 *
2247 * Context:
2248 * Kernel context.
2249 */
2250 static int
2251 ql_quiesce(dev_info_t *dip)
2252 {
2253 ql_adapter_state_t *ha;
2254 uint32_t timer;
2255 uint32_t stat;
2256
2257 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2258 if (ha == NULL) {
2259 /* Oh well.... */
2260 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2261 ddi_get_instance(dip));
2262 return (DDI_SUCCESS);
2263 }
2264
2265 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2266
2267 if (CFG_IST(ha, CFG_CTRL_8021)) {
2268 (void) ql_stop_firmware(ha);
2269 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
2270 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2271 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2272 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
2273 for (timer = 0; timer < 30000; timer++) {
2274 stat = RD32_IO_REG(ha, risc2host);
2275 if (stat & BIT_15) {
2276 if ((stat & 0xff) < 0x12) {
2277 WRT32_IO_REG(ha, hccr,
2278 HC24_CLR_RISC_INT);
2279 break;
2280 }
2281 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2282 }
2283 drv_usecwait(100);
2284 }
2285 /* Reset the chip. */
2286 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2287 MWB_4096_BYTES);
2288 drv_usecwait(100);
2289
2290 } else {
2291 /* Disable ISP interrupts. */
2292 WRT16_IO_REG(ha, ictrl, 0);
2293 /* Select RISC module registers. */
2294 WRT16_IO_REG(ha, ctrl_status, 0);
2295 /* Reset ISP semaphore. */
2296 WRT16_IO_REG(ha, semaphore, 0);
2297 /* Reset RISC module. */
2298 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2299 /* Release RISC module. */
2300 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2301 }
2302
2303 ql_disable_intr(ha);
2304
2305 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2306
2307 return (DDI_SUCCESS);
2308 }
2309
2310 /* ************************************************************************ */
2311 /* Fibre Channel Adapter (FCA) Transport Functions. */
2312 /* ************************************************************************ */
2313
2314 /*
2315 * ql_bind_port
2316 * Handling port binding. The FC Transport attempts to bind an FCA port
2317 * when it is ready to start transactions on the port. The FC Transport
2318 * will call the fca_bind_port() function specified in the fca_transport
2319 * structure it receives. The FCA must fill in the port_info structure
2320 * passed in the call and also stash the information for future calls.
2321 *
2322 * Input:
2323 * dip = pointer to FCA information structure.
2324 * port_info = pointer to port information structure.
2325 * bind_info = pointer to bind information structure.
2326 *
2327 * Returns:
2328 * NULL = failure
2329 *
2330 * Context:
2331 * Kernel context.
2332 */
2333 static opaque_t
2334 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2335 fc_fca_bind_info_t *bind_info)
2336 {
2337 ql_adapter_state_t *ha, *vha;
2338 opaque_t fca_handle = NULL;
2339 port_id_t d_id;
2340 int port_npiv = bind_info->port_npiv;
2341 uchar_t *port_nwwn = bind_info->port_nwwn.raw_wwn;
2342 uchar_t *port_pwwn = bind_info->port_pwwn.raw_wwn;
2343
2344 /* get state info based on the dip */
2345 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2346 if (ha == NULL) {
2347 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2348 ddi_get_instance(dip));
2349 return (NULL);
2350 }
2351 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2352
2353 /* Verify port number is supported. */
2354 if (port_npiv != 0) {
2355 if (!(ha->flags & VP_ENABLED)) {
2356 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2357 ha->instance);
2358 port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2359 return (NULL);
2360 }
2361 if (!(ha->flags & POINT_TO_POINT)) {
2362 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2363 ha->instance);
2364 port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2365 return (NULL);
2366 }
2367 if (!(ha->flags & FDISC_ENABLED)) {
2368 QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2369 "FDISC\n", ha->instance);
2370 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2371 return (NULL);
2372 }
2373 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2374 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2375 QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2376 "FC_OUTOFBOUNDS\n", ha->instance);
2377 port_info->pi_error = FC_OUTOFBOUNDS;
2378 return (NULL);
2379 }
2380 } else if (bind_info->port_num != 0) {
2381 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2382 "supported\n", ha->instance, bind_info->port_num);
2383 port_info->pi_error = FC_OUTOFBOUNDS;
2384 return (NULL);
2385 }
2386
2387 /* Locate port context. */
2388 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2389 if (vha->vp_index == bind_info->port_num) {
2390 break;
2391 }
2392 }
2393
2394 /* If virtual port does not exist. */
2395 if (vha == NULL) {
2396 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2397 }
2398
2399 /* make sure this port isn't already bound */
2400 if (vha->flags & FCA_BOUND) {
2401 port_info->pi_error = FC_ALREADY;
2402 } else {
2403 if (vha->vp_index != 0) {
2404 bcopy(port_nwwn,
2405 vha->loginparams.node_ww_name.raw_wwn, 8);
2406 bcopy(port_pwwn,
2407 vha->loginparams.nport_ww_name.raw_wwn, 8);
2408 }
2409 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2410 if (ql_vport_enable(vha) != QL_SUCCESS) {
2411 QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2412 "virtual port=%d\n", ha->instance,
2413 vha->vp_index);
2414 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2415 return (NULL);
2416 }
2417 cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2418 "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2419 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2420 QL_NAME, ha->instance, vha->vp_index,
2421 port_pwwn[0], port_pwwn[1], port_pwwn[2],
2422 port_pwwn[3], port_pwwn[4], port_pwwn[5],
2423 port_pwwn[6], port_pwwn[7],
2424 port_nwwn[0], port_nwwn[1], port_nwwn[2],
2425 port_nwwn[3], port_nwwn[4], port_nwwn[5],
2426 port_nwwn[6], port_nwwn[7]);
2427 }
2428
2429 /* stash the bind_info supplied by the FC Transport */
2430 vha->bind_info.port_handle = bind_info->port_handle;
2431 vha->bind_info.port_statec_cb =
2432 bind_info->port_statec_cb;
2433 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2434
2435 /* Set port's source ID. */
2436 port_info->pi_s_id.port_id = vha->d_id.b24;
2437
2438 /* copy out the default login parameters */
2439 bcopy((void *)&vha->loginparams,
2440 (void *)&port_info->pi_login_params,
2441 sizeof (la_els_logi_t));
2442
2443 /* Set port's hard address if enabled. */
2444 port_info->pi_hard_addr.hard_addr = 0;
2445 if (bind_info->port_num == 0) {
2446 d_id.b24 = ha->d_id.b24;
2447 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2448 if (ha->init_ctrl_blk.cb24.
2449 firmware_options_1[0] & BIT_0) {
2450 d_id.b.al_pa = ql_index_to_alpa[ha->
2451 init_ctrl_blk.cb24.
2452 hard_address[0]];
2453 port_info->pi_hard_addr.hard_addr =
2454 d_id.b24;
2455 }
2456 } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2457 BIT_0) {
2458 d_id.b.al_pa = ql_index_to_alpa[ha->
2459 init_ctrl_blk.cb.hard_address[0]];
2460 port_info->pi_hard_addr.hard_addr = d_id.b24;
2461 }
2462
2463 /* Set the node id data */
2464 if (ql_get_rnid_params(ha,
2465 sizeof (port_info->pi_rnid_params.params),
2466 (caddr_t)&port_info->pi_rnid_params.params) ==
2467 QL_SUCCESS) {
2468 port_info->pi_rnid_params.status = FC_SUCCESS;
2469 } else {
2470 port_info->pi_rnid_params.status = FC_FAILURE;
2471 }
2472
2473 /* Populate T11 FC-HBA details */
2474 ql_populate_hba_fru_details(ha, port_info);
2475 ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2476 KM_SLEEP);
2477 if (ha->pi_attrs != NULL) {
2478 bcopy(&port_info->pi_attrs, ha->pi_attrs,
2479 sizeof (fca_port_attrs_t));
2480 }
2481 } else {
2482 port_info->pi_rnid_params.status = FC_FAILURE;
2483 if (ha->pi_attrs != NULL) {
2484 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2485 sizeof (fca_port_attrs_t));
2486 }
2487 }
2488
2489 /* Generate handle for this FCA. */
2490 fca_handle = (opaque_t)vha;
2491
2492 ADAPTER_STATE_LOCK(ha);
2493 vha->flags |= FCA_BOUND;
2494 ADAPTER_STATE_UNLOCK(ha);
2495 /* Set port's current state. */
2496 port_info->pi_port_state = vha->state;
2497 }
2498
2499 QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2500 "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2501 port_info->pi_port_state, port_info->pi_s_id.port_id);
2502
2503 return (fca_handle);
2504 }
2505
2506 /*
2507 * ql_unbind_port
2508 * To unbind a Fibre Channel Adapter from an FC Port driver.
2509 *
2510 * Input:
2511 * fca_handle = handle setup by ql_bind_port().
2512 *
2513 * Context:
2514 * Kernel context.
2515 */
2516 static void
2517 ql_unbind_port(opaque_t fca_handle)
2518 {
2519 ql_adapter_state_t *ha;
2520 ql_tgt_t *tq;
2521 uint32_t flgs;
2522
2523 ha = ql_fca_handle_to_state(fca_handle);
2524 if (ha == NULL) {
2525 /*EMPTY*/
2526 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2527 (void *)fca_handle);
2528 } else {
2529 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2530 ha->vp_index);
2531
2532 if (!(ha->flags & FCA_BOUND)) {
2533 /*EMPTY*/
2534 QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2535 ha->instance, ha->vp_index);
2536 } else {
2537 if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2538 if ((tq = ql_loop_id_to_queue(ha,
2539 FL_PORT_24XX_HDL)) != NULL) {
2540 (void) ql_logout_fabric_port(ha, tq);
2541 }
2542 (void) ql_vport_control(ha, (uint8_t)
2543 (CFG_IST(ha, CFG_CTRL_2425) ?
2544 VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2545 flgs = FCA_BOUND | VP_ENABLED;
2546 } else {
2547 flgs = FCA_BOUND;
2548 }
2549 ADAPTER_STATE_LOCK(ha);
2550 ha->flags &= ~flgs;
2551 ADAPTER_STATE_UNLOCK(ha);
2552 }
2553
2554 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2555 ha->vp_index);
2556 }
2557 }
2558
2559 /*
2560 * ql_init_pkt
2561 * Initialize FCA portion of packet.
2562 *
2563 * Input:
2564 * fca_handle = handle setup by ql_bind_port().
2565 * pkt = pointer to fc_packet.
2566 *
2567 * Returns:
2568 * FC_SUCCESS - the packet has successfully been initialized.
2569 * FC_UNBOUND - the fca_handle specified is not bound.
2570 * FC_NOMEM - the FCA failed initialization due to an allocation error.
2571 * FC_FAILURE - the FCA failed initialization for undisclosed reasons
2572 *
2573 * Context:
2574 * Kernel context.
2575 */
2576 /* ARGSUSED */
2577 static int
2578 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2579 {
2580 ql_adapter_state_t *ha;
2581 ql_srb_t *sp;
2582 int rval = FC_SUCCESS;
2583
2584 ha = ql_fca_handle_to_state(fca_handle);
2585 if (ha == NULL) {
2586 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2587 (void *)fca_handle);
2588 return (FC_UNBOUND);
2589 }
2590 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2591
2592 sp = (ql_srb_t *)pkt->pkt_fca_private;
2593 sp->flags = 0;
2594
2595 /* init cmd links */
2596 sp->cmd.base_address = sp;
2597 sp->cmd.prev = NULL;
2598 sp->cmd.next = NULL;
2599 sp->cmd.head = NULL;
2600
2601 /* init watchdog links */
2602 sp->wdg.base_address = sp;
2603 sp->wdg.prev = NULL;
2604 sp->wdg.next = NULL;
2605 sp->wdg.head = NULL;
2606 sp->pkt = pkt;
2607 sp->ha = ha;
2608 sp->magic_number = QL_FCA_BRAND;
2609 sp->sg_dma.dma_handle = NULL;
2610 #ifndef __sparc
2611 if (CFG_IST(ha, CFG_CTRL_8021)) {
2612 /* Setup DMA for scatter gather list. */
2613 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2614 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2615 sp->sg_dma.cookie_count = 1;
2616 sp->sg_dma.alignment = 64;
2617 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2618 rval = FC_NOMEM;
2619 }
2620 }
2621 #endif /* __sparc */
2622
2623 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2624
2625 return (rval);
2626 }
2627
2628 /*
2629 * ql_un_init_pkt
2630 * Release all local resources bound to packet.
2631 *
2632 * Input:
2633 * fca_handle = handle setup by ql_bind_port().
2634 * pkt = pointer to fc_packet.
2635 *
2636 * Returns:
2637 * FC_SUCCESS - the packet has successfully been invalidated.
2638 * FC_UNBOUND - the fca_handle specified is not bound.
2639 * FC_BADPACKET - the packet has not been initialized or has
2640 * already been freed by this FCA.
2641 *
2642 * Context:
2643 * Kernel context.
2644 */
2645 static int
2646 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2647 {
2648 ql_adapter_state_t *ha;
2649 int rval;
2650 ql_srb_t *sp;
2651
2652 ha = ql_fca_handle_to_state(fca_handle);
2653 if (ha == NULL) {
2654 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2655 (void *)fca_handle);
2656 return (FC_UNBOUND);
2657 }
2658 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2659
2660 sp = (ql_srb_t *)pkt->pkt_fca_private;
2661
2662 if (sp->magic_number != QL_FCA_BRAND) {
2663 EL(ha, "failed, FC_BADPACKET\n");
2664 rval = FC_BADPACKET;
2665 } else {
2666 sp->magic_number = NULL;
2667 ql_free_phys(ha, &sp->sg_dma);
2668 rval = FC_SUCCESS;
2669 }
2670
2671 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2672
2673 return (rval);
2674 }
2675
2676 /*
2677 * ql_els_send
2678 * Issue a extended link service request.
2679 *
2680 * Input:
2681 * fca_handle = handle setup by ql_bind_port().
2682 * pkt = pointer to fc_packet.
2683 *
2684 * Returns:
2685 * FC_SUCCESS - the command was successful.
2686 * FC_ELS_FREJECT - the command was rejected by a Fabric.
2687 * FC_ELS_PREJECT - the command was rejected by an N-port.
2688 * FC_TRANSPORT_ERROR - a transport error occurred.
2689 * FC_UNBOUND - the fca_handle specified is not bound.
2690 * FC_ELS_BAD - the FCA can not issue the requested ELS.
2691 *
2692 * Context:
2693 * Kernel context.
2694 */
2695 static int
2696 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2697 {
2698 ql_adapter_state_t *ha;
2699 int rval;
2700 clock_t timer = drv_usectohz(30000000);
2701 ls_code_t els;
2702 la_els_rjt_t rjt;
2703 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
2704
2705 /* Verify proper command. */
2706 ha = ql_cmd_setup(fca_handle, pkt, &rval);
2707 if (ha == NULL) {
2708 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2709 rval, fca_handle);
2710 return (FC_INVALID_REQUEST);
2711 }
2712 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2713
2714 /* Wait for suspension to end. */
2715 TASK_DAEMON_LOCK(ha);
2716 while (ha->task_daemon_flags & QL_SUSPENDED) {
2717 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2718
2719 /* 30 seconds from now */
2720 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
2721 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
2722 /*
2723 * The timeout time 'timer' was
2724 * reached without the condition
2725 * being signaled.
2726 */
2727 pkt->pkt_state = FC_PKT_TRAN_BSY;
2728 pkt->pkt_reason = FC_REASON_XCHG_BSY;
2729
2730 /* Release task daemon lock. */
2731 TASK_DAEMON_UNLOCK(ha);
2732
2733 EL(ha, "QL_SUSPENDED failed=%xh\n",
2734 QL_FUNCTION_TIMEOUT);
2735 return (FC_TRAN_BUSY);
2736 }
2737 }
2738 /* Release task daemon lock. */
2739 TASK_DAEMON_UNLOCK(ha);
2740
2741 /* Setup response header. */
2742 bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
2743 sizeof (fc_frame_hdr_t));
2744
2745 if (pkt->pkt_rsplen) {
2746 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2747 }
2748
2749 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2750 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2751 pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
2752 R_CTL_SOLICITED_CONTROL;
2753 pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
2754 F_CTL_END_SEQ;
2755
2756 sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
2757 SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
2758 SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
2759
2760 sp->flags |= SRB_ELS_PKT;
2761
2762 /* map the type of ELS to a function */
2763 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
2764 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
2765
2766 #if 0
2767 QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
2768 QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2769 sizeof (fc_frame_hdr_t) / 4);
2770 QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2771 QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
2772 #endif
2773
2774 sp->iocb = ha->els_cmd;
2775 sp->req_cnt = 1;
2776
2777 switch (els.ls_code) {
2778 case LA_ELS_RJT:
2779 case LA_ELS_ACC:
2780 EL(ha, "LA_ELS_RJT\n");
2781 pkt->pkt_state = FC_PKT_SUCCESS;
2782 rval = FC_SUCCESS;
2783 break;
2784 case LA_ELS_PLOGI:
2785 case LA_ELS_PDISC:
2786 rval = ql_els_plogi(ha, pkt);
2787 break;
2788 case LA_ELS_FLOGI:
2789 case LA_ELS_FDISC:
2790 rval = ql_els_flogi(ha, pkt);
2791 break;
2792 case LA_ELS_LOGO:
2793 rval = ql_els_logo(ha, pkt);
2794 break;
2795 case LA_ELS_PRLI:
2796 rval = ql_els_prli(ha, pkt);
2797 break;
2798 case LA_ELS_PRLO:
2799 rval = ql_els_prlo(ha, pkt);
2800 break;
2801 case LA_ELS_ADISC:
2802 rval = ql_els_adisc(ha, pkt);
2803 break;
2804 case LA_ELS_LINIT:
2805 rval = ql_els_linit(ha, pkt);
2806 break;
2807 case LA_ELS_LPC:
2808 rval = ql_els_lpc(ha, pkt);
2809 break;
2810 case LA_ELS_LSTS:
2811 rval = ql_els_lsts(ha, pkt);
2812 break;
2813 case LA_ELS_SCR:
2814 rval = ql_els_scr(ha, pkt);
2815 break;
2816 case LA_ELS_RSCN:
2817 rval = ql_els_rscn(ha, pkt);
2818 break;
2819 case LA_ELS_FARP_REQ:
2820 rval = ql_els_farp_req(ha, pkt);
2821 break;
2822 case LA_ELS_FARP_REPLY:
2823 rval = ql_els_farp_reply(ha, pkt);
2824 break;
2825 case LA_ELS_RLS:
2826 rval = ql_els_rls(ha, pkt);
2827 break;
2828 case LA_ELS_RNID:
2829 rval = ql_els_rnid(ha, pkt);
2830 break;
2831 default:
2832 EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
2833 els.ls_code);
2834 /* Build RJT. */
2835 bzero(&rjt, sizeof (rjt));
2836 rjt.ls_code.ls_code = LA_ELS_RJT;
2837 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
2838
2839 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
2840 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
2841
2842 pkt->pkt_state = FC_PKT_LOCAL_RJT;
2843 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2844 rval = FC_SUCCESS;
2845 break;
2846 }
2847
2848 #if 0
2849 QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
2850 QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
2851 sizeof (fc_frame_hdr_t) / 4);
2852 #endif
2853 /*
2854 * Return success if the srb was consumed by an iocb. The packet
2855 * completion callback will be invoked by the response handler.
2856 */
2857 if (rval == QL_CONSUMED) {
2858 rval = FC_SUCCESS;
2859 } else if (rval == FC_SUCCESS &&
2860 !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
2861 /* Do command callback only if no error */
2862 ql_awaken_task_daemon(ha, sp, 0, 0);
2863 }
2864
2865 if (rval != FC_SUCCESS) {
2866 EL(ha, "failed, rval = %xh\n", rval);
2867 } else {
2868 /*EMPTY*/
2869 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2870 }
2871 return (rval);
2872 }
2873
2874 /*
2875 * ql_get_cap
2876 * Export FCA hardware and software capabilities.
2877 *
2878 * Input:
2879 * fca_handle = handle setup by ql_bind_port().
2880 * cap = pointer to the capabilities string.
2881 * ptr = buffer pointer for return capability.
2882 *
2883 * Returns:
2884 * FC_CAP_ERROR - no such capability
2885 * FC_CAP_FOUND - the capability was returned and cannot be set
2886 * FC_CAP_SETTABLE - the capability was returned and can be set
2887 * FC_UNBOUND - the fca_handle specified is not bound.
2888 *
2889 * Context:
2890 * Kernel context.
2891 */
2892 static int
2893 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2894 {
2895 ql_adapter_state_t *ha;
2896 int rval;
2897 uint32_t *rptr = (uint32_t *)ptr;
2898
2899 ha = ql_fca_handle_to_state(fca_handle);
2900 if (ha == NULL) {
2901 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2902 (void *)fca_handle);
2903 return (FC_UNBOUND);
2904 }
2905 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2906
2907 if (strcmp(cap, FC_NODE_WWN) == 0) {
2908 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2909 ptr, 8);
2910 rval = FC_CAP_FOUND;
2911 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2912 bcopy((void *)&ha->loginparams, ptr,
2913 sizeof (la_els_logi_t));
2914 rval = FC_CAP_FOUND;
2915 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2916 *rptr = (uint32_t)QL_UB_LIMIT;
2917 rval = FC_CAP_FOUND;
2918 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2919
2920 dev_info_t *psydip = NULL;
2921 #ifdef __sparc
2922 /*
2923 * Disable streaming for certain 2 chip adapters
2924 * below Psycho to handle Psycho byte hole issue.
2925 */
2926 if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2927 (!CFG_IST(ha, CFG_SBUS_CARD))) {
2928 for (psydip = ddi_get_parent(ha->dip); psydip;
2929 psydip = ddi_get_parent(psydip)) {
2930 if (strcmp(ddi_driver_name(psydip),
2931 "pcipsy") == 0) {
2932 break;
2933 }
2934 }
2935 }
2936 #endif /* __sparc */
2937
2938 if (psydip) {
2939 *rptr = (uint32_t)FC_NO_STREAMING;
2940 EL(ha, "No Streaming\n");
2941 } else {
2942 *rptr = (uint32_t)FC_ALLOW_STREAMING;
2943 EL(ha, "Allow Streaming\n");
2944 }
2945 rval = FC_CAP_FOUND;
2946 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2947 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2948 *rptr = (uint32_t)CHAR_TO_SHORT(
2949 ha->init_ctrl_blk.cb24.max_frame_length[0],
2950 ha->init_ctrl_blk.cb24.max_frame_length[1]);
2951 } else {
2952 *rptr = (uint32_t)CHAR_TO_SHORT(
2953 ha->init_ctrl_blk.cb.max_frame_length[0],
2954 ha->init_ctrl_blk.cb.max_frame_length[1]);
2955 }
2956 rval = FC_CAP_FOUND;
2957 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2958 *rptr = FC_RESET_RETURN_ALL;
2959 rval = FC_CAP_FOUND;
2960 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2961 *rptr = FC_NO_DVMA_SPACE;
2962 rval = FC_CAP_FOUND;
2963 } else {
2964 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2965 rval = FC_CAP_ERROR;
2966 }
2967
2968 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2969
2970 return (rval);
2971 }
2972
2973 /*
2974 * ql_set_cap
2975 * Allow the FC Transport to set FCA capabilities if possible.
2976 *
2977 * Input:
2978 * fca_handle = handle setup by ql_bind_port().
2979 * cap = pointer to the capabilities string.
2980 * ptr = buffer pointer for capability.
2981 *
2982 * Returns:
2983 * FC_CAP_ERROR - no such capability
2984 * FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2985 * FC_CAP_SETTABLE - the capability was successfully set.
2986 * FC_UNBOUND - the fca_handle specified is not bound.
2987 *
2988 * Context:
2989 * Kernel context.
2990 */
2991 /* ARGSUSED */
2992 static int
2993 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2994 {
2995 ql_adapter_state_t *ha;
2996 int rval;
2997
2998 ha = ql_fca_handle_to_state(fca_handle);
2999 if (ha == NULL) {
3000 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3001 (void *)fca_handle);
3002 return (FC_UNBOUND);
3003 }
3004 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3005
3006 if (strcmp(cap, FC_NODE_WWN) == 0) {
3007 rval = FC_CAP_FOUND;
3008 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3009 rval = FC_CAP_FOUND;
3010 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3011 rval = FC_CAP_FOUND;
3012 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3013 rval = FC_CAP_FOUND;
3014 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3015 rval = FC_CAP_FOUND;
3016 } else {
3017 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3018 rval = FC_CAP_ERROR;
3019 }
3020
3021 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3022
3023 return (rval);
3024 }
3025
3026 /*
3027 * ql_getmap
3028 * Request of Arbitrated Loop (AL-PA) map.
3029 *
3030 * Input:
3031 * fca_handle = handle setup by ql_bind_port().
3032 * mapbuf= buffer pointer for map.
3033 *
3034 * Returns:
3035 * FC_OLDPORT - the specified port is not operating in loop mode.
3036 * FC_OFFLINE - the specified port is not online.
3037 * FC_NOMAP - there is no loop map available for this port.
3038 * FC_UNBOUND - the fca_handle specified is not bound.
3039 * FC_SUCCESS - a valid map has been placed in mapbuf.
3040 *
3041 * Context:
3042 * Kernel context.
3043 */
3044 static int
3045 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
3046 {
3047 ql_adapter_state_t *ha;
3048 clock_t timer = drv_usectohz(30000000);
3049 int rval = FC_SUCCESS;
3050
3051 ha = ql_fca_handle_to_state(fca_handle);
3052 if (ha == NULL) {
3053 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3054 (void *)fca_handle);
3055 return (FC_UNBOUND);
3056 }
3057 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3058
3059 mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
3060 mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
3061
3062 /* Wait for suspension to end. */
3063 TASK_DAEMON_LOCK(ha);
3064 while (ha->task_daemon_flags & QL_SUSPENDED) {
3065 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3066
3067 /* 30 seconds from now */
3068 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3069 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3070 /*
3071 * The timeout time 'timer' was
3072 * reached without the condition
3073 * being signaled.
3074 */
3075
3076 /* Release task daemon lock. */
3077 TASK_DAEMON_UNLOCK(ha);
3078
3079 EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
3080 return (FC_TRAN_BUSY);
3081 }
3082 }
3083 /* Release task daemon lock. */
3084 TASK_DAEMON_UNLOCK(ha);
3085
3086 if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
3087 (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
3088 /*
3089 * Now, since transport drivers cosider this as an
3090 * offline condition, let's wait for few seconds
3091 * for any loop transitions before we reset the.
3092 * chip and restart all over again.
3093 */
3094 ql_delay(ha, 2000000);
3095 EL(ha, "failed, FC_NOMAP\n");
3096 rval = FC_NOMAP;
3097 } else {
3098 /*EMPTY*/
3099 QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
3100 "data %xh %xh %xh %xh\n", ha->instance,
3101 mapbuf->lilp_myalpa, mapbuf->lilp_length,
3102 mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
3103 mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
3104 }
3105
3106 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3107 #if 0
3108 QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
3109 #endif
3110 return (rval);
3111 }
3112
3113 /*
3114 * ql_transport
3115 * Issue an I/O request. Handles all regular requests.
3116 *
3117 * Input:
3118 * fca_handle = handle setup by ql_bind_port().
3119 * pkt = pointer to fc_packet.
3120 *
3121 * Returns:
3122 * FC_SUCCESS - the packet was accepted for transport.
3123 * FC_TRANSPORT_ERROR - a transport error occurred.
3124 * FC_BADPACKET - the packet to be transported had not been
3125 * initialized by this FCA.
3126 * FC_UNBOUND - the fca_handle specified is not bound.
3127 *
3128 * Context:
3129 * Kernel context.
3130 */
3131 static int
3132 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
3133 {
3134 ql_adapter_state_t *ha;
3135 int rval = FC_TRANSPORT_ERROR;
3136 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
3137
3138 /* Verify proper command. */
3139 ha = ql_cmd_setup(fca_handle, pkt, &rval);
3140 if (ha == NULL) {
3141 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3142 rval, fca_handle);
3143 return (rval);
3144 }
3145 QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
3146 #if 0
3147 QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
3148 sizeof (fc_frame_hdr_t) / 4);
3149 QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
3150 QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
3151 #endif
3152
3153 /* Reset SRB flags. */
3154 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
3155 SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
3156 SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
3157 SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
3158 SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
3159 SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
3160 SRB_MS_PKT | SRB_ELS_PKT);
3161
3162 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3163 pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
3164 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3165 pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
3166 pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
3167
3168 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3169 case R_CTL_COMMAND:
3170 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
3171 sp->flags |= SRB_FCP_CMD_PKT;
3172 rval = ql_fcp_scsi_cmd(ha, pkt, sp);
3173 }
3174 break;
3175
3176 default:
3177 /* Setup response header and buffer. */
3178 if (pkt->pkt_rsplen) {
3179 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3180 }
3181
3182 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3183 case R_CTL_UNSOL_DATA:
3184 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3185 sp->flags |= SRB_IP_PKT;
3186 rval = ql_fcp_ip_cmd(ha, pkt, sp);
3187 }
3188 break;
3189
3190 case R_CTL_UNSOL_CONTROL:
3191 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3192 sp->flags |= SRB_GENERIC_SERVICES_PKT;
3193 rval = ql_fc_services(ha, pkt);
3194 }
3195 break;
3196
3197 case R_CTL_SOLICITED_DATA:
3198 case R_CTL_STATUS:
3199 default:
3200 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3201 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3202 rval = FC_TRANSPORT_ERROR;
3203 EL(ha, "unknown, r_ctl=%xh\n",
3204 pkt->pkt_cmd_fhdr.r_ctl);
3205 break;
3206 }
3207 }
3208
3209 if (rval != FC_SUCCESS) {
3210 EL(ha, "failed, rval = %xh\n", rval);
3211 } else {
3212 /*EMPTY*/
3213 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3214 }
3215
3216 return (rval);
3217 }
3218
3219 /*
3220 * ql_ub_alloc
3221 * Allocate buffers for unsolicited exchanges.
3222 *
3223 * Input:
3224 * fca_handle = handle setup by ql_bind_port().
3225 * tokens = token array for each buffer.
3226 * size = size of each buffer.
3227 * count = pointer to number of buffers.
3228 * type = the FC-4 type the buffers are reserved for.
3229 * 1 = Extended Link Services, 5 = LLC/SNAP
3230 *
3231 * Returns:
3232 * FC_FAILURE - buffers could not be allocated.
3233 * FC_TOOMANY - the FCA could not allocate the requested
3234 * number of buffers.
3235 * FC_SUCCESS - unsolicited buffers were allocated.
3236 * FC_UNBOUND - the fca_handle specified is not bound.
3237 *
3238 * Context:
3239 * Kernel context.
3240 */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse the request unless the adapter is at full power (D0). */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check the count - the new request plus what is already
	 * allocated must not exceed the adapter-wide limit.
	 */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 * (loop is skipped entirely when rval is already a failure).
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/*
		 * Allocate all memory needed.
		 * NOTE(review): kmem_zalloc(KM_SLEEP) blocks until memory
		 * is available and should not return NULL on Solaris; the
		 * NULL checks below appear to be defensive only - confirm.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				if (type == FC_TYPE_IS8802_SNAP) {
					/*
					 * IP buffers are DMA-able; the two
					 * arms below differ only in DMA
					 * endianness (sparc vs. others).
					 */
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP (e.g. ELS) buffers come
					 * straight from the kernel heap. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/* Find next available slot (under the UB lock). */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token (slot index) for the caller. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * IP buffer: if any IP buffers were added (and the controller
	 * family supports IP), enable/initialize IP and hand receive
	 * buffers to the ISP.
	 */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/*
				 * Program MTU, buffer size and (if larger)
				 * the buffer container count into the IP
				 * init control block - 24xx layout vs. the
				 * legacy layout.
				 */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Post the new receive buffers to the firmware. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3462
3463 /*
3464 * ql_ub_free
3465 * Free unsolicited buffers.
3466 *
3467 * Input:
3468 * fca_handle = handle setup by ql_bind_port().
3469 * count = number of buffers.
3470 * tokens = token array for each buffer.
3471 *
3472 * Returns:
3473 * FC_SUCCESS - the requested buffers have been freed.
3474 * FC_UNBOUND - the fca_handle specified is not bound.
3475 * FC_UB_BADTOKEN - an invalid token was encountered.
3476 * No buffers have been released.
3477 *
3478 * Context:
3479 * Kernel context.
3480 */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.
	 * NOTE(review): tokens are validated one at a time while freeing;
	 * a bad token mid-list aborts the loop after earlier buffers have
	 * already been freed, which appears to contradict the header's
	 * "No buffers have been released" claim - confirm intent.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll until the buffer is back in the FCA and not in a
		 * callback or acquired by upper layers. Both locks are
		 * dropped around the 100ms sleep so other threads can
		 * return the buffer, then reacquired in the same order.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		/* Detach the slot, then free the buffer and its SRB. */
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3562
3563 /*
3564 * ql_ub_release
3565 * Release unsolicited buffers from FC Transport
3566 * to FCA for future use.
3567 *
3568 * Input:
3569 * fca_handle = handle setup by ql_bind_port().
3570 * count = number of buffers.
3571 * tokens = token array for each buffer.
3572 *
3573 * Returns:
3574 * FC_SUCCESS - the requested buffers have been released.
3575 * FC_UNBOUND - the fca_handle specified is not bound.
3576 * FC_UB_BADTOKEN - an invalid token was encountered.
3577 * No buffers have been released.
3578 *
3579 * Context:
3580 * Kernel context.
3581 */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * First pass: check all returned tokens without modifying any
	 * state, so that a bad token releases nothing (as documented).
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/*
		 * Check the state of the unsolicited buffer - it cannot
		 * already be owned by the FCA.
		 */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Second pass: mark every buffer as returned to the FCA. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer - remember to repost to the ISP below. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3672
3673 /*
3674 * ql_abort
3675 * Abort a packet.
3676 *
3677 * Input:
3678 * fca_handle = handle setup by ql_bind_port().
3679 * pkt = pointer to fc_packet.
3680 * flags = KM_SLEEP flag.
3681 *
3682 * Returns:
3683 * FC_SUCCESS - the packet has successfully aborted.
3684 * FC_ABORTED - the packet has successfully aborted.
3685 * FC_ABORTING - the packet is being aborted.
3686 * FC_ABORT_FAILED - the packet could not be aborted.
3687 * FC_TRANSPORT_ERROR - a transport error occurred while attempting
3688 * to abort the packet.
3689 * FC_BADEXCHANGE - no packet found.
3690 * FC_UNBOUND - the fca_handle specified is not bound.
3691 *
3692 * Context:
3693 * Kernel context.
3694 */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target queue, or the loop is down - nothing to abort against. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock, then the request ring lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		/* Release request ring lock. */
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			/* Complete the packet locally - never reached FW. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Release request ring and device queue locks. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quiet then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		/*
		 * Command is in flight: invalidate its request ring entry
		 * (if still present) and ask the firmware to abort it.
		 */
		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release request ring and device queue locks. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3840
3841 /*
3842 * ql_reset
3843 * Reset link or hardware.
3844 *
3845 * Input:
3846 * fca_handle = handle setup by ql_bind_port().
3847 * cmd = reset type command.
3848 *
3849 * Returns:
3850 * FC_SUCCESS - reset has successfully finished.
3851 * FC_UNBOUND - the fca_handle specified is not bound.
3852 * FC_FAILURE - reset failed.
3853 *
3854 * Context:
3855 * Kernel context.
3856 */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only meaningful on the physical port. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only if it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* Dump firmware core first, if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				/* Virtual port: loop reset stands in for
				 * a core dump (skipped when loop down). */
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/*
			 * Inform the transport to release buffers via its
			 * state-change callback (FC_STATE_RESET_REQUESTED).
			 */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits; state bits are rebuilt below. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * The port driver expects an online state if the
			 * buffers were not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon report the new state upstream. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3969
3970 /*
3971 * ql_port_manage
3972 * Perform port management or diagnostics.
3973 *
3974 * Input:
3975 * fca_handle = handle setup by ql_bind_port().
3976 * cmd = pointer to command structure.
3977 *
3978 * Returns:
3979 * FC_SUCCESS - the request completed successfully.
3980 * FC_FAILURE - the request did not complete successfully.
3981 * FC_UNBOUND - the fca_handle specified is not bound.
3982 *
3983 * Context:
3984 * Kernel context.
3985 */
3986 static int
3987 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3988 {
3989 clock_t timer;
3990 uint16_t index;
3991 uint32_t *bp;
3992 port_id_t d_id;
3993 ql_link_t *link;
3994 ql_adapter_state_t *ha, *pha;
3995 ql_tgt_t *tq;
3996 dma_mem_t buffer_xmt, buffer_rcv;
3997 size_t length;
3998 uint32_t cnt;
3999 char buf[80];
4000 lbp_t *lb;
4001 ql_mbx_data_t mr;
4002 app_mbx_cmd_t *mcp;
4003 int i0;
4004 uint8_t *bptr;
4005 int rval2, rval = FC_SUCCESS;
4006 uint32_t opcode;
4007 uint32_t set_flags = 0;
4008
4009 ha = ql_fca_handle_to_state(fca_handle);
4010 if (ha == NULL) {
4011 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4012 (void *)fca_handle);
4013 return (FC_UNBOUND);
4014 }
4015 pha = ha->pha;
4016
4017 QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4018 cmd->pm_cmd_code);
4019
4020 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4021
4022 /*
4023 * Wait for all outstanding commands to complete
4024 */
4025 index = (uint16_t)ql_wait_outstanding(ha);
4026
4027 if (index != MAX_OUTSTANDING_COMMANDS) {
4028 ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4029 ql_restart_queues(ha);
4030 EL(ha, "failed, FC_TRAN_BUSY\n");
4031 return (FC_TRAN_BUSY);
4032 }
4033
4034 switch (cmd->pm_cmd_code) {
4035 case FC_PORT_BYPASS:
4036 d_id.b24 = *cmd->pm_cmd_buf;
4037 tq = ql_d_id_to_queue(ha, d_id);
4038 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4039 EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4040 rval = FC_FAILURE;
4041 }
4042 break;
4043 case FC_PORT_UNBYPASS:
4044 d_id.b24 = *cmd->pm_cmd_buf;
4045 tq = ql_d_id_to_queue(ha, d_id);
4046 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4047 EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4048 rval = FC_FAILURE;
4049 }
4050 break;
4051 case FC_PORT_GET_FW_REV:
4052 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4053 pha->fw_minor_version, pha->fw_subminor_version);
4054 length = strlen(buf) + 1;
4055 if (cmd->pm_data_len < length) {
4056 cmd->pm_data_len = length;
4057 EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4058 rval = FC_FAILURE;
4059 } else {
4060 (void) strcpy(cmd->pm_data_buf, buf);
4061 }
4062 break;
4063
4064 case FC_PORT_GET_FCODE_REV: {
4065 caddr_t fcode_ver_buf = NULL;
4066
4067 i0 = 0;
4068 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4069 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4070 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4071 (caddr_t)&fcode_ver_buf, &i0);
4072 length = (uint_t)i0;
4073
4074 if (rval2 != DDI_PROP_SUCCESS) {
4075 EL(ha, "failed, getting version = %xh\n", rval2);
4076 length = 20;
4077 fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4078 if (fcode_ver_buf != NULL) {
4079 (void) sprintf(fcode_ver_buf,
4080 "NO FCODE FOUND");
4081 }
4082 }
4083
4084 if (cmd->pm_data_len < length) {
4085 EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4086 "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4087 cmd->pm_data_len = length;
4088 rval = FC_FAILURE;
4089 } else if (fcode_ver_buf != NULL) {
4090 bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4091 length);
4092 }
4093
4094 if (fcode_ver_buf != NULL) {
4095 kmem_free(fcode_ver_buf, length);
4096 }
4097 break;
4098 }
4099
4100 case FC_PORT_GET_DUMP:
4101 QL_DUMP_LOCK(pha);
4102 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4103 EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4104 "length=%lxh\n", cmd->pm_data_len);
4105 cmd->pm_data_len = pha->risc_dump_size;
4106 rval = FC_FAILURE;
4107 } else if (pha->ql_dump_state & QL_DUMPING) {
4108 EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4109 rval = FC_TRAN_BUSY;
4110 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4111 (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4112 pha->ql_dump_state |= QL_DUMP_UPLOADED;
4113 } else {
4114 EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4115 rval = FC_FAILURE;
4116 }
4117 QL_DUMP_UNLOCK(pha);
4118 break;
4119 case FC_PORT_FORCE_DUMP:
4120 PORTMANAGE_LOCK(ha);
4121 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4122 EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4123 rval = FC_FAILURE;
4124 }
4125 PORTMANAGE_UNLOCK(ha);
4126 break;
4127 case FC_PORT_DOWNLOAD_FW:
4128 PORTMANAGE_LOCK(ha);
4129 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4130 if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4131 (uint32_t)cmd->pm_data_len,
4132 ha->flash_fw_addr << 2) != QL_SUCCESS) {
4133 EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4134 rval = FC_FAILURE;
4135 }
4136 ql_reset_chip(ha);
4137 set_flags |= ISP_ABORT_NEEDED;
4138 } else {
4139 /* Save copy of the firmware. */
4140 if (pha->risc_code != NULL) {
4141 kmem_free(pha->risc_code, pha->risc_code_size);
4142 pha->risc_code = NULL;
4143 pha->risc_code_size = 0;
4144 }
4145
4146 pha->risc_code = kmem_alloc(cmd->pm_data_len,
4147 KM_SLEEP);
4148 if (pha->risc_code != NULL) {
4149 pha->risc_code_size =
4150 (uint32_t)cmd->pm_data_len;
4151 bcopy(cmd->pm_data_buf, pha->risc_code,
4152 cmd->pm_data_len);
4153
4154 /* Do abort to force reload. */
4155 ql_reset_chip(ha);
4156 if (ql_abort_isp(ha) != QL_SUCCESS) {
4157 kmem_free(pha->risc_code,
4158 pha->risc_code_size);
4159 pha->risc_code = NULL;
4160 pha->risc_code_size = 0;
4161 ql_reset_chip(ha);
4162 (void) ql_abort_isp(ha);
4163 EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4164 " FC_FAILURE\n");
4165 rval = FC_FAILURE;
4166 }
4167 }
4168 }
4169 PORTMANAGE_UNLOCK(ha);
4170 break;
4171 case FC_PORT_GET_DUMP_SIZE:
4172 bp = (uint32_t *)cmd->pm_data_buf;
4173 *bp = pha->risc_dump_size;
4174 break;
4175 case FC_PORT_DIAG:
4176 /*
4177 * Prevents concurrent diags
4178 */
4179 PORTMANAGE_LOCK(ha);
4180
4181 /* Wait for suspension to end. */
4182 for (timer = 0; timer < 3000 &&
4183 pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4184 ql_delay(ha, 10000);
4185 }
4186
4187 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4188 EL(ha, "failed, FC_TRAN_BUSY-2\n");
4189 rval = FC_TRAN_BUSY;
4190 PORTMANAGE_UNLOCK(ha);
4191 break;
4192 }
4193
4194 switch (cmd->pm_cmd_flags) {
4195 case QL_DIAG_EXEFMW:
4196 if (ql_start_firmware(ha) != QL_SUCCESS) {
4197 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4198 rval = FC_FAILURE;
4199 }
4200 break;
4201 case QL_DIAG_CHKCMDQUE:
4202 for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4203 i0++) {
4204 cnt += (pha->outstanding_cmds[i0] != NULL);
4205 }
4206 if (cnt != 0) {
4207 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4208 "FC_FAILURE\n");
4209 rval = FC_FAILURE;
4210 }
4211 break;
4212 case QL_DIAG_FMWCHKSUM:
4213 if (ql_verify_checksum(ha) != QL_SUCCESS) {
4214 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4215 "FC_FAILURE\n");
4216 rval = FC_FAILURE;
4217 }
4218 break;
4219 case QL_DIAG_SLFTST:
4220 if (ql_online_selftest(ha) != QL_SUCCESS) {
4221 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4222 rval = FC_FAILURE;
4223 }
4224 ql_reset_chip(ha);
4225 set_flags |= ISP_ABORT_NEEDED;
4226 break;
4227 case QL_DIAG_REVLVL:
4228 if (cmd->pm_stat_len <
4229 sizeof (ql_adapter_revlvl_t)) {
4230 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4231 "slen=%lxh, rlvllen=%lxh\n",
4232 cmd->pm_stat_len,
4233 sizeof (ql_adapter_revlvl_t));
4234 rval = FC_NOMEM;
4235 } else {
4236 bcopy((void *)&(pha->adapter_stats->revlvl),
4237 cmd->pm_stat_buf,
4238 (size_t)cmd->pm_stat_len);
4239 cmd->pm_stat_len =
4240 sizeof (ql_adapter_revlvl_t);
4241 }
4242 break;
4243 case QL_DIAG_LPBMBX:
4244
4245 if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4246 EL(ha, "failed, QL_DIAG_LPBMBX "
4247 "FC_INVALID_REQUEST, pmlen=%lxh, "
4248 "reqd=%lxh\n", cmd->pm_data_len,
4249 sizeof (struct app_mbx_cmd));
4250 rval = FC_INVALID_REQUEST;
4251 break;
4252 }
4253 /*
4254 * Don't do the wrap test on a 2200 when the
4255 * firmware is running.
4256 */
4257 if (!CFG_IST(ha, CFG_CTRL_2200)) {
4258 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4259 mr.mb[1] = mcp->mb[1];
4260 mr.mb[2] = mcp->mb[2];
4261 mr.mb[3] = mcp->mb[3];
4262 mr.mb[4] = mcp->mb[4];
4263 mr.mb[5] = mcp->mb[5];
4264 mr.mb[6] = mcp->mb[6];
4265 mr.mb[7] = mcp->mb[7];
4266
4267 bcopy(&mr.mb[0], &mr.mb[10],
4268 sizeof (uint16_t) * 8);
4269
4270 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4271 EL(ha, "failed, QL_DIAG_LPBMBX "
4272 "FC_FAILURE\n");
4273 rval = FC_FAILURE;
4274 break;
4275 } else {
4276 for (i0 = 1; i0 < 8; i0++) {
4277 if (mr.mb[i0] !=
4278 mr.mb[i0 + 10]) {
4279 EL(ha, "failed, "
4280 "QL_DIAG_LPBMBX "
4281 "FC_FAILURE-2\n");
4282 rval = FC_FAILURE;
4283 break;
4284 }
4285 }
4286 }
4287
4288 if (rval == FC_FAILURE) {
4289 (void) ql_flash_errlog(ha,
4290 FLASH_ERRLOG_ISP_ERR, 0,
4291 RD16_IO_REG(ha, hccr),
4292 RD16_IO_REG(ha, istatus));
4293 set_flags |= ISP_ABORT_NEEDED;
4294 }
4295 }
4296 break;
4297 case QL_DIAG_LPBDTA:
4298 /*
4299 * For loopback data, we receive the
4300 * data back in pm_stat_buf. This provides
4301 * the user an opportunity to compare the
4302 * transmitted and received data.
4303 *
4304 * NB: lb->options are:
4305 * 0 --> Ten bit loopback
4306 * 1 --> One bit loopback
4307 * 2 --> External loopback
4308 */
4309 if (cmd->pm_data_len > 65536) {
4310 rval = FC_TOOMANY;
4311 EL(ha, "failed, QL_DIAG_LPBDTA "
4312 "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4313 break;
4314 }
4315 if (ql_get_dma_mem(ha, &buffer_xmt,
4316 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4317 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4318 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4319 rval = FC_NOMEM;
4320 break;
4321 }
4322 if (ql_get_dma_mem(ha, &buffer_rcv,
4323 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4324 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4325 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4326 rval = FC_NOMEM;
4327 break;
4328 }
4329 ddi_rep_put8(buffer_xmt.acc_handle,
4330 (uint8_t *)cmd->pm_data_buf,
4331 (uint8_t *)buffer_xmt.bp,
4332 cmd->pm_data_len, DDI_DEV_AUTOINCR);
4333
4334 /* 22xx's adapter must be in loop mode for test. */
4335 if (CFG_IST(ha, CFG_CTRL_2200)) {
4336 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4337 if (ha->flags & POINT_TO_POINT ||
4338 (ha->task_daemon_flags & LOOP_DOWN &&
4339 *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4340 cnt = *bptr;
4341 *bptr = (uint8_t)
4342 (*bptr & ~(BIT_6|BIT_5|BIT_4));
4343 (void) ql_abort_isp(ha);
4344 *bptr = (uint8_t)cnt;
4345 }
4346 }
4347
4348 /* Shutdown IP. */
4349 if (pha->flags & IP_INITIALIZED) {
4350 (void) ql_shutdown_ip(pha);
4351 }
4352
4353 lb = (lbp_t *)cmd->pm_cmd_buf;
4354 lb->transfer_count =
4355 (uint32_t)cmd->pm_data_len;
4356 lb->transfer_segment_count = 0;
4357 lb->receive_segment_count = 0;
4358 lb->transfer_data_address =
4359 buffer_xmt.cookie.dmac_address;
4360 lb->receive_data_address =
4361 buffer_rcv.cookie.dmac_address;
4362
4363 if (ql_loop_back(ha, 0, lb,
4364 buffer_xmt.cookie.dmac_notused,
4365 buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4366 bzero((void *)cmd->pm_stat_buf,
4367 cmd->pm_stat_len);
4368 ddi_rep_get8(buffer_rcv.acc_handle,
4369 (uint8_t *)cmd->pm_stat_buf,
4370 (uint8_t *)buffer_rcv.bp,
4371 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4372 rval = FC_SUCCESS;
4373 } else {
4374 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4375 rval = FC_FAILURE;
4376 }
4377
4378 ql_free_phys(ha, &buffer_xmt);
4379 ql_free_phys(ha, &buffer_rcv);
4380
4381 /* Needed to recover the f/w */
4382 set_flags |= ISP_ABORT_NEEDED;
4383
4384 /* Restart IP if it was shutdown. */
4385 if (pha->flags & IP_ENABLED &&
4386 !(pha->flags & IP_INITIALIZED)) {
4387 (void) ql_initialize_ip(pha);
4388 ql_isp_rcvbuf(pha);
4389 }
4390
4391 break;
4392 case QL_DIAG_ECHO: {
4393 /*
4394 * issue an echo command with a user supplied
4395 * data pattern and destination address
4396 */
4397 echo_t echo; /* temp echo struct */
4398
4399 /* Setup echo cmd & adjust for platform */
4400 opcode = QL_ECHO_CMD;
4401 BIG_ENDIAN_32(&opcode);
4402
4403 /*
4404 * due to limitations in the ql
4405 * firmaware the echo data field is
4406 * limited to 220
4407 */
4408 if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4409 (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4410 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4411 "cmdl1=%lxh, statl2=%lxh\n",
4412 cmd->pm_cmd_len, cmd->pm_stat_len);
4413 rval = FC_TOOMANY;
4414 break;
4415 }
4416
4417 /*
4418 * the input data buffer has the user
4419 * supplied data pattern. The "echoed"
4420 * data will be DMAed into the output
4421 * data buffer. Therefore the length
4422 * of the output buffer must be equal
4423 * to or greater then the input buffer
4424 * length
4425 */
4426 if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4427 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4428 " cmdl1=%lxh, statl2=%lxh\n",
4429 cmd->pm_cmd_len, cmd->pm_stat_len);
4430 rval = FC_TOOMANY;
4431 break;
4432 }
4433 /* add four bytes for the opcode */
4434 echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4435
4436 /*
4437 * are we 32 or 64 bit addressed???
4438 * We need to get the appropriate
4439 * DMA and set the command options;
4440 * 64 bit (bit 6) or 32 bit
4441 * (no bit 6) addressing.
4442 * while we are at it lets ask for
4443 * real echo (bit 15)
4444 */
4445 echo.options = BIT_15;
4446 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4447 !(CFG_IST(ha, CFG_CTRL_8081))) {
4448 echo.options = (uint16_t)
4449 (echo.options | BIT_6);
4450 }
4451
4452 /*
4453 * Set up the DMA mappings for the
4454 * output and input data buffers.
4455 * First the output buffer
4456 */
4457 if (ql_get_dma_mem(ha, &buffer_xmt,
4458 (uint32_t)(cmd->pm_data_len + 4),
4459 LITTLE_ENDIAN_DMA,
4460 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4461 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4462 rval = FC_NOMEM;
4463 break;
4464 }
4465 echo.transfer_data_address = buffer_xmt.cookie;
4466
4467 /* Next the input buffer */
4468 if (ql_get_dma_mem(ha, &buffer_rcv,
4469 (uint32_t)(cmd->pm_data_len + 4),
4470 LITTLE_ENDIAN_DMA,
4471 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4472 /*
4473 * since we could not allocate
4474 * DMA space for the input
4475 * buffer we need to clean up
4476 * by freeing the DMA space
4477 * we allocated for the output
4478 * buffer
4479 */
4480 ql_free_phys(ha, &buffer_xmt);
4481 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4482 rval = FC_NOMEM;
4483 break;
4484 }
4485 echo.receive_data_address = buffer_rcv.cookie;
4486
4487 /*
4488 * copy the 4 byte ECHO op code to the
4489 * allocated DMA space
4490 */
4491 ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4492 (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4493
4494 /*
4495 * copy the user supplied data to the
4496 * allocated DMA space
4497 */
4498 ddi_rep_put8(buffer_xmt.acc_handle,
4499 (uint8_t *)cmd->pm_cmd_buf,
4500 (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4501 DDI_DEV_AUTOINCR);
4502
4503 /* Shutdown IP. */
4504 if (pha->flags & IP_INITIALIZED) {
4505 (void) ql_shutdown_ip(pha);
4506 }
4507
4508 /* send the echo */
4509 if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4510 ddi_rep_put8(buffer_rcv.acc_handle,
4511 (uint8_t *)buffer_rcv.bp + 4,
4512 (uint8_t *)cmd->pm_stat_buf,
4513 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4514 } else {
4515 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4516 rval = FC_FAILURE;
4517 }
4518
4519 /* Restart IP if it was shutdown. */
4520 if (pha->flags & IP_ENABLED &&
4521 !(pha->flags & IP_INITIALIZED)) {
4522 (void) ql_initialize_ip(pha);
4523 ql_isp_rcvbuf(pha);
4524 }
4525 /* free up our DMA buffers */
4526 ql_free_phys(ha, &buffer_xmt);
4527 ql_free_phys(ha, &buffer_rcv);
4528 break;
4529 }
4530 default:
4531 EL(ha, "unknown=%xh, FC_PORT_DIAG "
4532 "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4533 rval = FC_INVALID_REQUEST;
4534 break;
4535 }
4536 PORTMANAGE_UNLOCK(ha);
4537 break;
4538 case FC_PORT_LINK_STATE:
4539 /* Check for name equal to null. */
4540 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4541 index++) {
4542 if (cmd->pm_cmd_buf[index] != 0) {
4543 break;
4544 }
4545 }
4546
4547 /* If name not null. */
4548 if (index < 8 && cmd->pm_cmd_len >= 8) {
4549 /* Locate device queue. */
4550 tq = NULL;
4551 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4552 tq == NULL; index++) {
4553 for (link = ha->dev[index].first; link != NULL;
4554 link = link->next) {
4555 tq = link->base_address;
4556
4557 if (bcmp((void *)&tq->port_name[0],
4558 (void *)cmd->pm_cmd_buf, 8) == 0) {
4559 break;
4560 } else {
4561 tq = NULL;
4562 }
4563 }
4564 }
4565
4566 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4567 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4568 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4569 } else {
4570 cnt = FC_PORT_SPEED_MASK(ha->state) |
4571 FC_STATE_OFFLINE;
4572 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4573 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4574 }
4575 } else {
4576 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4577 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4578 }
4579 break;
4580 case FC_PORT_INITIALIZE:
4581 if (cmd->pm_cmd_len >= 8) {
4582 tq = NULL;
4583 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4584 tq == NULL; index++) {
4585 for (link = ha->dev[index].first; link != NULL;
4586 link = link->next) {
4587 tq = link->base_address;
4588
4589 if (bcmp((void *)&tq->port_name[0],
4590 (void *)cmd->pm_cmd_buf, 8) == 0) {
4591 if (!VALID_DEVICE_ID(ha,
4592 tq->loop_id)) {
4593 tq = NULL;
4594 }
4595 break;
4596 } else {
4597 tq = NULL;
4598 }
4599 }
4600 }
4601
4602 if (tq == NULL || ql_target_reset(ha, tq,
4603 ha->loop_reset_delay) != QL_SUCCESS) {
4604 EL(ha, "failed, FC_PORT_INITIALIZE "
4605 "FC_FAILURE\n");
4606 rval = FC_FAILURE;
4607 }
4608 } else {
4609 EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4610 "clen=%lxh\n", cmd->pm_cmd_len);
4611
4612 rval = FC_FAILURE;
4613 }
4614 break;
4615 case FC_PORT_RLS:
4616 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4617 EL(ha, "failed, buffer size passed: %lxh, "
4618 "req: %lxh\n", cmd->pm_data_len,
4619 (sizeof (fc_rls_acc_t)));
4620 rval = FC_FAILURE;
4621 } else if (LOOP_NOT_READY(pha)) {
4622 EL(ha, "loop NOT ready\n");
4623 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4624 } else if (ql_get_link_status(ha, ha->loop_id,
4625 cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4626 EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4627 rval = FC_FAILURE;
4628 #ifdef _BIG_ENDIAN
4629 } else {
4630 fc_rls_acc_t *rls;
4631
4632 rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4633 LITTLE_ENDIAN_32(&rls->rls_link_fail);
4634 LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4635 LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4636 LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4637 #endif /* _BIG_ENDIAN */
4638 }
4639 break;
4640 case FC_PORT_GET_NODE_ID:
4641 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4642 cmd->pm_data_buf) != QL_SUCCESS) {
4643 EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4644 rval = FC_FAILURE;
4645 }
4646 break;
4647 case FC_PORT_SET_NODE_ID:
4648 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4649 cmd->pm_data_buf) != QL_SUCCESS) {
4650 EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4651 rval = FC_FAILURE;
4652 }
4653 break;
4654 case FC_PORT_DOWNLOAD_FCODE:
4655 PORTMANAGE_LOCK(ha);
4656 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4657 rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4658 (uint32_t)cmd->pm_data_len);
4659 } else {
4660 if (cmd->pm_data_buf[0] == 4 &&
4661 cmd->pm_data_buf[8] == 0 &&
4662 cmd->pm_data_buf[9] == 0x10 &&
4663 cmd->pm_data_buf[10] == 0 &&
4664 cmd->pm_data_buf[11] == 0) {
4665 rval = ql_24xx_load_flash(ha,
4666 (uint8_t *)cmd->pm_data_buf,
4667 (uint32_t)cmd->pm_data_len,
4668 ha->flash_fw_addr << 2);
4669 } else {
4670 rval = ql_24xx_load_flash(ha,
4671 (uint8_t *)cmd->pm_data_buf,
4672 (uint32_t)cmd->pm_data_len, 0);
4673 }
4674 }
4675
4676 if (rval != QL_SUCCESS) {
4677 EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4678 rval = FC_FAILURE;
4679 } else {
4680 rval = FC_SUCCESS;
4681 }
4682 ql_reset_chip(ha);
4683 set_flags |= ISP_ABORT_NEEDED;
4684 PORTMANAGE_UNLOCK(ha);
4685 break;
4686 default:
4687 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4688 rval = FC_BADCMD;
4689 break;
4690 }
4691
4692 /* Wait for suspension to end. */
4693 ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4694 timer = 0;
4695
4696 while (timer++ < 3000 &&
4697 ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4698 ql_delay(ha, 10000);
4699 }
4700
4701 ql_restart_queues(ha);
4702
4703 if (rval != FC_SUCCESS) {
4704 EL(ha, "failed, rval = %xh\n", rval);
4705 } else {
4706 /*EMPTY*/
4707 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4708 }
4709
4710 return (rval);
4711 }
4712
4713 static opaque_t
4714 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4715 {
4716 port_id_t id;
4717 ql_adapter_state_t *ha;
4718 ql_tgt_t *tq;
4719
4720 id.r.rsvd_1 = 0;
4721 id.b24 = d_id.port_id;
4722
4723 ha = ql_fca_handle_to_state(fca_handle);
4724 if (ha == NULL) {
4725 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4726 (void *)fca_handle);
4727 return (NULL);
4728 }
4729 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4730
4731 tq = ql_d_id_to_queue(ha, id);
4732
4733 if (tq == NULL) {
4734 EL(ha, "failed, tq=NULL\n");
4735 } else {
4736 /*EMPTY*/
4737 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4738 }
4739 return (tq);
4740 }
4741
4742 /* ************************************************************************ */
4743 /* FCA Driver Local Support Functions. */
4744 /* ************************************************************************ */
4745
4746 /*
4747 * ql_cmd_setup
4748 * Verifies proper command.
4749 *
4750 * Input:
4751 * fca_handle = handle setup by ql_bind_port().
4752 * pkt = pointer to fc_packet.
4753 * rval = pointer for return value.
4754 *
4755 * Returns:
4756 * Adapter state pointer, NULL = failure.
4757 *
4758 * Context:
4759 * Kernel context.
4760 */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bypass all further state checks when dumping during a panic. */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Reject commands while the adapter is not online. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP SCSI commands, use the device queue cached in the
	 * packet, re-resolving it by D_ID if it is missing or stale.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			/*
			 * A device with a pending RSCN or one that still
			 * needs authentication is busy to new commands.
			 */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.  The bitwise
	 * ANDs are deliberate: all three operands are evaluated without
	 * short-circuit branches.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	/* Any failed DMA/access handle check fails the whole packet. */
	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* Verify the packet was allocated through this FCA. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4889
4890 /*
4891 * ql_els_plogi
 *	Issue an extended link service port login request.
4893 *
4894 * Input:
4895 * ha = adapter state pointer.
4896 * pkt = pointer to fc_packet.
4897 *
4898 * Returns:
4899 * FC_SUCCESS - the packet was accepted for transport.
4900 * FC_TRANSPORT_ERROR - a transport error occurred.
4901 *
4902 * Context:
4903 * Kernel context.
4904 */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* A PLOGI cannot be serviced while the adapter is offline. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology it sends a PLOGI after determining
		 * it has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/* The p2p path queued the iocb itself; nothing more to do here. */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	/* Log the port into the firmware and locate its device queue. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* Stale loop ID; retry the login once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Report the target's WWPN/WWNN in the accept payload. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Fill in class 3 service parameters from the queue. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Device still busy; turn the ACC into a RJT. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			/* Ask the task daemon to run iiDMA negotiation. */
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware status to an FC packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5073
5074 /*
5075 * ql_p2p_plogi
5076 * Start an extended link service port login request using
5077 * an ELS Passthru iocb.
5078 *
5079 * Input:
5080 * ha = adapter state pointer.
5081 * pkt = pointer to fc_packet.
5082 *
5083 * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
5085 *
5086 * Context:
5087 * Kernel context.
5088 */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;
	ql_tgt_t	*tq = &tmp;
	int		rval;
	port_id_t	d_id;
	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Scratch queue entry used only for port database lookups. */
	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary. This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Current handle collides with an
					 * unusable entry; advance it past
					 * this slot.  The increment goes
					 * through a temporary to avoid a
					 * lint error.  NOTE(review): the
					 * handle is advanced by two -
					 * confirm that is intentional.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
				EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
				    "master state=%x\n", rval, id, tq->loop_id,
				    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* A logged-in non-initiator entry is usable as-is. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the command buffer to the device before transport. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* Queue the ELS passthru iocb on the real device queue. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);
	ql_timeout_insert(ha, tq, sp);
	ql_start_iocb(ha, sp);

	return (QL_CONSUMED);
}
5170
5171
5172 /*
5173 * ql_els_flogi
 *	Issue an extended link service fabric login request.
5175 *
5176 * Input:
5177 * ha = adapter state pointer.
5178 * pkt = pointer to fc_packet.
5179 *
5180 * Returns:
5181 * FC_SUCCESS - the packet was accepted for transport.
5182 * FC_TRANSPORT_ERROR - a transport error occurred.
5183 *
5184 * Context:
5185 * Kernel context.
5186 */
5187 static int
5188 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5189 {
5190 ql_tgt_t *tq = NULL;
5191 port_id_t d_id;
5192 la_els_logi_t acc;
5193 class_svc_param_t *class3_param;
5194 int rval = FC_SUCCESS;
5195 int accept = 0;
5196
5197 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5198 pkt->pkt_cmd_fhdr.d_id);
5199
5200 bzero(&acc, sizeof (acc));
5201 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5202
5203 if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5204 /*
5205 * d_id of zero in a FLOGI accept response in a point to point
5206 * topology triggers evaluation of N Port login initiative.
5207 */
5208 pkt->pkt_resp_fhdr.d_id = 0;
5209 /*
5210 * An N_Port already logged in with the firmware
5211 * will have the only database entry.
5212 */
5213 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5214 tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5215 }
5216
5217 if (tq != NULL) {
5218 /*
5219 * If the target port has initiative send
5220 * up a PLOGI about the new device.
5221 */
5222 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5223 (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5224 &ha->init_ctrl_blk.cb24.port_name[0] :
5225 &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5226 ha->send_plogi_timer = 3;
5227 } else {
5228 ha->send_plogi_timer = 0;
5229 }
5230 pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5231 } else {
5232 /*
5233 * An N_Port not logged in with the firmware will not
5234 * have a database entry. We accept anyway and rely
5235 * on a PLOGI from the upper layers to set the d_id
5236 * and s_id.
5237 */
5238 accept = 1;
5239 }
5240 } else {
5241 tq = ql_d_id_to_queue(ha, d_id);
5242 }
5243 if ((tq != NULL) || (accept != NULL)) {
5244 /* Build ACC. */
5245 pkt->pkt_state = FC_PKT_SUCCESS;
5246 class3_param = (class_svc_param_t *)&acc.class_3;
5247
5248 acc.ls_code.ls_code = LA_ELS_ACC;
5249 acc.common_service.fcph_version = 0x2006;
5250 if (ha->topology & QL_N_PORT) {
5251 /* clear F_Port indicator */
5252 acc.common_service.cmn_features = 0x0800;
5253 } else {
5254 acc.common_service.cmn_features = 0x1b00;
5255 }
5256 CFG_IST(ha, CFG_CTRL_24258081) ?
5257 (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5258 ha->init_ctrl_blk.cb24.max_frame_length[0],
5259 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5260 (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5261 ha->init_ctrl_blk.cb.max_frame_length[0],
5262 ha->init_ctrl_blk.cb.max_frame_length[1]));
5263 acc.common_service.conc_sequences = 0xff;
5264 acc.common_service.relative_offset = 0x03;
5265 acc.common_service.e_d_tov = 0x7d0;
5266 if (accept) {
5267 /* Use the saved N_Port WWNN and WWPN */
5268 if (ha->n_port != NULL) {
5269 bcopy((void *)&ha->n_port->port_name[0],
5270 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5271 bcopy((void *)&ha->n_port->node_name[0],
5272 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5273 /* mark service options invalid */
5274 class3_param->class_valid_svc_opt = 0x0800;
5275 } else {
5276 EL(ha, "ha->n_port is NULL\n");
5277 /* Build RJT. */
5278 acc.ls_code.ls_code = LA_ELS_RJT;
5279
5280 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5281 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5282 }
5283 } else {
5284 bcopy((void *)&tq->port_name[0],
5285 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5286 bcopy((void *)&tq->node_name[0],
5287 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5288
5289 class3_param = (class_svc_param_t *)&acc.class_3;
5290 class3_param->class_valid_svc_opt = 0x8800;
5291 class3_param->recipient_ctl = tq->class3_recipient_ctl;
5292 class3_param->rcv_data_size = tq->class3_rcv_data_size;
5293 class3_param->conc_sequences =
5294 tq->class3_conc_sequences;
5295 class3_param->open_sequences_per_exch =
5296 tq->class3_open_sequences_per_exch;
5297 }
5298 } else {
5299 /* Build RJT. */
5300 acc.ls_code.ls_code = LA_ELS_RJT;
5301
5302 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5303 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5304 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5305 }
5306
5307 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5308 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5309
5310 if (rval != FC_SUCCESS) {
5311 EL(ha, "failed, rval = %xh\n", rval);
5312 } else {
5313 /*EMPTY*/
5314 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5315 }
5316 return (rval);
5317 }
5318
5319 /*
5320 * ql_els_logo
 *	Issue an extended link service logout request.
5322 *
5323 * Input:
5324 * ha = adapter state pointer.
5325 * pkt = pointer to fc_packet.
5326 *
5327 * Returns:
5328 * FC_SUCCESS - the packet was accepted for transport.
5329 * FC_TRANSPORT_ERROR - a transport error occurred.
5330 *
5331 * Context:
5332 * Kernel context.
5333 */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never log out the broadcast address. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh PLOGI before new commands are accepted. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		/*
		 * Abort outstanding commands and poll until the device's
		 * outstanding count drains to zero.  The lock is dropped
		 * around the abort/delay and retaken to sample outcnt.
		 */
		do {
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT payload into the response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5399
5400 /*
5401 * ql_els_prli
 *	Issue an extended link service process login request.
5403 *
5404 * Input:
5405 * ha = adapter state pointer.
5406 * pkt = pointer to fc_packet.
5407 *
5408 * Returns:
5409 * FC_SUCCESS - the packet was accepted for transport.
5410 * FC_TRANSPORT_ERROR - a transport error occurred.
5411 *
5412 * Context:
5413 * Kernel context.
5414 */
static int
ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq;
	port_id_t		d_id;
	la_els_prli_t		acc;
	prli_svc_param_t	*param;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		/*
		 * In N_Port (point-to-point) topology with PLOGI already
		 * complete, put the PRLI on the wire via an iocb; the
		 * completion path delivers the result asynchronously.
		 */
		if ((ha->topology & QL_N_PORT) &&
		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
			ql_timeout_insert(ha, tq, sp);
			ql_start_iocb(ha, sp);
			rval = QL_CONSUMED;
		} else {
			/* Build ACC. */
			bzero(&acc, sizeof (acc));
			acc.ls_code = LA_ELS_ACC;
			acc.page_length = 0x10;
			acc.payload_length = tq->prli_payload_length;

			/* Service parameter page, type 0x08. */
			param = (prli_svc_param_t *)&acc.service_params[0];
			param->type = 0x08;
			param->rsvd = 0x00;
			param->process_assoc_flags = tq->prli_svc_param_word_0;
			param->process_flags = tq->prli_svc_param_word_3;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		la_els_rjt_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5481
5482 /*
5483 * ql_els_prlo
5484 * Issue a extended link service process logout request.
5485 *
5486 * Input:
5487 * ha = adapter state pointer.
5488 * pkt = pointer to fc_packet.
5489 *
5490 * Returns:
5491 * FC_SUCCESS - the packet was accepted for transport.
5492 * FC_TRANSPORT_ERROR - a transport error occurred.
5493 *
5494 * Context:
5495 * Kernel context.
5496 */
5497 /* ARGSUSED */
5498 static int
5499 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5500 {
5501 la_els_prli_t acc;
5502 int rval = FC_SUCCESS;
5503
5504 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5505 pkt->pkt_cmd_fhdr.d_id);
5506
5507 /* Build ACC. */
5508 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5509 (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5510
5511 acc.ls_code = LA_ELS_ACC;
5512 acc.service_params[2] = 1;
5513
5514 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5515 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5516
5517 pkt->pkt_state = FC_PKT_SUCCESS;
5518
5519 if (rval != FC_SUCCESS) {
5520 EL(ha, "failed, rval = %xh\n", rval);
5521 } else {
5522 /*EMPTY*/
5523 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5524 }
5525 return (rval);
5526 }
5527
5528 /*
5529 * ql_els_adisc
5530 * Issue a extended link service address discovery request.
5531 *
5532 * Input:
5533 * ha = adapter state pointer.
5534 * pkt = pointer to fc_packet.
5535 *
5536 * Returns:
5537 * FC_SUCCESS - the packet was accepted for transport.
5538 * FC_TRANSPORT_ERROR - a transport error occurred.
5539 *
5540 * Context:
5541 * Kernel context.
5542 */
5543 static int
5544 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5545 {
5546 ql_dev_id_list_t *list;
5547 uint32_t list_size;
5548 ql_link_t *link;
5549 ql_tgt_t *tq;
5550 ql_lun_t *lq;
5551 port_id_t d_id;
5552 la_els_adisc_t acc;
5553 uint16_t index, loop_id;
5554 ql_mbx_data_t mr;
5555 int rval = FC_SUCCESS;
5556
5557 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5558
5559 bzero(&acc, sizeof (acc));
5560 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5561
5562 /*
5563 * MBC_GET_PORT_DATABASE causes ADISC to go out to
5564 * the device from the firmware
5565 */
5566 index = ql_alpa_to_index[d_id.b.al_pa];
5567 tq = NULL;
5568 for (link = ha->dev[index].first; link != NULL; link = link->next) {
5569 tq = link->base_address;
5570 if (tq->d_id.b24 == d_id.b24) {
5571 break;
5572 } else {
5573 tq = NULL;
5574 }
5575 }
5576
5577 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5578 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
5579 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
5580
5581 if (list != NULL &&
5582 ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
5583 QL_SUCCESS) {
5584
5585 for (index = 0; index < mr.mb[1]; index++) {
5586 ql_dev_list(ha, list, index, &d_id, &loop_id);
5587
5588 if (tq->d_id.b24 == d_id.b24) {
5589 tq->loop_id = loop_id;
5590 break;
5591 }
5592 }
5593 } else {
5594 cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
5595 QL_NAME, ha->instance, d_id.b24);
5596 tq = NULL;
5597 }
5598 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5599 cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
5600 QL_NAME, ha->instance, tq->d_id.b24);
5601 tq = NULL;
5602 }
5603
5604 if (list != NULL) {
5605 kmem_free(list, list_size);
5606 }
5607 }
5608
5609 if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
5610 ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {
5611
5612 /* Build ACC. */
5613
5614 DEVICE_QUEUE_LOCK(tq);
5615 tq->flags &= ~TQF_NEED_AUTHENTICATION;
5616 if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
5617 for (link = tq->lun_queues.first; link != NULL;
5618 link = link->next) {
5619 lq = link->base_address;
5620
5621 if (lq->cmd.first != NULL) {
5622 ql_next(ha, lq);
5623 DEVICE_QUEUE_LOCK(tq);
5624 }
5625 }
5626 }
5627 DEVICE_QUEUE_UNLOCK(tq);
5628
5629 acc.ls_code.ls_code = LA_ELS_ACC;
5630 acc.hard_addr.hard_addr = tq->hard_addr.b24;
5631
5632 bcopy((void *)&tq->port_name[0],
5633 (void *)&acc.port_wwn.raw_wwn[0], 8);
5634 bcopy((void *)&tq->node_name[0],
5635 (void *)&acc.node_wwn.raw_wwn[0], 8);
5636
5637 acc.nport_id.port_id = tq->d_id.b24;
5638
5639 pkt->pkt_state = FC_PKT_SUCCESS;
5640 } else {
5641 /* Build RJT. */
5642 acc.ls_code.ls_code = LA_ELS_RJT;
5643
5644 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5645 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5646 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5647 }
5648
5649 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5650 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5651
5652 if (rval != FC_SUCCESS) {
5653 EL(ha, "failed, rval = %xh\n", rval);
5654 } else {
5655 /*EMPTY*/
5656 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5657 }
5658 return (rval);
5659 }
5660
5661 /*
5662 * ql_els_linit
5663 * Issue a extended link service loop initialize request.
5664 *
5665 * Input:
5666 * ha = adapter state pointer.
5667 * pkt = pointer to fc_packet.
5668 *
5669 * Returns:
5670 * FC_SUCCESS - the packet was accepted for transport.
5671 * FC_TRANSPORT_ERROR - a transport error occurred.
5672 *
5673 * Context:
5674 * Kernel context.
5675 */
5676 static int
5677 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5678 {
5679 ddi_dma_cookie_t *cp;
5680 uint32_t cnt;
5681 conv_num_t n;
5682 port_id_t d_id;
5683 int rval = FC_SUCCESS;
5684
5685 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5686
5687 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5688 if (ha->topology & QL_SNS_CONNECTION) {
5689 fc_linit_req_t els;
5690 lfa_cmd_t lfa;
5691
5692 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5693 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5694
5695 /* Setup LFA mailbox command data. */
5696 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5697
5698 lfa.resp_buffer_length[0] = 4;
5699
5700 cp = pkt->pkt_resp_cookie;
5701 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5702 n.size64 = (uint64_t)cp->dmac_laddress;
5703 LITTLE_ENDIAN_64(&n.size64);
5704 } else {
5705 n.size32[0] = LSD(cp->dmac_laddress);
5706 LITTLE_ENDIAN_32(&n.size32[0]);
5707 n.size32[1] = MSD(cp->dmac_laddress);
5708 LITTLE_ENDIAN_32(&n.size32[1]);
5709 }
5710
5711 /* Set buffer address. */
5712 for (cnt = 0; cnt < 8; cnt++) {
5713 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5714 }
5715
5716 lfa.subcommand_length[0] = 4;
5717 n.size32[0] = d_id.b24;
5718 LITTLE_ENDIAN_32(&n.size32[0]);
5719 lfa.addr[0] = n.size8[0];
5720 lfa.addr[1] = n.size8[1];
5721 lfa.addr[2] = n.size8[2];
5722 lfa.subcommand[1] = 0x70;
5723 lfa.payload[2] = els.func;
5724 lfa.payload[4] = els.lip_b3;
5725 lfa.payload[5] = els.lip_b4;
5726
5727 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5728 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5729 } else {
5730 pkt->pkt_state = FC_PKT_SUCCESS;
5731 }
5732 } else {
5733 fc_linit_resp_t rjt;
5734
5735 /* Build RJT. */
5736 bzero(&rjt, sizeof (rjt));
5737 rjt.ls_code.ls_code = LA_ELS_RJT;
5738
5739 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5740 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5741
5742 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5743 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5744 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5745 }
5746
5747 if (rval != FC_SUCCESS) {
5748 EL(ha, "failed, rval = %xh\n", rval);
5749 } else {
5750 /*EMPTY*/
5751 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5752 }
5753 return (rval);
5754 }
5755
5756 /*
5757 * ql_els_lpc
5758 * Issue a extended link service loop control request.
5759 *
5760 * Input:
5761 * ha = adapter state pointer.
5762 * pkt = pointer to fc_packet.
5763 *
5764 * Returns:
5765 * FC_SUCCESS - the packet was accepted for transport.
5766 * FC_TRANSPORT_ERROR - a transport error occurred.
5767 *
5768 * Context:
5769 * Kernel context.
5770 */
5771 static int
5772 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5773 {
5774 ddi_dma_cookie_t *cp;
5775 uint32_t cnt;
5776 conv_num_t n;
5777 port_id_t d_id;
5778 int rval = FC_SUCCESS;
5779
5780 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5781
5782 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5783 if (ha->topology & QL_SNS_CONNECTION) {
5784 ql_lpc_t els;
5785 lfa_cmd_t lfa;
5786
5787 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5788 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5789
5790 /* Setup LFA mailbox command data. */
5791 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5792
5793 lfa.resp_buffer_length[0] = 4;
5794
5795 cp = pkt->pkt_resp_cookie;
5796 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5797 n.size64 = (uint64_t)(cp->dmac_laddress);
5798 LITTLE_ENDIAN_64(&n.size64);
5799 } else {
5800 n.size32[0] = cp->dmac_address;
5801 LITTLE_ENDIAN_32(&n.size32[0]);
5802 n.size32[1] = 0;
5803 }
5804
5805 /* Set buffer address. */
5806 for (cnt = 0; cnt < 8; cnt++) {
5807 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5808 }
5809
5810 lfa.subcommand_length[0] = 20;
5811 n.size32[0] = d_id.b24;
5812 LITTLE_ENDIAN_32(&n.size32[0]);
5813 lfa.addr[0] = n.size8[0];
5814 lfa.addr[1] = n.size8[1];
5815 lfa.addr[2] = n.size8[2];
5816 lfa.subcommand[1] = 0x71;
5817 lfa.payload[4] = els.port_control;
5818 bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5819
5820 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5821 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5822 } else {
5823 pkt->pkt_state = FC_PKT_SUCCESS;
5824 }
5825 } else {
5826 ql_lpc_resp_t rjt;
5827
5828 /* Build RJT. */
5829 bzero(&rjt, sizeof (rjt));
5830 rjt.ls_code.ls_code = LA_ELS_RJT;
5831
5832 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5833 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5834
5835 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5836 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5837 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5838 }
5839
5840 if (rval != FC_SUCCESS) {
5841 EL(ha, "failed, rval = %xh\n", rval);
5842 } else {
5843 /*EMPTY*/
5844 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5845 }
5846 return (rval);
5847 }
5848
5849 /*
5850 * ql_els_lsts
5851 * Issue a extended link service loop status request.
5852 *
5853 * Input:
5854 * ha = adapter state pointer.
5855 * pkt = pointer to fc_packet.
5856 *
5857 * Returns:
5858 * FC_SUCCESS - the packet was accepted for transport.
5859 * FC_TRANSPORT_ERROR - a transport error occurred.
5860 *
5861 * Context:
5862 * Kernel context.
5863 */
5864 static int
5865 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5866 {
5867 ddi_dma_cookie_t *cp;
5868 uint32_t cnt;
5869 conv_num_t n;
5870 port_id_t d_id;
5871 int rval = FC_SUCCESS;
5872
5873 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5874
5875 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5876 if (ha->topology & QL_SNS_CONNECTION) {
5877 fc_lsts_req_t els;
5878 lfa_cmd_t lfa;
5879
5880 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5881 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5882
5883 /* Setup LFA mailbox command data. */
5884 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5885
5886 lfa.resp_buffer_length[0] = 84;
5887
5888 cp = pkt->pkt_resp_cookie;
5889 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5890 n.size64 = cp->dmac_laddress;
5891 LITTLE_ENDIAN_64(&n.size64);
5892 } else {
5893 n.size32[0] = cp->dmac_address;
5894 LITTLE_ENDIAN_32(&n.size32[0]);
5895 n.size32[1] = 0;
5896 }
5897
5898 /* Set buffer address. */
5899 for (cnt = 0; cnt < 8; cnt++) {
5900 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5901 }
5902
5903 lfa.subcommand_length[0] = 2;
5904 n.size32[0] = d_id.b24;
5905 LITTLE_ENDIAN_32(&n.size32[0]);
5906 lfa.addr[0] = n.size8[0];
5907 lfa.addr[1] = n.size8[1];
5908 lfa.addr[2] = n.size8[2];
5909 lfa.subcommand[1] = 0x72;
5910
5911 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5912 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5913 } else {
5914 pkt->pkt_state = FC_PKT_SUCCESS;
5915 }
5916 } else {
5917 fc_lsts_resp_t rjt;
5918
5919 /* Build RJT. */
5920 bzero(&rjt, sizeof (rjt));
5921 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5922
5923 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5924 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5925
5926 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5927 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5928 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5929 }
5930
5931 if (rval != FC_SUCCESS) {
5932 EL(ha, "failed=%xh\n", rval);
5933 } else {
5934 /*EMPTY*/
5935 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5936 }
5937 return (rval);
5938 }
5939
5940 /*
5941 * ql_els_scr
5942 * Issue a extended link service state change registration request.
5943 *
5944 * Input:
5945 * ha = adapter state pointer.
5946 * pkt = pointer to fc_packet.
5947 *
5948 * Returns:
5949 * FC_SUCCESS - the packet was accepted for transport.
5950 * FC_TRANSPORT_ERROR - a transport error occurred.
5951 *
5952 * Context:
5953 * Kernel context.
5954 */
5955 static int
5956 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5957 {
5958 fc_scr_resp_t acc;
5959 int rval = FC_SUCCESS;
5960
5961 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5962
5963 bzero(&acc, sizeof (acc));
5964 if (ha->topology & QL_SNS_CONNECTION) {
5965 fc_scr_req_t els;
5966
5967 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5968 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5969
5970 if (ql_send_change_request(ha, els.scr_func) ==
5971 QL_SUCCESS) {
5972 /* Build ACC. */
5973 acc.scr_acc = LA_ELS_ACC;
5974
5975 pkt->pkt_state = FC_PKT_SUCCESS;
5976 } else {
5977 /* Build RJT. */
5978 acc.scr_acc = LA_ELS_RJT;
5979
5980 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5981 pkt->pkt_reason = FC_REASON_HW_ERROR;
5982 EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5983 }
5984 } else {
5985 /* Build RJT. */
5986 acc.scr_acc = LA_ELS_RJT;
5987
5988 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5989 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5990 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5991 }
5992
5993 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5994 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5995
5996 if (rval != FC_SUCCESS) {
5997 EL(ha, "failed, rval = %xh\n", rval);
5998 } else {
5999 /*EMPTY*/
6000 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6001 }
6002 return (rval);
6003 }
6004
6005 /*
6006 * ql_els_rscn
6007 * Issue a extended link service register state
6008 * change notification request.
6009 *
6010 * Input:
6011 * ha = adapter state pointer.
6012 * pkt = pointer to fc_packet.
6013 *
6014 * Returns:
6015 * FC_SUCCESS - the packet was accepted for transport.
6016 * FC_TRANSPORT_ERROR - a transport error occurred.
6017 *
6018 * Context:
6019 * Kernel context.
6020 */
6021 static int
6022 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6023 {
6024 ql_rscn_resp_t acc;
6025 int rval = FC_SUCCESS;
6026
6027 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6028
6029 bzero(&acc, sizeof (acc));
6030 if (ha->topology & QL_SNS_CONNECTION) {
6031 /* Build ACC. */
6032 acc.scr_acc = LA_ELS_ACC;
6033
6034 pkt->pkt_state = FC_PKT_SUCCESS;
6035 } else {
6036 /* Build RJT. */
6037 acc.scr_acc = LA_ELS_RJT;
6038
6039 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6040 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6041 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6042 }
6043
6044 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6045 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6046
6047 if (rval != FC_SUCCESS) {
6048 EL(ha, "failed, rval = %xh\n", rval);
6049 } else {
6050 /*EMPTY*/
6051 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6052 }
6053 return (rval);
6054 }
6055
6056 /*
6057 * ql_els_farp_req
6058 * Issue FC Address Resolution Protocol (FARP)
6059 * extended link service request.
6060 *
6061 * Note: not supported.
6062 *
6063 * Input:
6064 * ha = adapter state pointer.
6065 * pkt = pointer to fc_packet.
6066 *
6067 * Returns:
6068 * FC_SUCCESS - the packet was accepted for transport.
6069 * FC_TRANSPORT_ERROR - a transport error occurred.
6070 *
6071 * Context:
6072 * Kernel context.
6073 */
6074 static int
6075 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6076 {
6077 ql_acc_rjt_t acc;
6078 int rval = FC_SUCCESS;
6079
6080 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6081
6082 bzero(&acc, sizeof (acc));
6083
6084 /* Build ACC. */
6085 acc.ls_code.ls_code = LA_ELS_ACC;
6086
6087 pkt->pkt_state = FC_PKT_SUCCESS;
6088
6089 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6090 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6091
6092 if (rval != FC_SUCCESS) {
6093 EL(ha, "failed, rval = %xh\n", rval);
6094 } else {
6095 /*EMPTY*/
6096 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6097 }
6098 return (rval);
6099 }
6100
6101 /*
6102 * ql_els_farp_reply
6103 * Issue FC Address Resolution Protocol (FARP)
6104 * extended link service reply.
6105 *
6106 * Note: not supported.
6107 *
6108 * Input:
6109 * ha = adapter state pointer.
6110 * pkt = pointer to fc_packet.
6111 *
6112 * Returns:
6113 * FC_SUCCESS - the packet was accepted for transport.
6114 * FC_TRANSPORT_ERROR - a transport error occurred.
6115 *
6116 * Context:
6117 * Kernel context.
6118 */
6119 /* ARGSUSED */
6120 static int
6121 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6122 {
6123 ql_acc_rjt_t acc;
6124 int rval = FC_SUCCESS;
6125
6126 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6127
6128 bzero(&acc, sizeof (acc));
6129
6130 /* Build ACC. */
6131 acc.ls_code.ls_code = LA_ELS_ACC;
6132
6133 pkt->pkt_state = FC_PKT_SUCCESS;
6134
6135 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6136 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6137
6138 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6139
6140 return (rval);
6141 }
6142
6143 static int
6144 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
6145 {
6146 uchar_t *rnid_acc;
6147 port_id_t d_id;
6148 ql_link_t *link;
6149 ql_tgt_t *tq;
6150 uint16_t index;
6151 la_els_rnid_acc_t acc;
6152 la_els_rnid_t *req;
6153 size_t req_len;
6154
6155 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6156
6157 req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6158 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6159 index = ql_alpa_to_index[d_id.b.al_pa];
6160
6161 tq = NULL;
6162 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6163 tq = link->base_address;
6164 if (tq->d_id.b24 == d_id.b24) {
6165 break;
6166 } else {
6167 tq = NULL;
6168 }
6169 }
6170
6171 /* Allocate memory for rnid status block */
6172 rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
6173
6174 bzero(&acc, sizeof (acc));
6175
6176 req = (la_els_rnid_t *)pkt->pkt_cmd;
6177 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6178 (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
6179 (caddr_t)rnid_acc) != QL_SUCCESS)) {
6180
6181 kmem_free(rnid_acc, req_len);
6182 acc.ls_code.ls_code = LA_ELS_RJT;
6183
6184 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6185 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6186
6187 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6188 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6189 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6190
6191 return (FC_FAILURE);
6192 }
6193
6194 acc.ls_code.ls_code = LA_ELS_ACC;
6195 bcopy(rnid_acc, &acc.hdr, req_len);
6196 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6197 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6198
6199 kmem_free(rnid_acc, req_len);
6200 pkt->pkt_state = FC_PKT_SUCCESS;
6201
6202 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6203
6204 return (FC_SUCCESS);
6205 }
6206
6207 static int
6208 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6209 {
6210 fc_rls_acc_t *rls_acc;
6211 port_id_t d_id;
6212 ql_link_t *link;
6213 ql_tgt_t *tq;
6214 uint16_t index;
6215 la_els_rls_acc_t acc;
6216
6217 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6218
6219 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6220 index = ql_alpa_to_index[d_id.b.al_pa];
6221
6222 tq = NULL;
6223 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6224 tq = link->base_address;
6225 if (tq->d_id.b24 == d_id.b24) {
6226 break;
6227 } else {
6228 tq = NULL;
6229 }
6230 }
6231
6232 /* Allocate memory for link error status block */
6233 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6234
6235 bzero(&acc, sizeof (la_els_rls_acc_t));
6236
6237 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6238 (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
6239 (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
6240
6241 kmem_free(rls_acc, sizeof (*rls_acc));
6242 acc.ls_code.ls_code = LA_ELS_RJT;
6243
6244 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6245 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6246
6247 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6248 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6249 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6250
6251 return (FC_FAILURE);
6252 }
6253
6254 LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6255 LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6256 LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6257 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6258 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6259
6260 acc.ls_code.ls_code = LA_ELS_ACC;
6261 acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6262 acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6263 acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
6264 acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6265 acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6266 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6267 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6268
6269 kmem_free(rls_acc, sizeof (*rls_acc));
6270 pkt->pkt_state = FC_PKT_SUCCESS;
6271
6272 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6273
6274 return (FC_SUCCESS);
6275 }
6276
/*
 * ql_busy_plogi
 *	Determine whether a PLOGI to the given target can proceed.
 *
 *	Waits (up to 5 x 10ms) for the target's outstanding command
 *	count to drain to zero, then - for synchronous (polled) PLOGIs
 *	only - scans the task daemon callback queue for entries still
 *	addressed to this target.  Either condition returns FC_TRAN_BUSY
 *	so the transport retries the PLOGI later.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - the PLOGI may proceed.
 *	FC_TRAN_BUSY - commands or callbacks still pending; retry later.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t		d_id;
	ql_srb_t		*sp;
	fc_unsol_buf_t		*ubp;
	ql_link_t		*link, *next_link;
	int			rval = FC_SUCCESS;
	int			cnt = 5;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying so I/O can drain. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/*
		 * Resolve the D_ID the queued entry is for: unsolicited
		 * buffer callbacks carry it in the frame header, normal
		 * SRBs in their packet's command header.
		 */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6360
6361 /*
6362 * ql_login_port
6363 * Logs in a device if not already logged in.
6364 *
6365 * Input:
6366 * ha = adapter state pointer.
6367 * d_id = 24 bit port ID.
6368 * DEVICE_QUEUE_LOCK must be released.
6369 *
6370 * Returns:
6371 * QL local function return status code.
6372 *
6373 * Context:
6374 * Kernel context.
6375 */
6376 static int
6377 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6378 {
6379 ql_adapter_state_t *vha;
6380 ql_link_t *link;
6381 uint16_t index;
6382 ql_tgt_t *tq, *tq2;
6383 uint16_t loop_id, first_loop_id, last_loop_id;
6384 int rval = QL_SUCCESS;
6385
6386 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6387 d_id.b24);
6388
6389 /* Get head queue index. */
6390 index = ql_alpa_to_index[d_id.b.al_pa];
6391
6392 /* Check for device already has a queue. */
6393 tq = NULL;
6394 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6395 tq = link->base_address;
6396 if (tq->d_id.b24 == d_id.b24) {
6397 loop_id = tq->loop_id;
6398 break;
6399 } else {
6400 tq = NULL;
6401 }
6402 }
6403
6404 /* Let's stop issuing any IO and unsolicited logo */
6405 if ((tq != NULL) && (!(ddi_in_panic()))) {
6406 DEVICE_QUEUE_LOCK(tq);
6407 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6408 tq->flags &= ~TQF_RSCN_RCVD;
6409 DEVICE_QUEUE_UNLOCK(tq);
6410 }
6411 if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6412 !(tq->flags & TQF_FABRIC_DEVICE)) {
6413 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6414 }
6415
6416 /* Special case for Nameserver */
6417 if (d_id.b24 == 0xFFFFFC) {
6418 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
6419 SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6420 if (tq == NULL) {
6421 ADAPTER_STATE_LOCK(ha);
6422 tq = ql_dev_init(ha, d_id, loop_id);
6423 ADAPTER_STATE_UNLOCK(ha);
6424 if (tq == NULL) {
6425 EL(ha, "failed=%xh, d_id=%xh\n",
6426 QL_FUNCTION_FAILED, d_id.b24);
6427 return (QL_FUNCTION_FAILED);
6428 }
6429 }
6430 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
6431 rval = ql_login_fabric_port(ha, tq, loop_id);
6432 if (rval == QL_SUCCESS) {
6433 tq->loop_id = loop_id;
6434 tq->flags |= TQF_FABRIC_DEVICE;
6435 (void) ql_get_port_database(ha, tq, PDF_NONE);
6436 }
6437 } else {
6438 ha->topology = (uint8_t)
6439 (ha->topology | QL_SNS_CONNECTION);
6440 }
6441 /* Check for device already logged in. */
6442 } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6443 if (tq->flags & TQF_FABRIC_DEVICE) {
6444 rval = ql_login_fabric_port(ha, tq, loop_id);
6445 if (rval == QL_PORT_ID_USED) {
6446 rval = QL_SUCCESS;
6447 }
6448 } else if (LOCAL_LOOP_ID(loop_id)) {
6449 rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6450 (tq->flags & TQF_INITIATOR_DEVICE ?
6451 LLF_NONE : LLF_PLOGI));
6452 if (rval == QL_SUCCESS) {
6453 DEVICE_QUEUE_LOCK(tq);
6454 tq->loop_id = loop_id;
6455 DEVICE_QUEUE_UNLOCK(tq);
6456 }
6457 }
6458 } else if (ha->topology & QL_SNS_CONNECTION) {
6459 /* Locate unused loop ID. */
6460 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6461 first_loop_id = 0;
6462 last_loop_id = LAST_N_PORT_HDL;
6463 } else if (ha->topology & QL_F_PORT) {
6464 first_loop_id = 0;
6465 last_loop_id = SNS_LAST_LOOP_ID;
6466 } else {
6467 first_loop_id = SNS_FIRST_LOOP_ID;
6468 last_loop_id = SNS_LAST_LOOP_ID;
6469 }
6470
6471 /* Acquire adapter state lock. */
6472 ADAPTER_STATE_LOCK(ha);
6473
6474 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6475 if (tq == NULL) {
6476 EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6477 d_id.b24);
6478
6479 ADAPTER_STATE_UNLOCK(ha);
6480
6481 return (QL_FUNCTION_FAILED);
6482 }
6483
6484 rval = QL_FUNCTION_FAILED;
6485 loop_id = ha->pha->free_loop_id++;
6486 for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6487 index--) {
6488 if (loop_id < first_loop_id ||
6489 loop_id > last_loop_id) {
6490 loop_id = first_loop_id;
6491 ha->pha->free_loop_id = (uint16_t)
6492 (loop_id + 1);
6493 }
6494
6495 /* Bypass if loop ID used. */
6496 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6497 tq2 = ql_loop_id_to_queue(vha, loop_id);
6498 if (tq2 != NULL && tq2 != tq) {
6499 break;
6500 }
6501 }
6502 if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6503 loop_id == ha->loop_id) {
6504 loop_id = ha->pha->free_loop_id++;
6505 continue;
6506 }
6507
6508 ADAPTER_STATE_UNLOCK(ha);
6509 rval = ql_login_fabric_port(ha, tq, loop_id);
6510
6511 /*
6512 * If PORT_ID_USED is returned
6513 * the login_fabric_port() updates
6514 * with the correct loop ID
6515 */
6516 switch (rval) {
6517 case QL_PORT_ID_USED:
6518 /*
6519 * use f/w handle and try to
6520 * login again.
6521 */
6522 ADAPTER_STATE_LOCK(ha);
6523 ha->pha->free_loop_id--;
6524 ADAPTER_STATE_UNLOCK(ha);
6525 loop_id = tq->loop_id;
6526 break;
6527
6528 case QL_SUCCESS:
6529 tq->flags |= TQF_FABRIC_DEVICE;
6530 (void) ql_get_port_database(ha,
6531 tq, PDF_NONE);
6532 index = 1;
6533 break;
6534
6535 case QL_LOOP_ID_USED:
6536 tq->loop_id = PORT_NO_LOOP_ID;
6537 loop_id = ha->pha->free_loop_id++;
6538 break;
6539
6540 case QL_ALL_IDS_IN_USE:
6541 tq->loop_id = PORT_NO_LOOP_ID;
6542 index = 1;
6543 break;
6544
6545 default:
6546 tq->loop_id = PORT_NO_LOOP_ID;
6547 index = 1;
6548 break;
6549 }
6550
6551 ADAPTER_STATE_LOCK(ha);
6552 }
6553
6554 ADAPTER_STATE_UNLOCK(ha);
6555 } else {
6556 rval = QL_FUNCTION_FAILED;
6557 }
6558
6559 if (rval != QL_SUCCESS) {
6560 EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6561 } else {
6562 EL(ha, "d_id=%xh, loop_id=%xh, "
6563 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6564 tq->loop_id, tq->port_name[0], tq->port_name[1],
6565 tq->port_name[2], tq->port_name[3], tq->port_name[4],
6566 tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6567 }
6568 return (rval);
6569 }
6570
6571 /*
6572 * ql_login_fabric_port
6573 * Issue login fabric port mailbox command.
6574 *
6575 * Input:
6576 * ha: adapter state pointer.
6577 * tq: target queue pointer.
6578 * loop_id: FC Loop ID.
6579 *
6580 * Returns:
6581 * ql local function return status code.
6582 *
6583 * Context:
6584 * Kernel context.
6585 */
6586 static int
6587 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6588 {
6589 int rval;
6590 int index;
6591 int retry = 0;
6592 port_id_t d_id;
6593 ql_tgt_t *newq;
6594 ql_mbx_data_t mr;
6595
6596 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6597 tq->d_id.b24);
6598
6599 /*
6600 * QL_PARAMETER_ERROR also means the firmware is
6601 * not able to allocate PCB entry due to resource
6602 * issues, or collision.
6603 */
6604 do {
6605 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6606 if ((rval == QL_PARAMETER_ERROR) ||
6607 ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6608 mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6609 retry++;
6610 drv_usecwait(10 * MILLISEC);
6611 } else {
6612 break;
6613 }
6614 } while (retry < 5);
6615
6616 switch (rval) {
6617 case QL_SUCCESS:
6618 tq->loop_id = loop_id;
6619 break;
6620
6621 case QL_PORT_ID_USED:
6622 /*
6623 * This Loop ID should NOT be in use in drivers
6624 */
6625 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6626
6627 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6628 cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6629 "dup loop_id=%xh, d_id=%xh", ha->instance,
6630 newq->loop_id, newq->d_id.b24);
6631 ql_send_logo(ha, newq, NULL);
6632 }
6633
6634 tq->loop_id = mr.mb[1];
6635 break;
6636
6637 case QL_LOOP_ID_USED:
6638 d_id.b.al_pa = LSB(mr.mb[2]);
6639 d_id.b.area = MSB(mr.mb[2]);
6640 d_id.b.domain = LSB(mr.mb[1]);
6641
6642 newq = ql_d_id_to_queue(ha, d_id);
6643 if (newq && (newq->loop_id != loop_id)) {
6644 /*
6645 * This should NEVER ever happen; but this
6646 * code is needed to bail out when the worst
6647 * case happens - or as used to happen before
6648 */
6649 QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6650 "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6651 "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6652 ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6653 newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6654 newq->d_id.b24, loop_id);
6655
6656 if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6657 ADAPTER_STATE_LOCK(ha);
6658
6659 index = ql_alpa_to_index[newq->d_id.b.al_pa];
6660 ql_add_link_b(&ha->dev[index], &newq->device);
6661
6662 newq->d_id.b24 = d_id.b24;
6663
6664 index = ql_alpa_to_index[d_id.b.al_pa];
6665 ql_add_link_b(&ha->dev[index], &newq->device);
6666
6667 ADAPTER_STATE_UNLOCK(ha);
6668 }
6669
6670 (void) ql_get_port_database(ha, newq, PDF_NONE);
6671
6672 }
6673
6674 /*
6675 * Invalidate the loop ID for the
6676 * us to obtain a new one.
6677 */
6678 tq->loop_id = PORT_NO_LOOP_ID;
6679 break;
6680
6681 case QL_ALL_IDS_IN_USE:
6682 rval = QL_FUNCTION_FAILED;
6683 EL(ha, "no loop id's available\n");
6684 break;
6685
6686 default:
6687 if (rval == QL_COMMAND_ERROR) {
6688 switch (mr.mb[1]) {
6689 case 2:
6690 case 3:
6691 rval = QL_MEMORY_ALLOC_FAILED;
6692 break;
6693
6694 case 4:
6695 rval = QL_FUNCTION_TIMEOUT;
6696 break;
6697 case 7:
6698 rval = QL_FABRIC_NOT_INITIALIZED;
6699 break;
6700 default:
6701 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6702 break;
6703 }
6704 } else {
6705 cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6706 " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6707 ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6708 }
6709 break;
6710 }
6711
6712 if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6713 rval != QL_LOOP_ID_USED) {
6714 EL(ha, "failed=%xh\n", rval);
6715 } else {
6716 /*EMPTY*/
6717 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6718 }
6719 return (rval);
6720 }
6721
6722 /*
6723 * ql_logout_port
6724 * Logs out a device if possible.
6725 *
6726 * Input:
6727 * ha: adapter state pointer.
6728 * d_id: 24 bit port ID.
6729 *
6730 * Returns:
6731 * QL local function return status code.
6732 *
6733 * Context:
6734 * Kernel context.
6735 */
6736 static int
6737 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6738 {
6739 ql_link_t *link;
6740 ql_tgt_t *tq;
6741 uint16_t index;
6742
6743 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6744
6745 /* Get head queue index. */
6746 index = ql_alpa_to_index[d_id.b.al_pa];
6747
6748 /* Get device queue. */
6749 tq = NULL;
6750 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6751 tq = link->base_address;
6752 if (tq->d_id.b24 == d_id.b24) {
6753 break;
6754 } else {
6755 tq = NULL;
6756 }
6757 }
6758
6759 if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6760 (void) ql_logout_fabric_port(ha, tq);
6761 tq->loop_id = PORT_NO_LOOP_ID;
6762 }
6763
6764 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6765
6766 return (QL_SUCCESS);
6767 }
6768
6769 /*
6770 * ql_dev_init
6771 * Initialize/allocate device queue.
6772 *
6773 * Input:
6774 * ha: adapter state pointer.
6775 * d_id: device destination ID
6776 * loop_id: device loop ID
6777 * ADAPTER_STATE_LOCK must be already obtained.
6778 *
6779 * Returns:
6780 * NULL = failure
6781 *
6782 * Context:
6783 * Kernel context.
6784 */
6785 ql_tgt_t *
6786 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6787 {
6788 ql_link_t *link;
6789 uint16_t index;
6790 ql_tgt_t *tq;
6791
6792 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6793 ha->instance, d_id.b24, loop_id);
6794
6795 index = ql_alpa_to_index[d_id.b.al_pa];
6796
6797 /* If device queue exists, set proper loop ID. */
6798 tq = NULL;
6799 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6800 tq = link->base_address;
6801 if (tq->d_id.b24 == d_id.b24) {
6802 tq->loop_id = loop_id;
6803
6804 /* Reset port down retry count. */
6805 tq->port_down_retry_count = ha->port_down_retry_count;
6806 tq->qfull_retry_count = ha->qfull_retry_count;
6807
6808 break;
6809 } else {
6810 tq = NULL;
6811 }
6812 }
6813
6814 /* If device does not have queue. */
6815 if (tq == NULL) {
6816 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6817 if (tq != NULL) {
6818 /*
6819 * mutex to protect the device queue,
6820 * does not block interrupts.
6821 */
6822 mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6823 (ha->iflags & IFLG_INTR_AIF) ?
6824 (void *)(uintptr_t)ha->intr_pri :
6825 (void *)(uintptr_t)ha->iblock_cookie);
6826
6827 tq->d_id.b24 = d_id.b24;
6828 tq->loop_id = loop_id;
6829 tq->device.base_address = tq;
6830 tq->iidma_rate = IIDMA_RATE_INIT;
6831
6832 /* Reset port down retry count. */
6833 tq->port_down_retry_count = ha->port_down_retry_count;
6834 tq->qfull_retry_count = ha->qfull_retry_count;
6835
6836 /* Add device to device queue. */
6837 ql_add_link_b(&ha->dev[index], &tq->device);
6838 }
6839 }
6840
6841 if (tq == NULL) {
6842 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6843 } else {
6844 /*EMPTY*/
6845 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6846 }
6847 return (tq);
6848 }
6849
6850 /*
6851 * ql_dev_free
6852 * Remove queue from device list and frees resources used by queue.
6853 *
6854 * Input:
6855 * ha: adapter state pointer.
6856 * tq: target queue pointer.
6857 * ADAPTER_STATE_LOCK must be already obtained.
6858 *
6859 * Context:
6860 * Kernel context.
6861 */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * If any LUN on this target still has queued commands, the
	 * queue cannot be freed yet; bail out and leave it allocated.
	 * (Note: this early return intentionally skips the "done"
	 * trace below.)
	 */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only free when no commands are outstanding on the device. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				/* Unlink tq from the per-AL_PA device list. */
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue.  'link' is reused
				 * here to walk tq->lun_queues; the next
				 * pointer is saved before each node is
				 * unlinked and freed.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6905
6906 /*
6907 * ql_lun_queue
6908 * Allocate LUN queue if does not exists.
6909 *
6910 * Input:
6911 * ha: adapter state pointer.
6912 * tq: target queue.
6913 * lun: LUN number.
6914 *
6915 * Returns:
6916 * NULL = failure
6917 *
6918 * Context:
6919 * Kernel context.
6920 */
6921 static ql_lun_t *
6922 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6923 {
6924 ql_lun_t *lq;
6925 ql_link_t *link;
6926
6927 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6928
6929 /* Fast path. */
6930 if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6931 QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6932 return (tq->last_lun_queue);
6933 }
6934
6935 if (lun >= MAX_LUNS) {
6936 EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6937 return (NULL);
6938 }
6939 /* If device queue exists, set proper loop ID. */
6940 lq = NULL;
6941 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6942 lq = link->base_address;
6943 if (lq->lun_no == lun) {
6944 QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6945 tq->last_lun_queue = lq;
6946 return (lq);
6947 }
6948 }
6949
6950 /* If queue does exist. */
6951 lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6952
6953 /* Initialize LUN queue. */
6954 if (lq != NULL) {
6955 lq->link.base_address = lq;
6956
6957 lq->lun_no = lun;
6958 lq->target_queue = tq;
6959
6960 DEVICE_QUEUE_LOCK(tq);
6961 ql_add_link_b(&tq->lun_queues, &lq->link);
6962 DEVICE_QUEUE_UNLOCK(tq);
6963 tq->last_lun_queue = lq;
6964 }
6965
6966 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6967
6968 return (lq);
6969 }
6970
6971 /*
6972 * ql_fcp_scsi_cmd
6973 * Process fibre channel (FCP) SCSI protocol commands.
6974 *
6975 * Input:
6976 * ha = adapter state pointer.
6977 * pkt = pointer to fc_packet.
6978 * sp = srb pointer.
6979 *
6980 * Returns:
6981 * FC_SUCCESS - the packet was accepted for transport.
6982 * FC_TRANSPORT_ERROR - a transport error occurred.
6983 *
6984 * Context:
6985 * Kernel context.
6986 */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Resolve target queue: cached on the packet, else look up by D_ID. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/*
	 * Extract the LUN by byte-swapping ent_addr_0 to host order.
	 * NOTE(review): assumes peripheral-device style single-level
	 * LUN addressing - confirm against fcp_ent_addr definition.
	 */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes (3 x 8-byte stores)
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			/* Any TMF bit set: handled locally, not sent as I/O. */
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Account the I/O for the xioctl statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			sp->req_cnt = 1;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/*
				 * Setup IOCB count: one command IOCB plus
				 * enough continuation IOCBs to hold the
				 * DMA cookies that exceed ha->cmd_segs.
				 */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
				    (!CFG_IST(ha, CFG_CTRL_8021) ||
				    sp->sg_dma.dma_handle == NULL)) {
					uint32_t	cnt;

					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						/* Partial continuation IOCB. */
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				}
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No target or LUN queue: reject with "no connection". */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7078
7079 /*
7080 * ql_task_mgmt
7081 * Task management function processor.
7082 *
7083 * Input:
7084 * ha: adapter state pointer.
7085 * tq: target queue pointer.
7086 * pkt: pointer to fc_packet.
7087 * sp: SRB pointer.
7088 *
7089 * Context:
7090 * Kernel context.
7091 */
7092 static void
7093 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7094 ql_srb_t *sp)
7095 {
7096 fcp_rsp_t *fcpr;
7097 struct fcp_rsp_info *rsp;
7098 uint16_t lun;
7099
7100 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7101
7102 fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7103 rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
7104
7105 bzero(fcpr, pkt->pkt_rsplen);
7106
7107 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7108 fcpr->fcp_response_len = 8;
7109 lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7110 hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7111
7112 if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7113 if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7114 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7115 }
7116 } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7117 if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7118 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7119 }
7120 } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7121 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7122 QL_SUCCESS) {
7123 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7124 }
7125 } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7126 if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7127 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7128 }
7129 } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7130 if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7131 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7132 }
7133 } else {
7134 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7135 }
7136
7137 pkt->pkt_state = FC_PKT_SUCCESS;
7138
7139 /* Do command callback. */
7140 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7141 ql_awaken_task_daemon(ha, sp, 0, 0);
7142 }
7143
7144 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7145 }
7146
7147 /*
7148 * ql_fcp_ip_cmd
7149 * Process fibre channel (FCP) Internet (IP) protocols commands.
7150 *
7151 * Input:
7152 * ha: adapter state pointer.
7153 * pkt: pointer to fc_packet.
7154 * sp: SRB pointer.
7155 *
7156 * Returns:
7157 * FC_SUCCESS - the packet was accepted for transport.
7158 * FC_TRANSPORT_ERROR - a transport error occurred.
7159 *
7160 * Context:
7161 * Kernel context.
7162 */
7163 static int
7164 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7165 {
7166 port_id_t d_id;
7167 ql_tgt_t *tq;
7168
7169 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7170
7171 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7172 if (tq == NULL) {
7173 d_id.r.rsvd_1 = 0;
7174 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7175 tq = ql_d_id_to_queue(ha, d_id);
7176 }
7177
7178 if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7179 /*
7180 * IP data is bound to pkt_cmd_dma
7181 */
7182 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7183 0, 0, DDI_DMA_SYNC_FORDEV);
7184
7185 /* Setup IOCB count. */
7186 sp->iocb = ha->ip_cmd;
7187 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7188 uint32_t cnt;
7189
7190 cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7191 sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7192 if (cnt % ha->cmd_cont_segs) {
7193 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7194 } else {
7195 sp->req_cnt++;
7196 }
7197 } else {
7198 sp->req_cnt = 1;
7199 }
7200 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7201
7202 return (ql_start_cmd(ha, tq, pkt, sp));
7203 } else {
7204 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7205 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7206
7207 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7208 ql_awaken_task_daemon(ha, sp, 0, 0);
7209 }
7210
7211 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7212
7213 return (FC_SUCCESS);
7214 }
7215
7216 /*
7217 * ql_fc_services
7218 * Process fibre channel services (name server).
7219 *
7220 * Input:
7221 * ha: adapter state pointer.
7222 * pkt: pointer to fc_packet.
7223 *
7224 * Returns:
7225 * FC_SUCCESS - the packet was accepted for transport.
7226 * FC_TRANSPORT_ERROR - a transport error occurred.
7227 *
7228 * Context:
7229 * Kernel context.
7230 */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/*
	 * Do some sanity checks: ct_aiusize is in 4-byte words;
	 * total CT IU size must fit within the response buffer.
	 */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/* No connection: fall through to build the RJT. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		/* Mark as a Management Server passthrough command. */
		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/*
		 * Setup IOCB count: one MS IOCB plus continuation
		 * IOCBs for response cookies beyond MS_DATA_SEGMENTS.
		 */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		/* Command queued; callback happens on completion. */
		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7334
7335 /*
7336 * ql_cthdr_endian
7337 * Change endianess of ct passthrough header and payload.
7338 *
7339 * Input:
7340 * acc_handle: DMA buffer access handle.
7341 * ct_hdr: Pointer to header.
7342 * restore: Restore first flag.
7343 *
7344 * Context:
7345 * Interrupt or Kernel context, no mailbox commands allowed.
7346 */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Copy the CT header out of the DMA buffer into a local. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * restore == B_TRUE: header is currently big endian; swap it
	 * to host order FIRST so the ct_fcstype/ct_cmdrsp fields below
	 * are readable.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/*
	 * Swap the name-server payload that follows the header,
	 * directly in the DMA buffer.  The per-command offsets and
	 * widths depend on each NS request's payload layout.
	 */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			/* Single 32-bit port ID field. */
			BIG_ENDIAN_32(bp);
			break;
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			/* Two consecutive 32-bit fields. */
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		case NS_GNN_IP:
		case NS_GIPA_IP:
			/* 16-byte IP address field. */
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIP_NN:
			/* 8-byte node name, then 16-byte IP address. */
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIPA_NN:
			/* 8-byte node name, then 64-bit IPA. */
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/*
	 * restore == B_FALSE: header was in host order on entry; swap
	 * it to big endian now that its fields have been consumed.
	 */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (now swapped) header back to the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7418
7419 /*
7420 * ql_start_cmd
7421 * Finishes starting fibre channel protocol (FCP) command.
7422 *
7423 * Input:
7424 * ha: adapter state pointer.
7425 * tq: target queue pointer.
7426 * pkt: pointer to fc_packet.
7427 * sp: SRB pointer.
7428 *
7429 * Context:
7430 * Kernel context.
7431 */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			/* No caller timeout; use the SCSI polling default. */
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/* Wait a little longer than the watchdog would. */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/*
		 * Loop-down timeout expired: fail the command
		 * immediately instead of queueing it.
		 */
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			/*
			 * Panic path: bypass the device queue and issue
			 * the IOCB directly; reset the ISP once (first
			 * panic command only) to get a clean state.
			 */
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			/* (ql_next releases the device queue lock). */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			/* Timed out: try to abort; on failure, dequeue. */
			int res;

			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7590
7591 /*
7592 * ql_poll_cmd
7593 * Polls commands for completion.
7594 *
7595 * Input:
7596 * ha = adapter state pointer.
7597 * sp = SRB command pointer.
7598 * poll_wait = poll wait time in seconds.
7599 *
7600 * Returns:
7601 * QL local function return status code.
7602 *
7603 * Context:
7604 * Kernel context.
7605 */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	time_t			msecs_left = poll_wait * 100; /* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* SRB_POLL is cleared by the completion path when the SRB finishes. */
	while (sp->flags & SRB_POLL) {

		/*
		 * If interrupts are off, the adapter looks idle, or we
		 * are panicking, service the hardware by hand.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Manually run the ISR for any pending interrupt. */
			if (INTERRUPT_PENDING(ha)) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		/* Out of budget for another 10ms tick: give up. */
		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7676
7677 /*
7678 * ql_next
7679 * Retrieve and process next job in the device queue.
7680 *
7681 * Input:
7682 * ha: adapter state pointer.
7683 * lq: LUN queue pointer.
7684 * DEVICE_QUEUE_LOCK must be already obtained.
7685 *
7686 * Output:
7687 * Releases DEVICE_QUEUE_LOCK upon exit.
7688 *
7689 * Context:
7690 * Interrupt or Kernel context, no mailbox commands allowed.
7691 */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* During panic, commands are issued directly; just drop the lock. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN command queue while commands can be started. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Track outstanding commands on the target. */
		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock (acquired by the caller). */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7768
7769 /*
7770 * ql_done
7771 * Process completed commands.
7772 *
7773 * Input:
7774 * link: first command link in chain.
7775 *
7776 * Context:
7777 * Interrupt or Kernel context, no mailbox commands allowed.
7778 */
7779 void
7780 ql_done(ql_link_t *link)
7781 {
7782 ql_adapter_state_t *ha;
7783 ql_link_t *next_link;
7784 ql_srb_t *sp;
7785 ql_tgt_t *tq;
7786 ql_lun_t *lq;
7787
7788 QL_PRINT_3(CE_CONT, "started\n");
7789
7790 for (; link != NULL; link = next_link) {
7791 next_link = link->next;
7792 sp = link->base_address;
7793 ha = sp->ha;
7794
7795 if (sp->flags & SRB_UB_CALLBACK) {
7796 QL_UB_LOCK(ha);
7797 if (sp->flags & SRB_UB_IN_ISP) {
7798 if (ha->ub_outcnt != 0) {
7799 ha->ub_outcnt--;
7800 }
7801 QL_UB_UNLOCK(ha);
7802 ql_isp_rcvbuf(ha);
7803 QL_UB_LOCK(ha);
7804 }
7805 QL_UB_UNLOCK(ha);
7806 ql_awaken_task_daemon(ha, sp, 0, 0);
7807 } else {
7808 /* Free outstanding command slot. */
7809 if (sp->handle != 0) {
7810 ha->outstanding_cmds[
7811 sp->handle & OSC_INDEX_MASK] = NULL;
7812 sp->handle = 0;
7813 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7814 }
7815
7816 /* Acquire device queue lock. */
7817 lq = sp->lun_queue;
7818 tq = lq->target_queue;
7819 DEVICE_QUEUE_LOCK(tq);
7820
7821 /* Decrement outstanding commands on device. */
7822 if (tq->outcnt != 0) {
7823 tq->outcnt--;
7824 }
7825
7826 if (sp->flags & SRB_FCP_CMD_PKT) {
7827 if (sp->fcp->fcp_cntl.cntl_qtype ==
7828 FCP_QTYPE_UNTAGGED) {
7829 /*
7830 * Clear the flag for this LUN so that
7831 * untagged commands can be submitted
7832 * for it.
7833 */
7834 lq->flags &= ~LQF_UNTAGGED_PENDING;
7835 }
7836
7837 if (lq->lun_outcnt != 0) {
7838 lq->lun_outcnt--;
7839 }
7840 }
7841
7842 /* Reset port down retry count on good completion. */
7843 if (sp->pkt->pkt_reason == CS_COMPLETE) {
7844 tq->port_down_retry_count =
7845 ha->port_down_retry_count;
7846 tq->qfull_retry_count = ha->qfull_retry_count;
7847 }
7848
7849
7850 /* Alter aborted status for fast timeout feature */
7851 if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
7852 (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7853 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7854 sp->flags & SRB_RETRY &&
7855 (sp->flags & SRB_WATCHDOG_ENABLED &&
7856 sp->wdg_q_time > 1)) {
7857 EL(ha, "fast abort modify change\n");
7858 sp->flags &= ~(SRB_RETRY);
7859 sp->pkt->pkt_reason = CS_TIMEOUT;
7860 }
7861
7862 /* Place request back on top of target command queue */
7863 if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7864 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7865 sp->flags & SRB_RETRY &&
7866 (sp->flags & SRB_WATCHDOG_ENABLED &&
7867 sp->wdg_q_time > 1)) {
7868 sp->flags &= ~(SRB_ISP_STARTED |
7869 SRB_ISP_COMPLETED | SRB_RETRY);
7870
7871 /* Reset watchdog timer */
7872 sp->wdg_q_time = sp->init_wdg_q_time;
7873
7874 /* Issue marker command on reset status. */
7875 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7876 (sp->pkt->pkt_reason == CS_RESET ||
7877 (CFG_IST(ha, CFG_CTRL_24258081) &&
7878 sp->pkt->pkt_reason == CS_ABORTED))) {
7879 (void) ql_marker(ha, tq->loop_id, 0,
7880 MK_SYNC_ID);
7881 }
7882
7883 ql_add_link_t(&lq->cmd, &sp->cmd);
7884 sp->flags |= SRB_IN_DEVICE_QUEUE;
7885 ql_next(ha, lq);
7886 } else {
7887 /* Remove command from watchdog queue. */
7888 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7889 ql_remove_link(&tq->wdg, &sp->wdg);
7890 sp->flags &= ~SRB_WATCHDOG_ENABLED;
7891 }
7892
7893 if (lq->cmd.first != NULL) {
7894 ql_next(ha, lq);
7895 } else {
7896 /* Release LU queue specific lock. */
7897 DEVICE_QUEUE_UNLOCK(tq);
7898 if (ha->pha->pending_cmds.first !=
7899 NULL) {
7900 ql_start_iocb(ha, NULL);
7901 }
7902 }
7903
7904 /* Sync buffers if required. */
7905 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7906 (void) ddi_dma_sync(
7907 sp->pkt->pkt_resp_dma,
7908 0, 0, DDI_DMA_SYNC_FORCPU);
7909 }
7910
7911 /* Map ISP completion codes. */
7912 sp->pkt->pkt_expln = FC_EXPLN_NONE;
7913 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7914 switch (sp->pkt->pkt_reason) {
7915 case CS_COMPLETE:
7916 sp->pkt->pkt_state = FC_PKT_SUCCESS;
7917 break;
7918 case CS_RESET:
7919 /* Issue marker command. */
7920 if (!(ha->task_daemon_flags &
7921 LOOP_DOWN)) {
7922 (void) ql_marker(ha,
7923 tq->loop_id, 0,
7924 MK_SYNC_ID);
7925 }
7926 sp->pkt->pkt_state =
7927 FC_PKT_PORT_OFFLINE;
7928 sp->pkt->pkt_reason =
7929 FC_REASON_ABORTED;
7930 break;
7931 case CS_RESOUCE_UNAVAILABLE:
7932 sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7933 sp->pkt->pkt_reason =
7934 FC_REASON_PKT_BUSY;
7935 break;
7936
7937 case CS_TIMEOUT:
7938 sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7939 sp->pkt->pkt_reason =
7940 FC_REASON_HW_ERROR;
7941 break;
7942 case CS_DATA_OVERRUN:
7943 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7944 sp->pkt->pkt_reason =
7945 FC_REASON_OVERRUN;
7946 break;
7947 case CS_PORT_UNAVAILABLE:
7948 case CS_PORT_LOGGED_OUT:
7949 sp->pkt->pkt_state =
7950 FC_PKT_PORT_OFFLINE;
7951 sp->pkt->pkt_reason =
7952 FC_REASON_LOGIN_REQUIRED;
7953 ql_send_logo(ha, tq, NULL);
7954 break;
7955 case CS_PORT_CONFIG_CHG:
7956 sp->pkt->pkt_state =
7957 FC_PKT_PORT_OFFLINE;
7958 sp->pkt->pkt_reason =
7959 FC_REASON_OFFLINE;
7960 break;
7961 case CS_QUEUE_FULL:
7962 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7963 sp->pkt->pkt_reason = FC_REASON_QFULL;
7964 break;
7965
7966 case CS_ABORTED:
7967 DEVICE_QUEUE_LOCK(tq);
7968 if (tq->flags & (TQF_RSCN_RCVD |
7969 TQF_NEED_AUTHENTICATION)) {
7970 sp->pkt->pkt_state =
7971 FC_PKT_PORT_OFFLINE;
7972 sp->pkt->pkt_reason =
7973 FC_REASON_LOGIN_REQUIRED;
7974 } else {
7975 sp->pkt->pkt_state =
7976 FC_PKT_LOCAL_RJT;
7977 sp->pkt->pkt_reason =
7978 FC_REASON_ABORTED;
7979 }
7980 DEVICE_QUEUE_UNLOCK(tq);
7981 break;
7982
7983 case CS_TRANSPORT:
7984 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7985 sp->pkt->pkt_reason =
7986 FC_PKT_TRAN_ERROR;
7987 break;
7988
7989 case CS_DATA_UNDERRUN:
7990 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7991 sp->pkt->pkt_reason =
7992 FC_REASON_UNDERRUN;
7993 break;
7994 case CS_DMA_ERROR:
7995 case CS_BAD_PAYLOAD:
7996 case CS_UNKNOWN:
7997 case CS_CMD_FAILED:
7998 default:
7999 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8000 sp->pkt->pkt_reason =
8001 FC_REASON_HW_ERROR;
8002 break;
8003 }
8004
8005 /* Now call the pkt completion callback */
8006 if (sp->flags & SRB_POLL) {
8007 sp->flags &= ~SRB_POLL;
8008 } else if (sp->pkt->pkt_comp) {
8009 if (sp->pkt->pkt_tran_flags &
8010 FC_TRAN_IMMEDIATE_CB) {
8011 (*sp->pkt->pkt_comp)(sp->pkt);
8012 } else {
8013 ql_awaken_task_daemon(ha, sp,
8014 0, 0);
8015 }
8016 }
8017 }
8018 }
8019 }
8020
8021 QL_PRINT_3(CE_CONT, "done\n");
8022 }
8023
8024 /*
8025 * ql_awaken_task_daemon
8026 * Adds command completion callback to callback queue and/or
8027 * awakens task daemon thread.
8028 *
8029 * Input:
8030 * ha: adapter state pointer.
8031 * sp: srb pointer.
8032 * set_flags: task daemon flags to set.
8033 * reset_flags: task daemon flags to reset.
8034 *
8035 * Context:
8036 * Interrupt or Kernel context, no mailbox commands allowed.
8037 */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is already active. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		/*
		 * Daemon is suspended (per QL_DAEMON_SUSPENDED); the work
		 * cannot be queued, so it is performed inline here.
		 */
		if (sp != NULL) {
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/*
			 * No srb to complete; run the task thread directly,
			 * but never from an interrupt thread and never
			 * re-entrantly (TASK_THREAD_CALLED guards nesting).
			 */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue the completion for the daemon thread. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		/* Wake the daemon if it is sleeping on its cv. */
		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8091
8092 /*
8093 * ql_task_daemon
 *	Thread that is awakened by the driver when
 *	background processing needs to be done.
8096 *
8097 * Input:
8098 * arg = adapter state pointer.
8099 *
8100 * Context:
8101 * Kernel context.
8102 */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register this thread with the CPR (suspend/resume) framework. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		/* Drain all pending work before going back to sleep. */
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		/* cv_wait_sig() returns 0 when interrupted by a signal. */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT() also releases task_daemon_mutex. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
8160
8161 /*
8162 * ql_task_thread
8163 * Thread run by daemon.
8164 *
8165 * Input:
8166 * ha = adapter state pointer.
8167 * TASK_DAEMON_LOCK must be acquired prior to call.
8168 *
8169 * Context:
8170 * Kernel context.
8171 */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	int			loop_again;
	ql_srb_t		*sp;
	ql_head_t		*head;
	ql_link_t		*link;
	caddr_t			msg;
	ql_adapter_state_t	*vha;

	/*
	 * Each handler below that calls out of the driver core drops
	 * TASK_DAEMON_LOCK around the call and reacquires it afterward,
	 * then sets loop_again so the flag word is rescanned, since other
	 * threads may have modified it while the lock was released.
	 */
	do {
		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
		    ha->instance, ha->task_daemon_flags);

		loop_again = FALSE;

		/* Stall all processing while the adapter is not at D0. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		/* IDC event. */
		if (ha->task_daemon_flags & IDC_EVENT) {
			ha->task_daemon_flags &= ~IDC_EVENT;
			TASK_DAEMON_UNLOCK(ha);
			ql_process_idc_event(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Stall while suspended, stopping, stalled, or offline. */
		if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
		    (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
		    (ha->flags & ONLINE) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		/* ISP abort pending: log parity error and take port offline. */
		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			if (ha->log_parity_pause == B_TRUE) {
				(void) ql_flash_errlog(ha,
				    FLASH_ERRLOG_PARITY_ERR, 0,
				    MSW(ha->parity_stat_err),
				    LSW(ha->parity_stat_err));
				ha->log_parity_pause = B_FALSE;
			}
			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_abort_queues(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Not suspended, awaken waiting routines. */
		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = TRUE;
		}

		/* Handle RSCN changes (walk physical port plus all vports). */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Handle state changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				/* Wait for commands to drain if requested. */
				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
					}
				}

				/* Select console message for the transition. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the bound transport of new state. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb state=%xh\n", ha->instance,
					    vha->vp_index, vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
				}
				loop_again = TRUE;
			}
		}

		/* Deliver LIP reset notifications to all bound ports. */
		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
			EL(ha, "processing LIP reset\n");
			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
			TASK_DAEMON_UNLOCK(ha);
			for (vha = ha; vha != NULL; vha = vha->vp_next) {
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb reset\n", ha->instance,
					    vha->vp_index);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    FC_STATE_TARGET_PORT_RESET);
				}
			}
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
		    FIRMWARE_UP)) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Marker/resync only when loop is up and nothing stalls us. */
		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
		    COMMAND_WAIT_NEEDED))) {
			if (QL_IS_SET(ha->task_daemon_flags,
			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
					ha->task_daemon_flags |= RESET_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						ql_rst_aen(vha);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~RESET_ACTIVE;
					loop_again = TRUE;
				}
			}

			if (QL_IS_SET(ha->task_daemon_flags,
			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					(void) ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;

			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Deliver one queued completion callback per pass. */
		head = &ha->callback_queue;
		if (head->first != NULL) {
			sp = head->first->base_address;
			link = &sp->cmd;

			/* Dequeue command. */
			ql_remove_link(head, link);

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}

			/* Acquire task daemon lock. */
			TASK_DAEMON_LOCK(ha);

			loop_again = TRUE;
		}

	} while (loop_again);
}
8482
8483 /*
8484 * ql_idle_check
8485 * Test for adapter is alive and well.
8486 *
8487 * Input:
8488 * ha: adapter state pointer.
8489 *
8490 * Context:
8491 * Kernel context.
8492 */
8493 static void
8494 ql_idle_check(ql_adapter_state_t *ha)
8495 {
8496 ddi_devstate_t state;
8497 int rval;
8498 ql_mbx_data_t mr;
8499
8500 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8501
8502 /* Firmware Ready Test. */
8503 rval = ql_get_firmware_state(ha, &mr);
8504 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8505 (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8506 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8507 state = ddi_get_devstate(ha->dip);
8508 if (state == DDI_DEVSTATE_UP) {
8509 /*EMPTY*/
8510 ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8511 DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8512 }
8513 TASK_DAEMON_LOCK(ha);
8514 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8515 EL(ha, "fstate_ready, isp_abort_needed\n");
8516 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8517 }
8518 TASK_DAEMON_UNLOCK(ha);
8519 }
8520
8521 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8522 }
8523
8524 /*
8525 * ql_unsol_callback
8526 * Handle unsolicited buffer callbacks.
8527 *
8528 * Input:
 *	sp = srb pointer (adapter state is taken from sp->ha).
8531 *
8532 * Context:
8533 * Kernel context.
8534 */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* sp->handle indexes the adapter's unsolicited buffer array. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/*
	 * If the buffer is being freed or the adapter is powering down,
	 * just return the buffer to the FCA without delivering it.
	 */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* RSCN affected-ID list starts 4 bytes into the payload. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Requeue this callback and retry later. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Requeue the callback if the device still has work queued. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* Sync the DMA buffer for CPU access before handing it up. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Deliver the unsolicited buffer to the bound transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8636
8637 /*
8638 * ql_send_logo
8639 *
8640 * Input:
 *	vha:	adapter state pointer.
8642 * tq: target queue pointer.
8643 * done_q: done queue pointer.
8644 *
8645 * Context:
8646 * Interrupt or Kernel context, no mailbox commands allowed.
8647 */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Unassigned or broadcast d_id: no device to log out. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Skip when an RSCN or PLOGI is in progress, a logout was already
	 * sent, or the loop is down.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. Frame is built as if sent by the device. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Attach a LUN queue so the callback can find the target. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Queue on done_q when provided, else wake the task daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8726
8727 static int
8728 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8729 {
8730 port_id_t d_id;
8731 ql_srb_t *sp;
8732 ql_link_t *link;
8733 int sendup = 1;
8734
8735 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8736
8737 DEVICE_QUEUE_LOCK(tq);
8738 if (tq->outcnt) {
8739 DEVICE_QUEUE_UNLOCK(tq);
8740 sendup = 0;
8741 (void) ql_abort_device(ha, tq, 1);
8742 ql_delay(ha, 10000);
8743 } else {
8744 DEVICE_QUEUE_UNLOCK(tq);
8745 TASK_DAEMON_LOCK(ha);
8746
8747 for (link = ha->pha->callback_queue.first; link != NULL;
8748 link = link->next) {
8749 sp = link->base_address;
8750 if (sp->flags & SRB_UB_CALLBACK) {
8751 continue;
8752 }
8753 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8754
8755 if (tq->d_id.b24 == d_id.b24) {
8756 sendup = 0;
8757 break;
8758 }
8759 }
8760
8761 TASK_DAEMON_UNLOCK(ha);
8762 }
8763
8764 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8765
8766 return (sendup);
8767 }
8768
8769 static int
8770 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8771 {
8772 fc_unsol_buf_t *ubp;
8773 ql_srb_t *sp;
8774 la_els_logi_t *payload;
8775 class_svc_param_t *class3_param;
8776
8777 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8778
8779 if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8780 LOOP_DOWN)) {
8781 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8782 return (QL_FUNCTION_FAILED);
8783 }
8784
8785 /* Locate a buffer to use. */
8786 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8787 if (ubp == NULL) {
8788 EL(ha, "Failed\n");
8789 return (QL_FUNCTION_FAILED);
8790 }
8791
8792 QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8793 ha->instance, tq->d_id.b24);
8794
8795 EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8796
8797 sp = ubp->ub_fca_private;
8798
8799 /* Set header. */
8800 ubp->ub_frame.d_id = ha->d_id.b24;
8801 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8802 ubp->ub_frame.s_id = tq->d_id.b24;
8803 ubp->ub_frame.rsvd = 0;
8804 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8805 F_CTL_SEQ_INITIATIVE;
8806 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8807 ubp->ub_frame.seq_cnt = 0;
8808 ubp->ub_frame.df_ctl = 0;
8809 ubp->ub_frame.seq_id = 0;
8810 ubp->ub_frame.rx_id = 0xffff;
8811 ubp->ub_frame.ox_id = 0xffff;
8812
8813 /* set payload. */
8814 payload = (la_els_logi_t *)ubp->ub_buffer;
8815 bzero(payload, sizeof (payload));
8816
8817 payload->ls_code.ls_code = LA_ELS_PLOGI;
8818 payload->common_service.fcph_version = 0x2006;
8819 payload->common_service.cmn_features = 0x8800;
8820
8821 CFG_IST(ha, CFG_CTRL_24258081) ?
8822 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8823 ha->init_ctrl_blk.cb24.max_frame_length[0],
8824 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8825 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8826 ha->init_ctrl_blk.cb.max_frame_length[0],
8827 ha->init_ctrl_blk.cb.max_frame_length[1]));
8828
8829 payload->common_service.conc_sequences = 0xff;
8830 payload->common_service.relative_offset = 0x03;
8831 payload->common_service.e_d_tov = 0x7d0;
8832
8833 bcopy((void *)&tq->port_name[0],
8834 (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8835
8836 bcopy((void *)&tq->node_name[0],
8837 (void *)&payload->node_ww_name.raw_wwn[0], 8);
8838
8839 class3_param = (class_svc_param_t *)&payload->class_3;
8840 class3_param->class_valid_svc_opt = 0x8000;
8841 class3_param->recipient_ctl = tq->class3_recipient_ctl;
8842 class3_param->rcv_data_size = tq->class3_rcv_data_size;
8843 class3_param->conc_sequences = tq->class3_conc_sequences;
8844 class3_param->open_sequences_per_exch =
8845 tq->class3_open_sequences_per_exch;
8846
8847 QL_UB_LOCK(ha);
8848 sp->flags |= SRB_UB_CALLBACK;
8849 QL_UB_UNLOCK(ha);
8850
8851 ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8852
8853 if (done_q) {
8854 ql_add_link_b(done_q, &sp->cmd);
8855 } else {
8856 ql_awaken_task_daemon(ha, sp, 0, 0);
8857 }
8858
8859 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8860
8861 return (QL_SUCCESS);
8862 }
8863
8864 /*
8865 * Abort outstanding commands in the Firmware, clear internally
8866 * queued commands in the driver, Synchronize the target with
8867 * the Firmware
8868 */
8869 int
8870 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8871 {
8872 ql_link_t *link, *link2;
8873 ql_lun_t *lq;
8874 int rval = QL_SUCCESS;
8875 ql_srb_t *sp;
8876 ql_head_t done_q = { NULL, NULL };
8877
8878 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8879
8880 /*
8881 * First clear, internally queued commands
8882 */
8883 DEVICE_QUEUE_LOCK(tq);
8884 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8885 lq = link->base_address;
8886
8887 link2 = lq->cmd.first;
8888 while (link2 != NULL) {
8889 sp = link2->base_address;
8890 link2 = link2->next;
8891
8892 if (sp->flags & SRB_ABORT) {
8893 continue;
8894 }
8895
8896 /* Remove srb from device command queue. */
8897 ql_remove_link(&lq->cmd, &sp->cmd);
8898 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8899
8900 /* Set ending status. */
8901 sp->pkt->pkt_reason = CS_ABORTED;
8902
8903 /* Call done routine to handle completions. */
8904 ql_add_link_b(&done_q, &sp->cmd);
8905 }
8906 }
8907 DEVICE_QUEUE_UNLOCK(tq);
8908
8909 if (done_q.first != NULL) {
8910 ql_done(done_q.first);
8911 }
8912
8913 if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8914 rval = ql_abort_target(ha, tq, 0);
8915 }
8916
8917 if (rval != QL_SUCCESS) {
8918 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8919 } else {
8920 /*EMPTY*/
8921 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8922 ha->vp_index);
8923 }
8924
8925 return (rval);
8926 }
8927
8928 /*
8929 * ql_rcv_rscn_els
8930 * Processes received RSCN extended link service.
8931 *
8932 * Input:
8933 * ha: adapter state pointer.
8934 * mb: array containing input mailbox registers.
8935 * done_q: done queue pointer.
8936 *
8937 * Context:
8938 * Interrupt or Kernel context, no mailbox commands allowed.
8939 */
8940 void
8941 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8942 {
8943 fc_unsol_buf_t *ubp;
8944 ql_srb_t *sp;
8945 fc_rscn_t *rn;
8946 fc_affected_id_t *af;
8947 port_id_t d_id;
8948
8949 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8950
8951 /* Locate a buffer to use. */
8952 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8953 if (ubp != NULL) {
8954 sp = ubp->ub_fca_private;
8955
8956 /* Set header. */
8957 ubp->ub_frame.d_id = ha->d_id.b24;
8958 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8959 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8960 ubp->ub_frame.rsvd = 0;
8961 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8962 F_CTL_SEQ_INITIATIVE;
8963 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8964 ubp->ub_frame.seq_cnt = 0;
8965 ubp->ub_frame.df_ctl = 0;
8966 ubp->ub_frame.seq_id = 0;
8967 ubp->ub_frame.rx_id = 0xffff;
8968 ubp->ub_frame.ox_id = 0xffff;
8969
8970 /* set payload. */
8971 rn = (fc_rscn_t *)ubp->ub_buffer;
8972 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8973
8974 rn->rscn_code = LA_ELS_RSCN;
8975 rn->rscn_len = 4;
8976 rn->rscn_payload_len = 8;
8977 d_id.b.al_pa = LSB(mb[2]);
8978 d_id.b.area = MSB(mb[2]);
8979 d_id.b.domain = LSB(mb[1]);
8980 af->aff_d_id = d_id.b24;
8981 af->aff_format = MSB(mb[1]);
8982
8983 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8984 af->aff_d_id);
8985
8986 ql_update_rscn(ha, af);
8987
8988 QL_UB_LOCK(ha);
8989 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8990 QL_UB_UNLOCK(ha);
8991 ql_add_link_b(done_q, &sp->cmd);
8992 }
8993
8994 if (ubp == NULL) {
8995 EL(ha, "Failed, get_unsolicited_buffer\n");
8996 } else {
8997 /*EMPTY*/
8998 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8999 }
9000 }
9001
9002 /*
9003 * ql_update_rscn
9004 * Update devices from received RSCN.
9005 *
9006 * Input:
9007 * ha: adapter state pointer.
9008 * af: pointer to RSCN data.
9009 *
9010 * Context:
9011 * Interrupt or Kernel context, no mailbox commands allowed.
9012 */
9013 static void
9014 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9015 {
9016 ql_link_t *link;
9017 uint16_t index;
9018 ql_tgt_t *tq;
9019
9020 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9021
9022 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9023 port_id_t d_id;
9024
9025 d_id.r.rsvd_1 = 0;
9026 d_id.b24 = af->aff_d_id;
9027
9028 tq = ql_d_id_to_queue(ha, d_id);
9029 if (tq) {
9030 EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9031 DEVICE_QUEUE_LOCK(tq);
9032 tq->flags |= TQF_RSCN_RCVD;
9033 DEVICE_QUEUE_UNLOCK(tq);
9034 }
9035 QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9036 ha->instance);
9037
9038 return;
9039 }
9040
9041 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9042 for (link = ha->dev[index].first; link != NULL;
9043 link = link->next) {
9044 tq = link->base_address;
9045
9046 switch (af->aff_format) {
9047 case FC_RSCN_FABRIC_ADDRESS:
9048 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9049 EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9050 tq->d_id.b24);
9051 DEVICE_QUEUE_LOCK(tq);
9052 tq->flags |= TQF_RSCN_RCVD;
9053 DEVICE_QUEUE_UNLOCK(tq);
9054 }
9055 break;
9056
9057 case FC_RSCN_AREA_ADDRESS:
9058 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9059 EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9060 tq->d_id.b24);
9061 DEVICE_QUEUE_LOCK(tq);
9062 tq->flags |= TQF_RSCN_RCVD;
9063 DEVICE_QUEUE_UNLOCK(tq);
9064 }
9065 break;
9066
9067 case FC_RSCN_DOMAIN_ADDRESS:
9068 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9069 EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9070 tq->d_id.b24);
9071 DEVICE_QUEUE_LOCK(tq);
9072 tq->flags |= TQF_RSCN_RCVD;
9073 DEVICE_QUEUE_UNLOCK(tq);
9074 }
9075 break;
9076
9077 default:
9078 break;
9079 }
9080 }
9081 }
9082 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9083 }
9084
9085 /*
9086 * ql_process_rscn
9087 *
9088 * Input:
9089 * ha: adapter state pointer.
9090 * af: RSCN payload pointer.
9091 *
9092 * Context:
9093 * Kernel context.
9094 */
9095 static int
9096 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9097 {
9098 int sendit;
9099 int sendup = 1;
9100 ql_link_t *link;
9101 uint16_t index;
9102 ql_tgt_t *tq;
9103
9104 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9105
9106 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9107 port_id_t d_id;
9108
9109 d_id.r.rsvd_1 = 0;
9110 d_id.b24 = af->aff_d_id;
9111
9112 tq = ql_d_id_to_queue(ha, d_id);
9113 if (tq) {
9114 sendup = ql_process_rscn_for_device(ha, tq);
9115 }
9116
9117 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9118
9119 return (sendup);
9120 }
9121
9122 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9123 for (link = ha->dev[index].first; link != NULL;
9124 link = link->next) {
9125
9126 tq = link->base_address;
9127 if (tq == NULL) {
9128 continue;
9129 }
9130
9131 switch (af->aff_format) {
9132 case FC_RSCN_FABRIC_ADDRESS:
9133 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9134 sendit = ql_process_rscn_for_device(
9135 ha, tq);
9136 if (sendup) {
9137 sendup = sendit;
9138 }
9139 }
9140 break;
9141
9142 case FC_RSCN_AREA_ADDRESS:
9143 if ((tq->d_id.b24 & 0xffff00) ==
9144 af->aff_d_id) {
9145 sendit = ql_process_rscn_for_device(
9146 ha, tq);
9147
9148 if (sendup) {
9149 sendup = sendit;
9150 }
9151 }
9152 break;
9153
9154 case FC_RSCN_DOMAIN_ADDRESS:
9155 if ((tq->d_id.b24 & 0xff0000) ==
9156 af->aff_d_id) {
9157 sendit = ql_process_rscn_for_device(
9158 ha, tq);
9159
9160 if (sendup) {
9161 sendup = sendit;
9162 }
9163 }
9164 break;
9165
9166 default:
9167 break;
9168 }
9169 }
9170 }
9171
9172 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9173
9174 return (sendup);
9175 }
9176
9177 /*
9178 * ql_process_rscn_for_device
9179 *
9180 * Input:
9181 * ha: adapter state pointer.
9182 * tq: target queue pointer.
9183 *
9184 * Context:
9185 * Kernel context.
9186 */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* Returns 1 if the RSCN should be sent up the stack, else 0. */
	int	sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		/*
		 * Drop the queue lock around the port-database fetch;
		 * the helper may sleep/issue mailbox traffic.
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		/* Logged-in device: require re-authentication and abort. */
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		/* Lock dropped for the abort; same pattern as above. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/*
		 * Commands still outstanding: hold off sending the RSCN
		 * up and keep TQF_RSCN_RCVD set for the watchdog.
		 */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* Not logged in; nothing to abort. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9244
/*
 * ql_handle_rscn_update
 *	Retrieves the firmware d_id list and creates device queues
 *	for any newly appeared devices, issuing a PLOGI up the stack
 *	for each one found.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context (issues mailbox commands).
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int		rval;
	ql_tgt_t	*tq;
	uint16_t	index, loop_id;
	ql_dev_id_list_t *list;
	uint32_t	list_size;
	port_id_t	d_id;
	ql_mbx_data_t	mr;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	/*
	 * NOTE(review): with KM_SLEEP, kmem_zalloc does not return NULL
	 * on Solaris/illumos, so this check is belt-and-braces.
	 */
	list = kmem_zalloc(list_size, KM_SLEEP);
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices */
	/* mr.mb[1] holds the firmware-reported entry count. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known: nothing to do for this entry. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			/*
			 * NOTE(review): ql_dev_init's result is used
			 * unchecked below — presumably it cannot fail
			 * here; confirm against its definition.
			 */
			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Lock dropped: port-database fetch uses mailboxes. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* Complete any unsolicited callbacks queued by ql_send_plogi. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9334
9335 /*
9336 * ql_free_unsolicited_buffer
9337 * Frees allocated buffer.
9338 *
9339 * Input:
9340 * ha = adapter state pointer.
 *	ubp = unsolicited buffer pointer.
9342 * ADAPTER_STATE_LOCK must be already obtained.
9343 *
9344 * Context:
9345 * Kernel context.
9346 */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/*
			 * Drop the (caller-held) adapter state lock
			 * across ql_shutdown_ip, then re-acquire it.
			 */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				/*
				 * Shutdown failed: leave the buffer and
				 * counters untouched rather than free
				 * memory the IP path may still use.
				 */
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory, not kmem. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Keep the allocation count from wrapping below zero. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9386
9387 /*
9388 * ql_get_unsolicited_buffer
9389 * Locates a free unsolicited buffer.
9390 *
9391 * Input:
9392 * ha = adapter state pointer.
9393 * type = buffer type.
9394 *
9395 * Returns:
9396 * Unsolicited buffer pointer.
9397 *
9398 * Context:
9399 * Interrupt or Kernel context, no mailbox commands allowed.
9400 */
9401 fc_unsol_buf_t *
9402 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9403 {
9404 fc_unsol_buf_t *ubp;
9405 ql_srb_t *sp;
9406 uint16_t index;
9407
9408 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9409
9410 /* Locate a buffer to use. */
9411 ubp = NULL;
9412
9413 QL_UB_LOCK(ha);
9414 for (index = 0; index < QL_UB_LIMIT; index++) {
9415 ubp = ha->ub_array[index];
9416 if (ubp != NULL) {
9417 sp = ubp->ub_fca_private;
9418 if ((sp->ub_type == type) &&
9419 (sp->flags & SRB_UB_IN_FCA) &&
9420 (!(sp->flags & (SRB_UB_CALLBACK |
9421 SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9422 sp->flags |= SRB_UB_ACQUIRED;
9423 ubp->ub_resp_flags = 0;
9424 break;
9425 }
9426 ubp = NULL;
9427 }
9428 }
9429 QL_UB_UNLOCK(ha);
9430
9431 if (ubp) {
9432 ubp->ub_resp_token = NULL;
9433 ubp->ub_class = FC_TRAN_CLASS3;
9434 }
9435
9436 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9437
9438 return (ubp);
9439 }
9440
9441 /*
9442 * ql_ub_frame_hdr
9443 * Processes received unsolicited buffers from ISP.
9444 *
9445 * Input:
9446 * ha: adapter state pointer.
9447 * tq: target queue pointer.
9448 * index: unsolicited buffer array index.
9449 * done_q: done queue pointer.
9450 *
9451 * Returns:
9452 * ql local function return status code.
9453 *
9454 * Context:
9455 * Interrupt or Kernel context, no mailbox commands allowed.
9456 */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	/* Reject out-of-range or never-allocated buffer indexes. */
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	/*
	 * A free was requested while the buffer was with the ISP:
	 * return it to the FCA instead of processing the frame.
	 */
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Sanity-check: correct handle, in ISP, IP type, not in use. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			/* Broadcast frame: D_ID depends on topology. */
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Synthesize an FC frame header for the upper layer. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp this segment to the remaining sequence length. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance per-target sequence bookkeeping. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			/* Last segment; also first if only one segment. */
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			/* First of several segments. */
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Hand the buffer to the done queue for callback. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Log exactly which sanity check failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9580
9581 /*
9582 * ql_timer
9583 * One second timer function.
9584 *
9585 * Input:
9586 * ql_hba.first = first link in adapter list.
9587 *
9588 * Context:
9589 * Interrupt context, no mailbox commands allowed.
9590 */
9591 static void
9592 ql_timer(void *arg)
9593 {
9594 ql_link_t *link;
9595 uint32_t set_flags;
9596 uint32_t reset_flags;
9597 ql_adapter_state_t *ha = NULL, *vha;
9598
9599 QL_PRINT_6(CE_CONT, "started\n");
9600
9601 /* Acquire global state lock. */
9602 GLOBAL_STATE_LOCK();
9603 if (ql_timer_timeout_id == NULL) {
9604 /* Release global state lock. */
9605 GLOBAL_STATE_UNLOCK();
9606 return;
9607 }
9608
9609 for (link = ql_hba.first; link != NULL; link = link->next) {
9610 ha = link->base_address;
9611
9612 /* Skip adapter if suspended of stalled. */
9613 ADAPTER_STATE_LOCK(ha);
9614 if (ha->flags & ADAPTER_SUSPENDED ||
9615 ha->task_daemon_flags & DRIVER_STALL) {
9616 ADAPTER_STATE_UNLOCK(ha);
9617 continue;
9618 }
9619 ha->flags |= ADAPTER_TIMER_BUSY;
9620 ADAPTER_STATE_UNLOCK(ha);
9621
9622 QL_PM_LOCK(ha);
9623 if (ha->power_level != PM_LEVEL_D0) {
9624 QL_PM_UNLOCK(ha);
9625
9626 ADAPTER_STATE_LOCK(ha);
9627 ha->flags &= ~ADAPTER_TIMER_BUSY;
9628 ADAPTER_STATE_UNLOCK(ha);
9629 continue;
9630 }
9631 ha->busy++;
9632 QL_PM_UNLOCK(ha);
9633
9634 set_flags = 0;
9635 reset_flags = 0;
9636
9637 /* Port retry timer handler. */
9638 if (LOOP_READY(ha)) {
9639 ADAPTER_STATE_LOCK(ha);
9640 if (ha->port_retry_timer != 0) {
9641 ha->port_retry_timer--;
9642 if (ha->port_retry_timer == 0) {
9643 set_flags |= PORT_RETRY_NEEDED;
9644 }
9645 }
9646 ADAPTER_STATE_UNLOCK(ha);
9647 }
9648
9649 /* Loop down timer handler. */
9650 if (LOOP_RECONFIGURE(ha) == 0) {
9651 if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9652 ha->loop_down_timer--;
9653 /*
9654 * give the firmware loop down dump flag
9655 * a chance to work.
9656 */
9657 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9658 if (CFG_IST(ha,
9659 CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9660 (void) ql_binary_fw_dump(ha,
9661 TRUE);
9662 }
9663 EL(ha, "loop_down_reset, "
9664 "isp_abort_needed\n");
9665 set_flags |= ISP_ABORT_NEEDED;
9666 }
9667 }
9668 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9669 /* Command abort time handler. */
9670 if (ha->loop_down_timer ==
9671 ha->loop_down_abort_time) {
9672 ADAPTER_STATE_LOCK(ha);
9673 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9674 ADAPTER_STATE_UNLOCK(ha);
9675 set_flags |= ABORT_QUEUES_NEEDED;
9676 EL(ha, "loop_down_abort_time, "
9677 "abort_queues_needed\n");
9678 }
9679
9680 /* Watchdog timer handler. */
9681 if (ha->watchdog_timer == 0) {
9682 ha->watchdog_timer = WATCHDOG_TIME;
9683 } else if (LOOP_READY(ha)) {
9684 ha->watchdog_timer--;
9685 if (ha->watchdog_timer == 0) {
9686 for (vha = ha; vha != NULL;
9687 vha = vha->vp_next) {
9688 ql_watchdog(vha,
9689 &set_flags,
9690 &reset_flags);
9691 }
9692 ha->watchdog_timer =
9693 WATCHDOG_TIME;
9694 }
9695 }
9696 }
9697 }
9698
9699 /* Idle timer handler. */
9700 if (!DRIVER_SUSPENDED(ha)) {
9701 if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9702 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9703 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9704 #endif
9705 ha->idle_timer = 0;
9706 }
9707 if (ha->send_plogi_timer != NULL) {
9708 ha->send_plogi_timer--;
9709 if (ha->send_plogi_timer == NULL) {
9710 set_flags |= SEND_PLOGI;
9711 }
9712 }
9713 }
9714 ADAPTER_STATE_LOCK(ha);
9715 if (ha->idc_restart_timer != 0) {
9716 ha->idc_restart_timer--;
9717 if (ha->idc_restart_timer == 0) {
9718 ha->idc_restart_cnt = 0;
9719 reset_flags |= DRIVER_STALL;
9720 }
9721 }
9722 if (ha->idc_flash_acc_timer != 0) {
9723 ha->idc_flash_acc_timer--;
9724 if (ha->idc_flash_acc_timer == 0 &&
9725 ha->idc_flash_acc != 0) {
9726 ha->idc_flash_acc = 1;
9727 ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9728 ha->idc_mb[1] = 0;
9729 ha->idc_mb[2] = IDC_OPC_DRV_START;
9730 set_flags |= IDC_EVENT;
9731 }
9732 }
9733 ADAPTER_STATE_UNLOCK(ha);
9734
9735 if (set_flags != 0 || reset_flags != 0) {
9736 ql_awaken_task_daemon(ha, NULL, set_flags,
9737 reset_flags);
9738 }
9739
9740 if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9741 ql_blink_led(ha);
9742 }
9743
9744 /* Update the IO stats */
9745 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9746 ha->xioctl->IOInputMByteCnt +=
9747 (ha->xioctl->IOInputByteCnt / 0x100000);
9748 ha->xioctl->IOInputByteCnt %= 0x100000;
9749 }
9750
9751 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9752 ha->xioctl->IOOutputMByteCnt +=
9753 (ha->xioctl->IOOutputByteCnt / 0x100000);
9754 ha->xioctl->IOOutputByteCnt %= 0x100000;
9755 }
9756
9757 if (CFG_IST(ha, CFG_CTRL_8021)) {
9758 (void) ql_8021_idc_handler(ha);
9759 }
9760
9761 ADAPTER_STATE_LOCK(ha);
9762 ha->flags &= ~ADAPTER_TIMER_BUSY;
9763 ADAPTER_STATE_UNLOCK(ha);
9764
9765 QL_PM_LOCK(ha);
9766 ha->busy--;
9767 QL_PM_UNLOCK(ha);
9768 }
9769
9770 /* Restart timer, if not being stopped. */
9771 if (ql_timer_timeout_id != NULL) {
9772 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9773 }
9774
9775 /* Release global state lock. */
9776 GLOBAL_STATE_UNLOCK();
9777
9778 QL_PRINT_6(CE_CONT, "done\n");
9779 }
9780
9781 /*
9782 * ql_timeout_insert
9783 * Function used to insert a command block onto the
9784 * watchdog timer queue.
9785 *
9786 * Note: Must insure that pkt_time is not zero
9787 * before calling ql_timeout_insert.
9788 *
9789 * Input:
9790 * ha: adapter state pointer.
9791 * tq: target queue pointer.
9792 * sp: SRB pointer.
9793 * DEVICE_QUEUE_LOCK must be already obtained.
9794 *
9795 * Context:
9796 * Kernel context.
9797 */
9798 /* ARGSUSED */
9799 static void
9800 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9801 {
9802 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9803
9804 if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9805 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9806 /*
9807 * The WATCHDOG_TIME must be rounded up + 1. As an example,
9808 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9809 * will expire in the next watchdog call, which could be in
9810 * 1 microsecond.
9811 *
9812 */
9813 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9814 WATCHDOG_TIME;
9815 /*
9816 * Added an additional 10 to account for the
9817 * firmware timer drift which can occur with
9818 * very long timeout values.
9819 */
9820 sp->wdg_q_time += 10;
9821
9822 /*
9823 * Add 6 more to insure watchdog does not timeout at the same
9824 * time as ISP RISC code timeout.
9825 */
9826 sp->wdg_q_time += 6;
9827
9828 /* Save initial time for resetting watchdog time. */
9829 sp->init_wdg_q_time = sp->wdg_q_time;
9830
9831 /* Insert command onto watchdog queue. */
9832 ql_add_link_b(&tq->wdg, &sp->wdg);
9833
9834 sp->flags |= SRB_WATCHDOG_ENABLED;
9835 } else {
9836 sp->isp_timeout = 0;
9837 sp->wdg_q_time = 0;
9838 sp->init_wdg_q_time = 0;
9839 }
9840
9841 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9842 }
9843
9844 /*
9845 * ql_watchdog
9846 * Timeout handler that runs in interrupt context. The
9847 * ql_adapter_state_t * argument is the parameter set up when the
9848 * timeout was initialized (state structure pointer).
9849 * Function used to update timeout values and if timeout
9850 * has occurred command will be aborted.
9851 *
9852 * Input:
9853 * ha: adapter state pointer.
9854 * set_flags: task daemon flags to set.
9855 * reset_flags: task daemon flags to reset.
9856 *
9857 * Context:
9858 * Interrupt context, no mailbox commands allowed.
9859 */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/* Try to acquire device queue lock. */
			/* Busy queue: give up on this whole hash chain. */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						/*
						 * ISP-started timeout: stop
						 * the entire scan. tq is
						 * NULLed so the unlock at
						 * the bottom is skipped, and
						 * index is forced past the
						 * list size to end the outer
						 * loop.
						 */
						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9959
9960 /*
9961 * ql_cmd_timeout
9962 * Command timeout handler.
9963 *
9964 * Input:
9965 * ha: adapter state pointer.
9966 * tq: target queue pointer.
9967 * sp: SRB pointer.
9968 * set_flags: task daemon flags to set.
9969 * reset_flags: task daemon flags to reset.
9970 *
9971 * Context:
9972 * Interrupt context, no mailbox commands allowed.
9973 */
/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Three cases: the command never reached the ISP, it is in an
	 * 8021-family ISP (abort individually), or it is in any other
	 * ISP (request a full ISP abort).  The caller holds the device
	 * queue lock; each path drops and re-acquires it.
	 */
	if (!(sp->flags & SRB_ISP_STARTED)) {

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);
	} else if (CFG_IST(ha, CFG_CTRL_8021)) {
		int		rval;
		uint32_t	index;

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		if (sp->handle) {
			ha->pha->timeout_cnt++;
			index = sp->handle & OSC_INDEX_MASK;
			/*
			 * Invalidate the request ring entry and clear
			 * the outstanding slot before aborting.
			 */
			if (ha->pha->outstanding_cmds[index] == sp) {
				sp->request_ring_ptr->entry_type =
				    INVALID_ENTRY_TYPE;
				sp->request_ring_ptr->entry_count = 0;
				ha->pha->outstanding_cmds[index] = 0;
			}
			INTR_UNLOCK(ha);

			rval = ql_abort_command(ha, sp);
			/*
			 * Escalate to a full ISP abort if the abort
			 * itself misbehaved or timeouts keep piling up.
			 */
			if (rval == QL_FUNCTION_TIMEOUT ||
			    rval == QL_LOCK_TIMEOUT ||
			    rval == QL_FUNCTION_PARAMETER_ERROR ||
			    ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
				*set_flags |= ISP_ABORT_NEEDED;
				EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
				    "needed\n", rval, ha->pha->timeout_cnt);
			}

			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		} else {
			INTR_UNLOCK(ha);
		}

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);

	} else {
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh, isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10089
10090 /*
10091 * ql_rst_aen
10092 * Processes asynchronous reset.
10093 *
10094 * Input:
10095 * ha = adapter state pointer.
10096 *
10097 * Context:
10098 * Kernel context.
10099 */
10100 static void
10101 ql_rst_aen(ql_adapter_state_t *ha)
10102 {
10103 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10104
10105 /* Issue marker command. */
10106 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10107
10108 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10109 }
10110
10111 /*
10112 * ql_cmd_wait
10113 * Stall driver until all outstanding commands are returned.
10114 *
10115 * Input:
10116 * ha = adapter state pointer.
10117 *
10118 * Context:
10119 * Kernel context.
10120 */
10121 void
10122 ql_cmd_wait(ql_adapter_state_t *ha)
10123 {
10124 uint16_t index;
10125 ql_link_t *link;
10126 ql_tgt_t *tq;
10127 ql_adapter_state_t *vha;
10128
10129 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10130
10131 /* Wait for all outstanding commands to be returned. */
10132 (void) ql_wait_outstanding(ha);
10133
10134 /*
10135 * clear out internally queued commands
10136 */
10137 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10138 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10139 for (link = vha->dev[index].first; link != NULL;
10140 link = link->next) {
10141 tq = link->base_address;
10142 if (tq &&
10143 (!(tq->prli_svc_param_word_3 &
10144 PRLI_W3_RETRY))) {
10145 (void) ql_abort_device(vha, tq, 0);
10146 }
10147 }
10148 }
10149 }
10150
10151 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10152 }
10153
10154 /*
10155 * ql_wait_outstanding
10156 * Wait for all outstanding commands to complete.
10157 *
10158 * Input:
10159 * ha = adapter state pointer.
10160 *
10161 * Returns:
 *	MAX_OUTSTANDING_COMMANDS if all commands completed, otherwise
 *	the outstanding_cmds index of a command that did not complete.
10163 *
10164 * Context:
10165 * Kernel context.
10166 */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* count bounds the total number of 10ms waits across the scan. */
	count = ql_osc_wait_count;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/* Keep feeding queued IOCBs to the ISP while waiting. */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/* SRB_COMMAND_TIMEOUT entries are not waited on. */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				/*
				 * Reset to rescan from the start (the
				 * loop increment makes the next index 1).
				 */
				index = 0;
			} else {
				/* Wait budget exhausted; give up here. */
				EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
				    (void *)sp, index, sp->handle);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (index);
}
10198
10199 /*
10200 * ql_restart_queues
10201 * Restart device queues.
10202 *
10203 * Input:
10204 * ha = adapter state pointer.
10205 * DEVICE_QUEUE_LOCK must be released.
10206 *
10207 * Context:
10208 * Interrupt or Kernel context, no mailbox commands allowed.
10209 */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t	*link, *link2;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	ql_adapter_state_t *vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk every device on the physical port and all vports. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				/* Kick any LUN with queued commands. */
				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						/*
						 * NOTE(review): ql_next
						 * evidently releases the
						 * device queue lock — it is
						 * re-acquired here; confirm
						 * against its definition.
						 */
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10250
10251 /*
10252 * ql_iidma
10253 * Setup iiDMA parameters to firmware
10254 *
10255 * Input:
10256 * ha = adapter state pointer.
10257 * DEVICE_QUEUE_LOCK must be released.
10258 *
10259 * Context:
10260 * Interrupt or Kernel context, no mailbox commands allowed.
10261 */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA applies only to the 24xx/25xx/81xx controller families. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* Only devices flagged for an iiDMA update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip non-N_Port handles and undefined rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Get the iiDMA persistent data */
			/* Property key is "iidma-rate-<port WWN in hex>". */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				/* 0xffffffff means property not found. */
				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/* 8Gb only on 25xx HBAs. */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/* Set the firmware's iiDMA rate */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_8081))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10357
/*
 * ql_abort_queues
 *	Abort all commands on device queues.
 *
 *	Pass 1: returns every command in the outstanding (chip) command
 *	array with CS_PORT_UNAVAILABLE status, except those for targets
 *	that advertise FCP-2 retry capability.  Pass 2: drains the
 *	per-device queues of this adapter and all its virtual ports.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * If new IOCBs are still pending, push them to the chip
		 * first (lock must be dropped for that), then restart the
		 * scan near the beginning since the array may have changed.
		 */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retrys */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* ql_done() must not be called under INTR_LOCK. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Drain the device queues of the physical port and every vport. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	/*
	 * NOTE(review): "started" traces at level 10 but "done" at level 3
	 * — looks like an inconsistent trace level; confirm intent.
	 */
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10439
/*
 * ql_abort_device_queues
 *	Abort all commands on the device queues of one target.
 *
 * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Commands already being aborted are left alone. */
			if (sp->flags & SRB_ABORT) {
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* ql_done() must run without the queue lock held. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The lock was dropped, so the list may have
			 * changed; restart the scan from the list head.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10498
10499 /*
10500 * ql_loop_resync
10501 * Resync with fibre channel devices.
10502 *
10503 * Input:
10504 * ha = adapter state pointer.
10505 * DEVICE_QUEUE_LOCK must be released.
10506 *
10507 * Returns:
10508 * ql local function return status code.
10509 *
10510 * Context:
10511 * Kernel context.
10512 */
10513 static int
10514 ql_loop_resync(ql_adapter_state_t *ha)
10515 {
10516 int rval;
10517
10518 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10519
10520 if (ha->flags & IP_INITIALIZED) {
10521 (void) ql_shutdown_ip(ha);
10522 }
10523
10524 rval = ql_fw_ready(ha, 10);
10525
10526 TASK_DAEMON_LOCK(ha);
10527 ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10528 TASK_DAEMON_UNLOCK(ha);
10529
10530 /* Set loop online, if it really is. */
10531 if (rval == QL_SUCCESS) {
10532 ql_loop_online(ha);
10533 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10534 } else {
10535 EL(ha, "failed, rval = %xh\n", rval);
10536 }
10537
10538 return (rval);
10539 }
10540
/*
 * ql_loop_online
 *	Set loop online status if it really is online.
 *
 *	For each port (physical + virtual) that is not down or awaiting
 *	resync: restarts IP on the physical port if needed, transitions
 *	the FC state to LOOP or ONLINE based on topology, and notifies
 *	the task daemon of the state change.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Kernel context.
 */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		/* Skip ports that are down or still need a resync. */
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			/* IP only runs on the physical port (vp_index 0). */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				/* Keep the speed bits, replace the state. */
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10593
/*
 * ql_fca_handle_to_state
 *	Verifies handle to be correct.
 *
 *	In production builds this is a straight cast of the FCA handle.
 *	With QL_DEBUG_ROUTINES defined, the handle is additionally checked
 *	against the global adapter list (and each adapter's vport list)
 *	and a diagnostic is logged if it is not found; the handle is still
 *	returned as-is either way.
 *
 * Input:
 *	fca_handle = pointer to state structure.
 *
 * Returns:
 *	NULL = failure
 *
 * Context:
 *	Kernel context.
 */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef	QL_DEBUG_ROUTINES
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	/* Search every adapter and each adapter's virtual ports. */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(CE_CONT, "failed\n");
	}

#endif /* QL_DEBUG_ROUTINES */

	/* Always return the caller's handle, validated or not. */
	return ((ql_adapter_state_t *)fca_handle);
}
10639
10640 /*
10641 * ql_d_id_to_queue
10642 * Locate device queue that matches destination ID.
10643 *
10644 * Input:
10645 * ha = adapter state pointer.
10646 * d_id = destination ID
10647 *
10648 * Returns:
10649 * NULL = failure
10650 *
10651 * Context:
10652 * Interrupt or Kernel context, no mailbox commands allowed.
10653 */
10654 ql_tgt_t *
10655 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10656 {
10657 uint16_t index;
10658 ql_tgt_t *tq;
10659 ql_link_t *link;
10660
10661 /* Get head queue index. */
10662 index = ql_alpa_to_index[d_id.b.al_pa];
10663
10664 for (link = ha->dev[index].first; link != NULL; link = link->next) {
10665 tq = link->base_address;
10666 if (tq->d_id.b24 == d_id.b24 &&
10667 VALID_DEVICE_ID(ha, tq->loop_id)) {
10668 return (tq);
10669 }
10670 }
10671
10672 return (NULL);
10673 }
10674
10675 /*
10676 * ql_loop_id_to_queue
10677 * Locate device queue that matches loop ID.
10678 *
10679 * Input:
10680 * ha: adapter state pointer.
10681 * loop_id: destination ID
10682 *
10683 * Returns:
10684 * NULL = failure
10685 *
10686 * Context:
10687 * Interrupt or Kernel context, no mailbox commands allowed.
10688 */
10689 ql_tgt_t *
10690 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10691 {
10692 uint16_t index;
10693 ql_tgt_t *tq;
10694 ql_link_t *link;
10695
10696 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10697 for (link = ha->dev[index].first; link != NULL;
10698 link = link->next) {
10699 tq = link->base_address;
10700 if (tq->loop_id == loop_id) {
10701 return (tq);
10702 }
10703 }
10704 }
10705
10706 return (NULL);
10707 }
10708
10709 /*
10710 * ql_kstat_update
10711 * Updates kernel statistics.
10712 *
10713 * Input:
10714 * ksp - driver kernel statistics structure pointer.
10715 * rw - function to perform
10716 *
10717 * Returns:
10718 * 0 or EACCES
10719 *
10720 * Context:
10721 * Kernel context.
10722 */
10723 /* ARGSUSED */
10724 static int
10725 ql_kstat_update(kstat_t *ksp, int rw)
10726 {
10727 int rval;
10728
10729 QL_PRINT_3(CE_CONT, "started\n");
10730
10731 if (rw == KSTAT_WRITE) {
10732 rval = EACCES;
10733 } else {
10734 rval = 0;
10735 }
10736
10737 if (rval != 0) {
10738 /*EMPTY*/
10739 QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10740 } else {
10741 /*EMPTY*/
10742 QL_PRINT_3(CE_CONT, "done\n");
10743 }
10744 return (rval);
10745 }
10746
/*
 * ql_load_flash
 *	Loads flash.
 *
 *	For 24xx-class adapters the work is delegated to
 *	ql_24xx_load_flash().  Otherwise the legacy byte-programming path
 *	is used: validate the image size, erase, then program byte by byte.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dp:	data pointer.
 *	size:	data length.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;	/* flash offset of first byte */
	uint32_t	size_to_compare; /* maximum acceptable image size */
	int		erase_all;

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Default limit: 128KB image at offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/*
			 * Half-chip update: FCode goes in the low 256KB,
			 * FPGA image in the high 256KB (per the
			 * ql_flash_sbus_fpga tunable).
			 */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10833
/*
 * ql_program_flash_address
 *	Program flash address.
 *
 *	Issues the JEDEC byte-program command sequence and polls the part
 *	until the write completes.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = flash byte address.
 *	data = data to be written to flash.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	int rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * SBUS part uses the short program sequence — presumably
		 * unlocked via its FPGA write-enable path; confirm against
		 * the flash part's datasheet.
		 */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/* Write Program Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10878
10879 /*
10880 * ql_erase_flash
10881 * Erases entire flash.
10882 *
10883 * Input:
10884 * ha = adapter state pointer.
10885 *
10886 * Returns:
10887 * ql local function return status code.
10888 *
10889 * Context:
10890 * Kernel context.
10891 */
10892 int
10893 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10894 {
10895 int rval;
10896 uint32_t erase_delay = 2000000;
10897 uint32_t sStartAddr;
10898 uint32_t ssize;
10899 uint32_t cnt;
10900 uint8_t *bfp;
10901 uint8_t *tmp;
10902
10903 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10904
10905 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10906
10907 if (ql_flash_sbus_fpga == 1) {
10908 ssize = QL_SBUS_FCODE_SIZE;
10909 sStartAddr = QL_FCODE_OFFSET;
10910 } else {
10911 ssize = QL_FPGA_SIZE;
10912 sStartAddr = QL_FPGA_OFFSET;
10913 }
10914
10915 erase_delay = 20000000;
10916
10917 bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10918
10919 /* Save the section of flash we're not updating to buffer */
10920 tmp = bfp;
10921 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10922 /* Allow other system activity. */
10923 if (cnt % 0x1000 == 0) {
10924 ql_delay(ha, 10000);
10925 }
10926 *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10927 }
10928 }
10929
10930 /* Chip Erase Command Sequence */
10931 ql_write_flash_byte(ha, 0x5555, 0xaa);
10932 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10933 ql_write_flash_byte(ha, 0x5555, 0x80);
10934 ql_write_flash_byte(ha, 0x5555, 0xaa);
10935 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10936 ql_write_flash_byte(ha, 0x5555, 0x10);
10937
10938 ql_delay(ha, erase_delay);
10939
10940 /* Wait for erase to complete. */
10941 rval = ql_poll_flash(ha, 0, 0x80);
10942
10943 if (rval != QL_SUCCESS) {
10944 EL(ha, "failed=%xh\n", rval);
10945 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10946 kmem_free(bfp, ssize);
10947 }
10948 return (rval);
10949 }
10950
10951 /* restore the section we saved in the buffer */
10952 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10953 /* Restore the section we saved off */
10954 tmp = bfp;
10955 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10956 /* Allow other system activity. */
10957 if (cnt % 0x1000 == 0) {
10958 ql_delay(ha, 10000);
10959 }
10960 rval = ql_program_flash_address(ha, cnt, *tmp++);
10961 if (rval != QL_SUCCESS) {
10962 break;
10963 }
10964 }
10965
10966 kmem_free(bfp, ssize);
10967 }
10968
10969 if (rval != QL_SUCCESS) {
10970 EL(ha, "failed=%xh\n", rval);
10971 } else {
10972 /*EMPTY*/
10973 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10974 }
10975 return (rval);
10976 }
10977
10978 /*
10979 * ql_poll_flash
10980 * Polls flash for completion.
10981 *
10982 * Input:
10983 * ha = adapter state pointer.
10984 * addr = flash byte address.
10985 * data = data to be polled.
10986 *
10987 * Returns:
10988 * ql local function return status code.
10989 *
10990 * Context:
10991 * Kernel context.
10992 */
10993 int
10994 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10995 {
10996 uint8_t flash_data;
10997 uint32_t cnt;
10998 int rval = QL_FUNCTION_FAILED;
10999
11000 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11001
11002 poll_data = (uint8_t)(poll_data & BIT_7);
11003
11004 /* Wait for 30 seconds for command to finish. */
11005 for (cnt = 30000000; cnt; cnt--) {
11006 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11007
11008 if ((flash_data & BIT_7) == poll_data) {
11009 rval = QL_SUCCESS;
11010 break;
11011 }
11012 if (flash_data & BIT_5 && cnt > 2) {
11013 cnt = 2;
11014 }
11015 drv_usecwait(1);
11016 }
11017
11018 if (rval != QL_SUCCESS) {
11019 EL(ha, "failed=%xh\n", rval);
11020 } else {
11021 /*EMPTY*/
11022 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11023 }
11024 return (rval);
11025 }
11026
/*
 * ql_flash_enable
 *	Setup flash for reading/writing.
 *
 *	Sets the hardware write-enable bit (FPGA config register on SBUS,
 *	ISP control/status register on PCI) and issues the flash part's
 *	read/reset command sequence.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set write-enable in the FPGA configuration register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* PCI: flash access enabled via ISP control/status. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read to complete the reset sequence. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11070
/*
 * ql_flash_disable
 *	Disable flash and allow RISC to run.
 *
 *	Reverses ql_flash_enable(): re-locks the part (SBUS) and clears
 *	the hardware write-enable bit.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear write-enable in the FPGA configuration register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* PCI: clear the flash-enable bit in control/status. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11108
/*
 * ql_write_flash_byte
 *	Write byte to flash.
 *
 *	SBUS cards write through FPGA EEPROM address/data registers;
 *	PCI cards select the 64KB flash bank via the control/status
 *	register, then write through the flash address/data registers.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = flash byte address.
 *	data = data to be written.
 *
 * Context:
 *	Kernel context.
 */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* FPGA path: latch low/high address halves, then data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: address bits 16-19 map into bits 4-7. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64KB bank bit only when it changes. */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this test can never be true here — we are
		 * inside the !CFG_SBUS_CARD branch, so the IOMAP path below
		 * is always taken.  Confirm whether a different flag was
		 * intended.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
11168
/*
 * ql_read_flash_byte
 *	Reads byte from flash, but must read a word from chip.
 *
 *	SBUS cards read through FPGA EEPROM address/data registers;
 *	PCI cards select the 64KB flash bank via the control/status
 *	register, then read through the flash address/data registers.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = flash byte address.
 *
 * Returns:
 *	byte from flash.
 *
 * Context:
 *	Kernel context.
 */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* FPGA path: latch low/high address halves, read data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: address bits 16-19 map into bits 4-7. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64KB bank bit only when it changes. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this test can never be true here — we are
		 * inside the !CFG_SBUS_CARD branch, so the IOMAP path below
		 * is always taken.  Confirm whether a different flag was
		 * intended.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
11232
/*
 * ql_24xx_flash_id
 *	Get flash IDs.
 *
 *	Reads the flash manufacturer/device ID words from the serial
 *	flash configuration space and caches them in the xioctl flash
 *	descriptor.  Falls back to Atmel 1024K defaults when the part
 *	reports nothing.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_flash_id(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata = 0;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);

	/* 25xx/81xx (or empty first read): use the alternate ID address. */
	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
		fdata = 0;
		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "24xx read_flash failed=%xh\n", rval);
	} else if (fdata != 0) {
		/* Unpack manufacturer, device ID, and length code. */
		xp->fdesc.flash_manuf = LSB(LSW(fdata));
		xp->fdesc.flash_id = MSB(LSW(fdata));
		xp->fdesc.flash_len = LSB(MSW(fdata));
	} else {
		/* No ID readable; assume the default Atmel part. */
		xp->fdesc.flash_manuf = ATMEL_FLASH;
		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
		xp->fdesc.flash_len = 0;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
11280
/*
 * ql_24xx_load_flash
 *	Loads flash.
 *
 *	Programs a serial-flash image in 32-bit words: erases each sector
 *	as its first word is reached, then writes either by DMA burst
 *	(25xx/81xx) or word-at-a-time through the flash registers.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dp = data pointer.
 *	size = data length in bytes.
 *	faddr = 32bit word flash byte address.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
    uint32_t faddr)
{
	int			rval;
	uint32_t		cnt, rest_addr, fdata, wc;
	dma_mem_t		dmabuf = {0};
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
	    ha->instance, faddr, size);

	/* start address must be 32 bit word aligned */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect buffer size alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	/* Allocate DMA buffer */
	/* Only 25xx/81xx use the DMA burst-write path. */
	if (CFG_IST(ha, CFG_CTRL_2581)) {
		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
		    QL_SUCCESS) {
			EL(ha, "dma alloc failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable flash write */
	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
		GLOBAL_HW_UNLOCK();
		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
		/*
		 * Assumes ql_free_phys() tolerates a never-allocated,
		 * zero-filled dma_mem_t — TODO confirm.
		 */
		ql_free_phys(ha, &dmabuf);
		return (rval);
	}

	/* setup mask of address range within a sector */
	rest_addr = (xp->fdesc.block_size - 1) >> 2;

	faddr = faddr >> 2;	/* flash gets 32 bit words */

	/*
	 * Write data to flash.
	 */
	cnt = 0;
	size = (size + 3) >> 2;	/* Round up & convert to dwords */

	while (cnt < size) {
		/* Beginning of a sector? */
		if ((faddr & rest_addr) == 0) {
			if (CFG_IST(ha, CFG_CTRL_8021)) {
				/* 8021: erase via ROM interface. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_8021_rom_erase(ha, fdata);
				if (rval != QL_SUCCESS) {
					EL(ha, "8021 erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
				/* 81xx: erase via Flash Access Control. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_flash_access(ha,
				    FAC_ERASE_SECTOR, fdata, fdata +
				    rest_addr, 0);
				if (rval != QL_SUCCESS) {
					EL(ha, "erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else {
				/*
				 * Other parts: issue the serial-flash
				 * sector-erase opcode with the byte-swapped
				 * sector address as required by the part.
				 */
				fdata = (faddr & ~rest_addr) << 2;
				fdata = (fdata & 0xff00) |
				    (fdata << 16 & 0xff0000) |
				    (fdata >> 16 & 0xff);

				if (rest_addr == 0x1fff) {
					/* 32kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x0352,
					    fdata);
				} else {
					/* 64kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x03d8,
					    fdata);
				}
				if (rval != QL_SUCCESS) {
					EL(ha, "Unable to flash sector"
					    ": address=%xh\n", faddr);
					break;
				}
			}
		}

		/* Write data */
		/* DMA burst path: only from a 64-word aligned address. */
		if (CFG_IST(ha, CFG_CTRL_2581) &&
		    ((faddr & 0x3f) == 0)) {
			/*
			 * Limit write up to sector boundary.
			 */
			wc = ((~faddr & (rest_addr>>1)) + 1);

			if (size - cnt < wc) {
				wc = size - cnt;
			}

			/* Stage the words in the DMA buffer. */
			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
			    (uint8_t *)dmabuf.bp, wc<<2,
			    DDI_DEV_AUTOINCR);

			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
			    faddr, dmabuf.cookie.dmac_laddress, wc);
			if (rval != QL_SUCCESS) {
				EL(ha, "unable to dma to flash "
				    "address=%xh\n", faddr << 2);
				break;
			}

			cnt += wc;
			faddr += wc;
			dp += wc << 2;
		} else {
			/* Assemble one little-endian 32-bit word. */
			fdata = *dp++;
			fdata |= *dp++ << 8;
			fdata |= *dp++ << 16;
			fdata |= *dp++ << 24;
			rval = ql_24xx_write_flash(ha,
			    ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    *dp);
				break;
			}
			cnt++;
			faddr++;

			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
		}
	}

	ql_24xx_protect_flash(ha);

	ql_free_phys(ha, &dmabuf);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11461
/*
 * ql_24xx_read_flash
 *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
 *
 *	8021 parts go through the ROM interface; others program the flash
 *	address register and poll FLASH_DATA_FLAG for read completion.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	faddr:	NVRAM/FLASH address.
 *	bp:	data pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}

	/* Clear access error flag */
	/* Error bit is write-one-to-clear. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Data flag clear == read request. */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/* *bp is stored even on failure; contents are then undefined. */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11517
/*
 * ql_24xx_write_flash
 *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
 *
 *	8021 parts go through the ROM interface; others latch data and
 *	address (with FLASH_DATA_FLAG set to request a write) and poll
 *	for completion, additionally checking the serial flash's
 *	write-in-progress status bit for configuration-space writes.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	NVRAM/FLASH address.
 *	value:	data.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	/* Clear access error flag */
	/* Error bit is write-one-to-clear. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	/* Data flag set == write request. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/* Status register: BIT_0 == busy. */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11580 /*
11581 * ql_24xx_unprotect_flash
11582 * Enable writes
11583 *
11584 * Input:
11585 * ha: adapter state pointer.
11586 *
11587 * Returns:
11588 * ql local function return status code.
11589 *
11590 * Context:
11591 * Kernel context.
11592 */
11593 int
11594 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11595 {
11596 int rval;
11597 uint32_t fdata;
11598 ql_adapter_state_t *ha = vha->pha;
11599 ql_xioctl_t *xp = ha->xioctl;
11600
11601 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11602
11603 if (CFG_IST(ha, CFG_CTRL_8021)) {
11604 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11605 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11606 if (rval != QL_SUCCESS) {
11607 EL(ha, "8021 access error\n");
11608 }
11609 return (rval);
11610 }
11611 if (CFG_IST(ha, CFG_CTRL_81XX)) {
11612 if (ha->task_daemon_flags & FIRMWARE_UP) {
11613 if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11614 0)) != QL_SUCCESS) {
11615 EL(ha, "status=%xh\n", rval);
11616 }
11617 QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11618 ha->instance);
11619 return (rval);
11620 }
11621 } else {
11622 /* Enable flash write. */
11623 WRT32_IO_REG(ha, ctrl_status,
11624 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11625 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11626 }
11627
11628 /*
11629 * Remove block write protection (SST and ST) and
11630 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11631 * Unprotect sectors.
11632 */
11633 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11634 xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11635
11636 if (xp->fdesc.unprotect_sector_cmd != 0) {
11637 for (fdata = 0; fdata < 0x10; fdata++) {
11638 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11639 0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11640 }
11641
11642 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11643 xp->fdesc.unprotect_sector_cmd, 0x00400f);
11644 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11645 xp->fdesc.unprotect_sector_cmd, 0x00600f);
11646 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11647 xp->fdesc.unprotect_sector_cmd, 0x00800f);
11648 }
11649
11650 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11651
11652 return (QL_SUCCESS);
11653 }
11654
11655 /*
11656 * ql_24xx_protect_flash
11657 * Disable writes
11658 *
11659 * Input:
11660 * ha: adapter state pointer.
11661 *
11662 * Context:
11663 * Kernel context.
11664 */
11665 void
11666 ql_24xx_protect_flash(ql_adapter_state_t *vha)
11667 {
11668 int rval;
11669 uint32_t fdata;
11670 ql_adapter_state_t *ha = vha->pha;
11671 ql_xioctl_t *xp = ha->xioctl;
11672
11673 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11674
11675 if (CFG_IST(ha, CFG_CTRL_8021)) {
11676 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11677 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
11678 if (rval != QL_SUCCESS) {
11679 EL(ha, "8021 access error\n");
11680 }
11681 return;
11682 }
11683 if (CFG_IST(ha, CFG_CTRL_81XX)) {
11684 if (ha->task_daemon_flags & FIRMWARE_UP) {
11685 if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11686 0)) != QL_SUCCESS) {
11687 EL(ha, "status=%xh\n", rval);
11688 }
11689 QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11690 ha->instance);
11691 return;
11692 }
11693 } else {
11694 /* Enable flash write. */
11695 WRT32_IO_REG(ha, ctrl_status,
11696 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11697 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11698 }
11699
11700 /*
11701 * Protect sectors.
11702 * Set block write protection (SST and ST) and
11703 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11704 */
11705 if (xp->fdesc.protect_sector_cmd != 0) {
11706 for (fdata = 0; fdata < 0x10; fdata++) {
11707 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11708 0x330 | xp->fdesc.protect_sector_cmd, fdata);
11709 }
11710 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11711 xp->fdesc.protect_sector_cmd, 0x00400f);
11712 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11713 xp->fdesc.protect_sector_cmd, 0x00600f);
11714 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11715 xp->fdesc.protect_sector_cmd, 0x00800f);
11716
11717 /* TODO: ??? */
11718 (void) ql_24xx_write_flash(ha,
11719 FLASH_CONF_ADDR | 0x101, 0x80);
11720 } else {
11721 (void) ql_24xx_write_flash(ha,
11722 FLASH_CONF_ADDR | 0x101, 0x9c);
11723 }
11724
11725 /* Disable flash write. */
11726 if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
11727 WRT32_IO_REG(ha, ctrl_status,
11728 RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11729 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11730 }
11731
11732 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11733 }
11734
11735 /*
11736 * ql_dump_firmware
11737 * Save RISC code state information.
11738 *
11739 * Input:
11740 * ha = adapter state pointer.
11741 *
11742 * Returns:
11743 * QL local function return status code.
11744 *
11745 * Context:
11746 * Kernel context.
11747 */
11748 static int
11749 ql_dump_firmware(ql_adapter_state_t *vha)
11750 {
11751 int rval;
11752 clock_t timer = drv_usectohz(30000000);
11753 ql_adapter_state_t *ha = vha->pha;
11754
11755 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11756
11757 QL_DUMP_LOCK(ha);
11758
11759 if (ha->ql_dump_state & QL_DUMPING ||
11760 (ha->ql_dump_state & QL_DUMP_VALID &&
11761 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11762 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11763 QL_DUMP_UNLOCK(ha);
11764 return (QL_SUCCESS);
11765 }
11766
11767 QL_DUMP_UNLOCK(ha);
11768
11769 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11770
11771 /*
11772 * Wait for all outstanding commands to complete
11773 */
11774 (void) ql_wait_outstanding(ha);
11775
11776 /* Dump firmware. */
11777 rval = ql_binary_fw_dump(ha, TRUE);
11778
11779 /* Do abort to force restart. */
11780 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11781 EL(ha, "restarting, isp_abort_needed\n");
11782
11783 /* Acquire task daemon lock. */
11784 TASK_DAEMON_LOCK(ha);
11785
11786 /* Wait for suspension to end. */
11787 while (ha->task_daemon_flags & QL_SUSPENDED) {
11788 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11789
11790 /* 30 seconds from now */
11791 if (cv_reltimedwait(&ha->cv_dr_suspended,
11792 &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11793 /*
11794 * The timeout time 'timer' was
11795 * reached without the condition
11796 * being signaled.
11797 */
11798 break;
11799 }
11800 }
11801
11802 /* Release task daemon lock. */
11803 TASK_DAEMON_UNLOCK(ha);
11804
11805 if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11806 /*EMPTY*/
11807 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11808 } else {
11809 EL(ha, "failed, rval = %xh\n", rval);
11810 }
11811 return (rval);
11812 }
11813
11814 /*
11815 * ql_binary_fw_dump
11816 * Dumps binary data from firmware.
11817 *
11818 * Input:
11819 * ha = adapter state pointer.
11820 * lock_needed = mailbox lock needed.
11821 *
11822 * Returns:
11823 * ql local function return status code.
11824 *
11825 * Context:
11826 * Interrupt or Kernel context, no mailbox commands allowed.
11827 */
11828 int
11829 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11830 {
11831 clock_t timer;
11832 mbx_cmd_t mc;
11833 mbx_cmd_t *mcp = &mc;
11834 int rval = QL_SUCCESS;
11835 ql_adapter_state_t *ha = vha->pha;
11836
11837 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11838
11839 if (CFG_IST(ha, CFG_CTRL_8021)) {
11840 EL(ha, "8021 not supported\n");
11841 return (QL_NOT_SUPPORTED);
11842 }
11843
11844 QL_DUMP_LOCK(ha);
11845
11846 if (ha->ql_dump_state & QL_DUMPING ||
11847 (ha->ql_dump_state & QL_DUMP_VALID &&
11848 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11849 EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11850 QL_DUMP_UNLOCK(ha);
11851 return (QL_DATA_EXISTS);
11852 }
11853
11854 ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11855 ha->ql_dump_state |= QL_DUMPING;
11856
11857 QL_DUMP_UNLOCK(ha);
11858
11859 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11860
11861 /* Insert Time Stamp */
11862 rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11863 FTO_INSERT_TIME_STAMP);
11864 if (rval != QL_SUCCESS) {
11865 EL(ha, "f/w extended trace insert"
11866 "time stamp failed: %xh\n", rval);
11867 }
11868 }
11869
11870 if (lock_needed == TRUE) {
11871 /* Acquire mailbox register lock. */
11872 MBX_REGISTER_LOCK(ha);
11873 timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11874
11875 /* Check for mailbox available, if not wait for signal. */
11876 while (ha->mailbox_flags & MBX_BUSY_FLG) {
11877 ha->mailbox_flags = (uint8_t)
11878 (ha->mailbox_flags | MBX_WANT_FLG);
11879
11880 /* 30 seconds from now */
11881 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11882 timer, TR_CLOCK_TICK) == -1) {
11883 /*
11884 * The timeout time 'timer' was
11885 * reached without the condition
11886 * being signaled.
11887 */
11888
11889 /* Release mailbox register lock. */
11890 MBX_REGISTER_UNLOCK(ha);
11891
11892 EL(ha, "failed, rval = %xh\n",
11893 QL_FUNCTION_TIMEOUT);
11894 return (QL_FUNCTION_TIMEOUT);
11895 }
11896 }
11897
11898 /* Set busy flag. */
11899 ha->mailbox_flags = (uint8_t)
11900 (ha->mailbox_flags | MBX_BUSY_FLG);
11901 mcp->timeout = 120;
11902 ha->mcp = mcp;
11903
11904 /* Release mailbox register lock. */
11905 MBX_REGISTER_UNLOCK(ha);
11906 }
11907
11908 /* Free previous dump buffer. */
11909 if (ha->ql_dump_ptr != NULL) {
11910 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11911 ha->ql_dump_ptr = NULL;
11912 }
11913
11914 if (CFG_IST(ha, CFG_CTRL_2422)) {
11915 ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11916 ha->fw_ext_memory_size);
11917 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11918 ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11919 ha->fw_ext_memory_size);
11920 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11921 ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11922 ha->fw_ext_memory_size);
11923 } else {
11924 ha->ql_dump_size = sizeof (ql_fw_dump_t);
11925 }
11926
11927 if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11928 NULL) {
11929 rval = QL_MEMORY_ALLOC_FAILED;
11930 } else {
11931 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11932 rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11933 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11934 rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11935 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11936 rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11937 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
11938 rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11939 } else {
11940 rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11941 }
11942 }
11943
11944 /* Reset ISP chip. */
11945 ql_reset_chip(ha);
11946
11947 QL_DUMP_LOCK(ha);
11948
11949 if (rval != QL_SUCCESS) {
11950 if (ha->ql_dump_ptr != NULL) {
11951 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11952 ha->ql_dump_ptr = NULL;
11953 }
11954 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11955 QL_DUMP_UPLOADED);
11956 EL(ha, "failed, rval = %xh\n", rval);
11957 } else {
11958 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11959 ha->ql_dump_state |= QL_DUMP_VALID;
11960 EL(ha, "done\n");
11961 }
11962
11963 QL_DUMP_UNLOCK(ha);
11964
11965 return (rval);
11966 }
11967
11968 /*
11969 * ql_ascii_fw_dump
11970 * Converts firmware binary dump to ascii.
11971 *
11972 * Input:
11973 * ha = adapter state pointer.
11974 * bptr = buffer pointer.
11975 *
11976 * Returns:
11977 * Amount of data buffer used.
11978 *
11979 * Context:
11980 * Kernel context.
11981 */
11982 size_t
11983 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11984 {
11985 uint32_t cnt;
11986 caddr_t bp;
11987 int mbox_cnt;
11988 ql_adapter_state_t *ha = vha->pha;
11989 ql_fw_dump_t *fw = ha->ql_dump_ptr;
11990
11991 if (CFG_IST(ha, CFG_CTRL_2422)) {
11992 return (ql_24xx_ascii_fw_dump(ha, bufp));
11993 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
11994 return (ql_2581_ascii_fw_dump(ha, bufp));
11995 }
11996
11997 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11998
11999 if (CFG_IST(ha, CFG_CTRL_2300)) {
12000 (void) sprintf(bufp, "\nISP 2300IP ");
12001 } else if (CFG_IST(ha, CFG_CTRL_6322)) {
12002 (void) sprintf(bufp, "\nISP 6322FLX ");
12003 } else {
12004 (void) sprintf(bufp, "\nISP 2200IP ");
12005 }
12006
12007 bp = bufp + strlen(bufp);
12008 (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12009 ha->fw_major_version, ha->fw_minor_version,
12010 ha->fw_subminor_version);
12011
12012 (void) strcat(bufp, "\nPBIU Registers:");
12013 bp = bufp + strlen(bufp);
12014 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12015 if (cnt % 8 == 0) {
12016 *bp++ = '\n';
12017 }
12018 (void) sprintf(bp, "%04x ", fw->pbiu_reg[cnt]);
12019 bp = bp + 6;
12020 }
12021
12022 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12023 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12024 "registers:");
12025 bp = bufp + strlen(bufp);
12026 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12027 if (cnt % 8 == 0) {
12028 *bp++ = '\n';
12029 }
12030 (void) sprintf(bp, "%04x ", fw->risc_host_reg[cnt]);
12031 bp = bp + 6;
12032 }
12033 }
12034
12035 (void) strcat(bp, "\n\nMailbox Registers:");
12036 bp = bufp + strlen(bufp);
12037 mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
12038 for (cnt = 0; cnt < mbox_cnt; cnt++) {
12039 if (cnt % 8 == 0) {
12040 *bp++ = '\n';
12041 }
12042 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12043 bp = bp + 6;
12044 }
12045
12046 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12047 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12048 bp = bufp + strlen(bufp);
12049 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12050 if (cnt % 8 == 0) {
12051 *bp++ = '\n';
12052 }
12053 (void) sprintf(bp, "%04x ", fw->resp_dma_reg[cnt]);
12054 bp = bp + 6;
12055 }
12056 }
12057
12058 (void) strcat(bp, "\n\nDMA Registers:");
12059 bp = bufp + strlen(bufp);
12060 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12061 if (cnt % 8 == 0) {
12062 *bp++ = '\n';
12063 }
12064 (void) sprintf(bp, "%04x ", fw->dma_reg[cnt]);
12065 bp = bp + 6;
12066 }
12067
12068 (void) strcat(bp, "\n\nRISC Hardware Registers:");
12069 bp = bufp + strlen(bufp);
12070 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12071 if (cnt % 8 == 0) {
12072 *bp++ = '\n';
12073 }
12074 (void) sprintf(bp, "%04x ", fw->risc_hdw_reg[cnt]);
12075 bp = bp + 6;
12076 }
12077
12078 (void) strcat(bp, "\n\nRISC GP0 Registers:");
12079 bp = bufp + strlen(bufp);
12080 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12081 if (cnt % 8 == 0) {
12082 *bp++ = '\n';
12083 }
12084 (void) sprintf(bp, "%04x ", fw->risc_gp0_reg[cnt]);
12085 bp = bp + 6;
12086 }
12087
12088 (void) strcat(bp, "\n\nRISC GP1 Registers:");
12089 bp = bufp + strlen(bufp);
12090 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12091 if (cnt % 8 == 0) {
12092 *bp++ = '\n';
12093 }
12094 (void) sprintf(bp, "%04x ", fw->risc_gp1_reg[cnt]);
12095 bp = bp + 6;
12096 }
12097
12098 (void) strcat(bp, "\n\nRISC GP2 Registers:");
12099 bp = bufp + strlen(bufp);
12100 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12101 if (cnt % 8 == 0) {
12102 *bp++ = '\n';
12103 }
12104 (void) sprintf(bp, "%04x ", fw->risc_gp2_reg[cnt]);
12105 bp = bp + 6;
12106 }
12107
12108 (void) strcat(bp, "\n\nRISC GP3 Registers:");
12109 bp = bufp + strlen(bufp);
12110 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12111 if (cnt % 8 == 0) {
12112 *bp++ = '\n';
12113 }
12114 (void) sprintf(bp, "%04x ", fw->risc_gp3_reg[cnt]);
12115 bp = bp + 6;
12116 }
12117
12118 (void) strcat(bp, "\n\nRISC GP4 Registers:");
12119 bp = bufp + strlen(bufp);
12120 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12121 if (cnt % 8 == 0) {
12122 *bp++ = '\n';
12123 }
12124 (void) sprintf(bp, "%04x ", fw->risc_gp4_reg[cnt]);
12125 bp = bp + 6;
12126 }
12127
12128 (void) strcat(bp, "\n\nRISC GP5 Registers:");
12129 bp = bufp + strlen(bufp);
12130 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12131 if (cnt % 8 == 0) {
12132 *bp++ = '\n';
12133 }
12134 (void) sprintf(bp, "%04x ", fw->risc_gp5_reg[cnt]);
12135 bp = bp + 6;
12136 }
12137
12138 (void) strcat(bp, "\n\nRISC GP6 Registers:");
12139 bp = bufp + strlen(bufp);
12140 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12141 if (cnt % 8 == 0) {
12142 *bp++ = '\n';
12143 }
12144 (void) sprintf(bp, "%04x ", fw->risc_gp6_reg[cnt]);
12145 bp = bp + 6;
12146 }
12147
12148 (void) strcat(bp, "\n\nRISC GP7 Registers:");
12149 bp = bufp + strlen(bufp);
12150 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12151 if (cnt % 8 == 0) {
12152 *bp++ = '\n';
12153 }
12154 (void) sprintf(bp, "%04x ", fw->risc_gp7_reg[cnt]);
12155 bp = bp + 6;
12156 }
12157
12158 (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12159 bp = bufp + strlen(bufp);
12160 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12161 if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12162 CFG_CTRL_6322)) == 0))) {
12163 break;
12164 }
12165 if (cnt % 8 == 0) {
12166 *bp++ = '\n';
12167 }
12168 (void) sprintf(bp, "%04x ", fw->frame_buf_hdw_reg[cnt]);
12169 bp = bp + 6;
12170 }
12171
12172 (void) strcat(bp, "\n\nFPM B0 Registers:");
12173 bp = bufp + strlen(bufp);
12174 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12175 if (cnt % 8 == 0) {
12176 *bp++ = '\n';
12177 }
12178 (void) sprintf(bp, "%04x ", fw->fpm_b0_reg[cnt]);
12179 bp = bp + 6;
12180 }
12181
12182 (void) strcat(bp, "\n\nFPM B1 Registers:");
12183 bp = bufp + strlen(bufp);
12184 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12185 if (cnt % 8 == 0) {
12186 *bp++ = '\n';
12187 }
12188 (void) sprintf(bp, "%04x ", fw->fpm_b1_reg[cnt]);
12189 bp = bp + 6;
12190 }
12191
12192 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12193 (void) strcat(bp, "\n\nCode RAM Dump:");
12194 bp = bufp + strlen(bufp);
12195 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12196 if (cnt % 8 == 0) {
12197 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12198 bp = bp + 8;
12199 }
12200 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12201 bp = bp + 6;
12202 }
12203
12204 (void) strcat(bp, "\n\nStack RAM Dump:");
12205 bp = bufp + strlen(bufp);
12206 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12207 if (cnt % 8 == 0) {
12208 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12209 bp = bp + 8;
12210 }
12211 (void) sprintf(bp, "%04x ", fw->stack_ram[cnt]);
12212 bp = bp + 6;
12213 }
12214
12215 (void) strcat(bp, "\n\nData RAM Dump:");
12216 bp = bufp + strlen(bufp);
12217 for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12218 if (cnt % 8 == 0) {
12219 (void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12220 bp = bp + 8;
12221 }
12222 (void) sprintf(bp, "%04x ", fw->data_ram[cnt]);
12223 bp = bp + 6;
12224 }
12225 } else {
12226 (void) strcat(bp, "\n\nRISC SRAM:");
12227 bp = bufp + strlen(bufp);
12228 for (cnt = 0; cnt < 0xf000; cnt++) {
12229 if (cnt % 8 == 0) {
12230 (void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12231 bp = bp + 7;
12232 }
12233 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12234 bp = bp + 6;
12235 }
12236 }
12237
12238 (void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12239 bp += strlen(bp);
12240
12241 (void) sprintf(bp, "\n\nRequest Queue");
12242 bp += strlen(bp);
12243 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12244 if (cnt % 8 == 0) {
12245 (void) sprintf(bp, "\n%08x: ", cnt);
12246 bp += strlen(bp);
12247 }
12248 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12249 bp += strlen(bp);
12250 }
12251
12252 (void) sprintf(bp, "\n\nResponse Queue");
12253 bp += strlen(bp);
12254 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12255 if (cnt % 8 == 0) {
12256 (void) sprintf(bp, "\n%08x: ", cnt);
12257 bp += strlen(bp);
12258 }
12259 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12260 bp += strlen(bp);
12261 }
12262
12263 (void) sprintf(bp, "\n");
12264
12265 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12266
12267 return (strlen(bufp));
12268 }
12269
12270 /*
12271 * ql_24xx_ascii_fw_dump
12272 * Converts ISP24xx firmware binary dump to ascii.
12273 *
12274 * Input:
12275 * ha = adapter state pointer.
12276 * bptr = buffer pointer.
12277 *
12278 * Returns:
12279 * Amount of data buffer used.
12280 *
12281 * Context:
12282 * Kernel context.
12283 */
12284 static size_t
12285 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12286 {
12287 uint32_t cnt;
12288 caddr_t bp = bufp;
12289 ql_24xx_fw_dump_t *fw = ha->ql_dump_ptr;
12290
12291 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12292
12293 (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12294 ha->fw_major_version, ha->fw_minor_version,
12295 ha->fw_subminor_version, ha->fw_attributes);
12296 bp += strlen(bp);
12297
12298 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12299
12300 (void) strcat(bp, "\nHost Interface Registers");
12301 bp += strlen(bp);
12302 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12303 if (cnt % 8 == 0) {
12304 (void) sprintf(bp++, "\n");
12305 }
12306
12307 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12308 bp += 9;
12309 }
12310
12311 (void) sprintf(bp, "\n\nMailbox Registers");
12312 bp += strlen(bp);
12313 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12314 if (cnt % 16 == 0) {
12315 (void) sprintf(bp++, "\n");
12316 }
12317
12318 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12319 bp += 5;
12320 }
12321
12322 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12323 bp += strlen(bp);
12324 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12325 if (cnt % 8 == 0) {
12326 (void) sprintf(bp++, "\n");
12327 }
12328
12329 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12330 bp += 9;
12331 }
12332
12333 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12334 bp += strlen(bp);
12335 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12336 if (cnt % 8 == 0) {
12337 (void) sprintf(bp++, "\n");
12338 }
12339
12340 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12341 bp += 9;
12342 }
12343
12344 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12345 bp += strlen(bp);
12346 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12347 if (cnt % 8 == 0) {
12348 (void) sprintf(bp++, "\n");
12349 }
12350
12351 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12352 bp += 9;
12353 }
12354
12355 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12356 bp += strlen(bp);
12357 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12358 if (cnt % 8 == 0) {
12359 (void) sprintf(bp++, "\n");
12360 }
12361
12362 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12363 bp += 9;
12364 }
12365
12366 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12367 bp += strlen(bp);
12368 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12369 if (cnt % 8 == 0) {
12370 (void) sprintf(bp++, "\n");
12371 }
12372
12373 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12374 bp += 9;
12375 }
12376
12377 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12378 bp += strlen(bp);
12379 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12380 if (cnt % 8 == 0) {
12381 (void) sprintf(bp++, "\n");
12382 }
12383
12384 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12385 bp += 9;
12386 }
12387
12388 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12389 bp += strlen(bp);
12390 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12391 if (cnt % 8 == 0) {
12392 (void) sprintf(bp++, "\n");
12393 }
12394
12395 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12396 bp += 9;
12397 }
12398
12399 (void) sprintf(bp, "\n\nCommand DMA Registers");
12400 bp += strlen(bp);
12401 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12402 if (cnt % 8 == 0) {
12403 (void) sprintf(bp++, "\n");
12404 }
12405
12406 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12407 bp += 9;
12408 }
12409
12410 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12411 bp += strlen(bp);
12412 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12413 if (cnt % 8 == 0) {
12414 (void) sprintf(bp++, "\n");
12415 }
12416
12417 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12418 bp += 9;
12419 }
12420
12421 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12422 bp += strlen(bp);
12423 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12424 if (cnt % 8 == 0) {
12425 (void) sprintf(bp++, "\n");
12426 }
12427
12428 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12429 bp += 9;
12430 }
12431
12432 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12433 bp += strlen(bp);
12434 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12435 if (cnt % 8 == 0) {
12436 (void) sprintf(bp++, "\n");
12437 }
12438
12439 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12440 bp += 9;
12441 }
12442
12443 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12444 bp += strlen(bp);
12445 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12446 if (cnt % 8 == 0) {
12447 (void) sprintf(bp++, "\n");
12448 }
12449
12450 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12451 bp += 9;
12452 }
12453
12454 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12455 bp += strlen(bp);
12456 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12457 if (cnt % 8 == 0) {
12458 (void) sprintf(bp++, "\n");
12459 }
12460
12461 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12462 bp += 9;
12463 }
12464
12465 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12466 bp += strlen(bp);
12467 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12468 if (cnt % 8 == 0) {
12469 (void) sprintf(bp++, "\n");
12470 }
12471
12472 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12473 bp += 9;
12474 }
12475
12476 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12477 bp += strlen(bp);
12478 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12479 if (cnt % 8 == 0) {
12480 (void) sprintf(bp++, "\n");
12481 }
12482
12483 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12484 bp += 9;
12485 }
12486
12487 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12488 bp += strlen(bp);
12489 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12490 if (cnt % 8 == 0) {
12491 (void) sprintf(bp++, "\n");
12492 }
12493
12494 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12495 bp += 9;
12496 }
12497
12498 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12499 bp += strlen(bp);
12500 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12501 if (cnt % 8 == 0) {
12502 (void) sprintf(bp++, "\n");
12503 }
12504
12505 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12506 bp += 9;
12507 }
12508
12509 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12510 bp += strlen(bp);
12511 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12512 if (cnt % 8 == 0) {
12513 (void) sprintf(bp++, "\n");
12514 }
12515
12516 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12517 bp += 9;
12518 }
12519
12520 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12521 bp += strlen(bp);
12522 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12523 if (cnt % 8 == 0) {
12524 (void) sprintf(bp++, "\n");
12525 }
12526
12527 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12528 bp += 9;
12529 }
12530
12531 (void) sprintf(bp, "\n\nRISC GP Registers");
12532 bp += strlen(bp);
12533 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12534 if (cnt % 8 == 0) {
12535 (void) sprintf(bp++, "\n");
12536 }
12537
12538 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12539 bp += 9;
12540 }
12541
12542 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12543 bp += strlen(bp);
12544 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12545 if (cnt % 8 == 0) {
12546 (void) sprintf(bp++, "\n");
12547 }
12548
12549 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12550 bp += 9;
12551 }
12552
12553 (void) sprintf(bp, "\n\nLMC Registers");
12554 bp += strlen(bp);
12555 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12556 if (cnt % 8 == 0) {
12557 (void) sprintf(bp++, "\n");
12558 }
12559
12560 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12561 bp += 9;
12562 }
12563
12564 (void) sprintf(bp, "\n\nFPM Hardware Registers");
12565 bp += strlen(bp);
12566 for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12567 if (cnt % 8 == 0) {
12568 (void) sprintf(bp++, "\n");
12569 }
12570
12571 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12572 bp += 9;
12573 }
12574
12575 (void) sprintf(bp, "\n\nFB Hardware Registers");
12576 bp += strlen(bp);
12577 for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12578 if (cnt % 8 == 0) {
12579 (void) sprintf(bp++, "\n");
12580 }
12581
12582 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12583 bp += 9;
12584 }
12585
12586 (void) sprintf(bp, "\n\nCode RAM");
12587 bp += strlen(bp);
12588 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12589 if (cnt % 8 == 0) {
12590 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12591 bp += 11;
12592 }
12593
12594 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12595 bp += 9;
12596 }
12597
12598 (void) sprintf(bp, "\n\nExternal Memory");
12599 bp += strlen(bp);
12600 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12601 if (cnt % 8 == 0) {
12602 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12603 bp += 11;
12604 }
12605 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12606 bp += 9;
12607 }
12608
12609 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12610 bp += strlen(bp);
12611
12612 (void) sprintf(bp, "\n\nRequest Queue");
12613 bp += strlen(bp);
12614 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12615 if (cnt % 8 == 0) {
12616 (void) sprintf(bp, "\n%08x: ", cnt);
12617 bp += strlen(bp);
12618 }
12619 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12620 bp += strlen(bp);
12621 }
12622
12623 (void) sprintf(bp, "\n\nResponse Queue");
12624 bp += strlen(bp);
12625 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12626 if (cnt % 8 == 0) {
12627 (void) sprintf(bp, "\n%08x: ", cnt);
12628 bp += strlen(bp);
12629 }
12630 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12631 bp += strlen(bp);
12632 }
12633
12634 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12635 (ha->fwexttracebuf.bp != NULL)) {
12636 uint32_t cnt_b = 0;
12637 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12638
12639 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12640 bp += strlen(bp);
12641 /* show data address as a byte address, data as long words */
12642 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12643 cnt_b = cnt * 4;
12644 if (cnt_b % 32 == 0) {
12645 (void) sprintf(bp, "\n%08x: ",
12646 (int)(w64 + cnt_b));
12647 bp += 11;
12648 }
12649 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12650 bp += 9;
12651 }
12652 }
12653
12654 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12655 (ha->fwfcetracebuf.bp != NULL)) {
12656 uint32_t cnt_b = 0;
12657 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12658
12659 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12660 bp += strlen(bp);
12661 /* show data address as a byte address, data as long words */
12662 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12663 cnt_b = cnt * 4;
12664 if (cnt_b % 32 == 0) {
12665 (void) sprintf(bp, "\n%08x: ",
12666 (int)(w64 + cnt_b));
12667 bp += 11;
12668 }
12669 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12670 bp += 9;
12671 }
12672 }
12673
12674 (void) sprintf(bp, "\n\n");
12675 bp += strlen(bp);
12676
12677 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12678
12679 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12680
12681 return (cnt);
12682 }
12683
12684 /*
12685 * ql_2581_ascii_fw_dump
12686 * Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12687 *
12688 * Input:
12689 * ha = adapter state pointer.
12690 * bptr = buffer pointer.
12691 *
12692 * Returns:
12693 * Amount of data buffer used.
12694 *
12695 * Context:
12696 * Kernel context.
12697 */
12698 static size_t
12699 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12700 {
12701 uint32_t cnt;
12702 uint32_t cnt1;
12703 caddr_t bp = bufp;
12704 ql_25xx_fw_dump_t *fw = ha->ql_dump_ptr;
12705
12706 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12707
12708 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12709 ha->fw_major_version, ha->fw_minor_version,
12710 ha->fw_subminor_version, ha->fw_attributes);
12711 bp += strlen(bp);
12712
12713 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12714 bp += strlen(bp);
12715
12716 (void) sprintf(bp, "\nHostRisc Registers");
12717 bp += strlen(bp);
12718 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12719 if (cnt % 8 == 0) {
12720 (void) sprintf(bp++, "\n");
12721 }
12722 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12723 bp += 9;
12724 }
12725
12726 (void) sprintf(bp, "\n\nPCIe Registers");
12727 bp += strlen(bp);
12728 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12729 if (cnt % 8 == 0) {
12730 (void) sprintf(bp++, "\n");
12731 }
12732 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12733 bp += 9;
12734 }
12735
12736 (void) strcat(bp, "\n\nHost Interface Registers");
12737 bp += strlen(bp);
12738 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12739 if (cnt % 8 == 0) {
12740 (void) sprintf(bp++, "\n");
12741 }
12742 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12743 bp += 9;
12744 }
12745
12746 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12747 bp += strlen(bp);
12748 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12749 if (cnt % 8 == 0) {
12750 (void) sprintf(bp++, "\n");
12751 }
12752 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12753 bp += 9;
12754 }
12755
12756 (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12757 fw->risc_io);
12758 bp += strlen(bp);
12759
12760 (void) sprintf(bp, "\n\nMailbox Registers");
12761 bp += strlen(bp);
12762 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12763 if (cnt % 16 == 0) {
12764 (void) sprintf(bp++, "\n");
12765 }
12766 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12767 bp += 5;
12768 }
12769
12770 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12771 bp += strlen(bp);
12772 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12773 if (cnt % 8 == 0) {
12774 (void) sprintf(bp++, "\n");
12775 }
12776 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12777 bp += 9;
12778 }
12779
12780 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12781 bp += strlen(bp);
12782 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12783 if (cnt % 8 == 0) {
12784 (void) sprintf(bp++, "\n");
12785 }
12786 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12787 bp += 9;
12788 }
12789
12790 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12791 bp += strlen(bp);
12792 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12793 if (cnt % 8 == 0) {
12794 (void) sprintf(bp++, "\n");
12795 }
12796 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12797 bp += 9;
12798 }
12799
12800 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12801 bp += strlen(bp);
12802 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12803 if (cnt % 8 == 0) {
12804 (void) sprintf(bp++, "\n");
12805 }
12806 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12807 bp += 9;
12808 }
12809
12810 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12811 bp += strlen(bp);
12812 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12813 if (cnt % 8 == 0) {
12814 (void) sprintf(bp++, "\n");
12815 }
12816 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12817 bp += 9;
12818 }
12819
12820 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12821 bp += strlen(bp);
12822 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12823 if (cnt % 8 == 0) {
12824 (void) sprintf(bp++, "\n");
12825 }
12826 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12827 bp += 9;
12828 }
12829
12830 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12831 bp += strlen(bp);
12832 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12833 if (cnt % 8 == 0) {
12834 (void) sprintf(bp++, "\n");
12835 }
12836 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12837 bp += 9;
12838 }
12839
12840 (void) sprintf(bp, "\n\nASEQ GP Registers");
12841 bp += strlen(bp);
12842 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12843 if (cnt % 8 == 0) {
12844 (void) sprintf(bp++, "\n");
12845 }
12846 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12847 bp += 9;
12848 }
12849
12850 (void) sprintf(bp, "\n\nASEQ-0 Registers");
12851 bp += strlen(bp);
12852 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12853 if (cnt % 8 == 0) {
12854 (void) sprintf(bp++, "\n");
12855 }
12856 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12857 bp += 9;
12858 }
12859
12860 (void) sprintf(bp, "\n\nASEQ-1 Registers");
12861 bp += strlen(bp);
12862 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12863 if (cnt % 8 == 0) {
12864 (void) sprintf(bp++, "\n");
12865 }
12866 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12867 bp += 9;
12868 }
12869
12870 (void) sprintf(bp, "\n\nASEQ-2 Registers");
12871 bp += strlen(bp);
12872 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12873 if (cnt % 8 == 0) {
12874 (void) sprintf(bp++, "\n");
12875 }
12876 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12877 bp += 9;
12878 }
12879
12880 (void) sprintf(bp, "\n\nCommand DMA Registers");
12881 bp += strlen(bp);
12882 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12883 if (cnt % 8 == 0) {
12884 (void) sprintf(bp++, "\n");
12885 }
12886 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12887 bp += 9;
12888 }
12889
12890 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12891 bp += strlen(bp);
12892 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12893 if (cnt % 8 == 0) {
12894 (void) sprintf(bp++, "\n");
12895 }
12896 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12897 bp += 9;
12898 }
12899
12900 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12901 bp += strlen(bp);
12902 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12903 if (cnt % 8 == 0) {
12904 (void) sprintf(bp++, "\n");
12905 }
12906 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12907 bp += 9;
12908 }
12909
12910 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12911 bp += strlen(bp);
12912 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12913 if (cnt % 8 == 0) {
12914 (void) sprintf(bp++, "\n");
12915 }
12916 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12917 bp += 9;
12918 }
12919
12920 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12921 bp += strlen(bp);
12922 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12923 if (cnt % 8 == 0) {
12924 (void) sprintf(bp++, "\n");
12925 }
12926 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12927 bp += 9;
12928 }
12929
12930 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12931 bp += strlen(bp);
12932 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12933 if (cnt % 8 == 0) {
12934 (void) sprintf(bp++, "\n");
12935 }
12936 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12937 bp += 9;
12938 }
12939
12940 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12941 bp += strlen(bp);
12942 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12943 if (cnt % 8 == 0) {
12944 (void) sprintf(bp++, "\n");
12945 }
12946 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12947 bp += 9;
12948 }
12949
12950 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12951 bp += strlen(bp);
12952 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12953 if (cnt % 8 == 0) {
12954 (void) sprintf(bp++, "\n");
12955 }
12956 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12957 bp += 9;
12958 }
12959
12960 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12961 bp += strlen(bp);
12962 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12963 if (cnt % 8 == 0) {
12964 (void) sprintf(bp++, "\n");
12965 }
12966 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12967 bp += 9;
12968 }
12969
12970 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12971 bp += strlen(bp);
12972 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12973 if (cnt % 8 == 0) {
12974 (void) sprintf(bp++, "\n");
12975 }
12976 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12977 bp += 9;
12978 }
12979
12980 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12981 bp += strlen(bp);
12982 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12983 if (cnt % 8 == 0) {
12984 (void) sprintf(bp++, "\n");
12985 }
12986 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12987 bp += 9;
12988 }
12989
12990 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12991 bp += strlen(bp);
12992 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12993 if (cnt % 8 == 0) {
12994 (void) sprintf(bp++, "\n");
12995 }
12996 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12997 bp += 9;
12998 }
12999
13000 (void) sprintf(bp, "\n\nRISC GP Registers");
13001 bp += strlen(bp);
13002 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13003 if (cnt % 8 == 0) {
13004 (void) sprintf(bp++, "\n");
13005 }
13006 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13007 bp += 9;
13008 }
13009
13010 (void) sprintf(bp, "\n\nLMC Registers");
13011 bp += strlen(bp);
13012 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13013 if (cnt % 8 == 0) {
13014 (void) sprintf(bp++, "\n");
13015 }
13016 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13017 bp += 9;
13018 }
13019
13020 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13021 bp += strlen(bp);
13022 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13023 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13024 (uint32_t)(sizeof (fw->fpm_hdw_reg));
13025 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13026 if (cnt % 8 == 0) {
13027 (void) sprintf(bp++, "\n");
13028 }
13029 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13030 bp += 9;
13031 }
13032
13033 (void) sprintf(bp, "\n\nFB Hardware Registers");
13034 bp += strlen(bp);
13035 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13036 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13037 (uint32_t)(sizeof (fw->fb_hdw_reg));
13038 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13039 if (cnt % 8 == 0) {
13040 (void) sprintf(bp++, "\n");
13041 }
13042 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13043 bp += 9;
13044 }
13045
13046 (void) sprintf(bp, "\n\nCode RAM");
13047 bp += strlen(bp);
13048 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13049 if (cnt % 8 == 0) {
13050 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13051 bp += 11;
13052 }
13053 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13054 bp += 9;
13055 }
13056
13057 (void) sprintf(bp, "\n\nExternal Memory");
13058 bp += strlen(bp);
13059 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13060 if (cnt % 8 == 0) {
13061 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13062 bp += 11;
13063 }
13064 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13065 bp += 9;
13066 }
13067
13068 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13069 bp += strlen(bp);
13070
13071 (void) sprintf(bp, "\n\nRequest Queue");
13072 bp += strlen(bp);
13073 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13074 if (cnt % 8 == 0) {
13075 (void) sprintf(bp, "\n%08x: ", cnt);
13076 bp += strlen(bp);
13077 }
13078 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13079 bp += strlen(bp);
13080 }
13081
13082 (void) sprintf(bp, "\n\nResponse Queue");
13083 bp += strlen(bp);
13084 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13085 if (cnt % 8 == 0) {
13086 (void) sprintf(bp, "\n%08x: ", cnt);
13087 bp += strlen(bp);
13088 }
13089 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13090 bp += strlen(bp);
13091 }
13092
13093 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13094 (ha->fwexttracebuf.bp != NULL)) {
13095 uint32_t cnt_b = 0;
13096 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13097
13098 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13099 bp += strlen(bp);
13100 /* show data address as a byte address, data as long words */
13101 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13102 cnt_b = cnt * 4;
13103 if (cnt_b % 32 == 0) {
13104 (void) sprintf(bp, "\n%08x: ",
13105 (int)(w64 + cnt_b));
13106 bp += 11;
13107 }
13108 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13109 bp += 9;
13110 }
13111 }
13112
13113 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13114 (ha->fwfcetracebuf.bp != NULL)) {
13115 uint32_t cnt_b = 0;
13116 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13117
13118 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13119 bp += strlen(bp);
13120 /* show data address as a byte address, data as long words */
13121 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13122 cnt_b = cnt * 4;
13123 if (cnt_b % 32 == 0) {
13124 (void) sprintf(bp, "\n%08x: ",
13125 (int)(w64 + cnt_b));
13126 bp += 11;
13127 }
13128 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13129 bp += 9;
13130 }
13131 }
13132
13133 (void) sprintf(bp, "\n\n");
13134 bp += strlen(bp);
13135
13136 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13137
13138 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13139
13140 return (cnt);
13141 }
13142
13143 /*
13144 * ql_2200_binary_fw_dump
13145 *
13146 * Input:
13147 * ha: adapter state pointer.
13148 * fw: firmware dump context pointer.
13149 *
13150 * Returns:
13151 * ql local function return status code.
13152 *
13153 * Context:
13154 * Interrupt or Kernel context, no mailbox commands allowed.
13155 */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;		/* receives READ_RAM_WORD mbx status */
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30 seconds (30000 x 1ms) for the pause to take. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture the register banks.  Writes to ctrl_status and
		 * pcr select which bank appears in the I/O window, so the
		 * write/read ordering below must not be rearranged.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks 0-7, selected via pcr. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM one word at a time with a hand-rolled
		 * READ_RAM_WORD mailbox handshake (interrupts are off,
		 * so the normal mailbox path cannot be used).
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Poll up to 30s (6000000 x 5us) for completion. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (INTERRUPT_PENDING(ha)) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						/* mb0 = cmd status */
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox_out[0]);
						/* mb2 = RAM word read */
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Not a mailbox int; just clear. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox completion status of this word. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13353
13354 /*
13355 * ql_2300_binary_fw_dump
13356 *
13357 * Input:
13358 * ha: adapter state pointer.
13359 * fw: firmware dump context pointer.
13360 *
13361 * Returns:
13362 * ql local function return status code.
13363 *
13364 * Context:
13365 * Interrupt or Kernel context, no mailbox commands allowed.
13366 */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30 seconds (30000 x 1ms) for the pause to take. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture the register banks.  ctrl_status and pcr writes
		 * select which bank is visible in the I/O window, so the
		 * write/read ordering below must not be rearranged.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP banks 0-7; 2300 bank select steps by 0x200. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13518
13519 /*
13520 * ql_24xx_binary_fw_dump
13521 *
13522 * Input:
13523 * ha: adapter state pointer.
13524 * fw: firmware dump context pointer.
13525 *
13526 * Returns:
13527 * ql local function return status code.
13528 *
13529 * Context:
13530 * Interrupt or Kernel context, no mailbox commands allowed.
13531 */
13532 static int
13533 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13534 {
13535 uint32_t *reg32;
13536 void *bp;
13537 clock_t timer;
13538 int rval = QL_SUCCESS;
13539
13540 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13541
13542 fw->hccr = RD32_IO_REG(ha, hccr);
13543
13544 /* Pause RISC. */
13545 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13546 /* Disable ISP interrupts. */
13547 WRT16_IO_REG(ha, ictrl, 0);
13548
13549 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13550 for (timer = 30000;
13551 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13552 rval == QL_SUCCESS; timer--) {
13553 if (timer) {
13554 drv_usecwait(100);
13555 } else {
13556 rval = QL_FUNCTION_TIMEOUT;
13557 }
13558 }
13559 }
13560
13561 if (rval == QL_SUCCESS) {
13562 /* Host interface registers. */
13563 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13564 sizeof (fw->host_reg) / 4, 32);
13565
13566 /* Disable ISP interrupts. */
13567 WRT32_IO_REG(ha, ictrl, 0);
13568 RD32_IO_REG(ha, ictrl);
13569 ADAPTER_STATE_LOCK(ha);
13570 ha->flags &= ~INTERRUPTS_ENABLED;
13571 ADAPTER_STATE_UNLOCK(ha);
13572
13573 /* Shadow registers. */
13574
13575 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13576 RD32_IO_REG(ha, io_base_addr);
13577
13578 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13579 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13580 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13581 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13582
13583 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13584 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13585 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13586 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13587
13588 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13589 WRT_REG_DWORD(ha, reg32, 0xB0200000);
13590 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13591 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13592
13593 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13594 WRT_REG_DWORD(ha, reg32, 0xB0300000);
13595 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13596 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13597
13598 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13599 WRT_REG_DWORD(ha, reg32, 0xB0400000);
13600 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13601 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13602
13603 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13604 WRT_REG_DWORD(ha, reg32, 0xB0500000);
13605 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13606 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13607
13608 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13609 WRT_REG_DWORD(ha, reg32, 0xB0600000);
13610 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13611 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13612
13613 /* Mailbox registers. */
13614 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13615 sizeof (fw->mailbox_reg) / 2, 16);
13616
13617 /* Transfer sequence registers. */
13618
13619 /* XSEQ GP */
13620 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13621 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13622 16, 32);
13623 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13624 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13625 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13626 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13627 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13628 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13629 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13630 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13631 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13632 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13633 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13634 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13635 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13636 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13637
13638 /* XSEQ-0 */
13639 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13640 (void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13641 sizeof (fw->xseq_0_reg) / 4, 32);
13642
13643 /* XSEQ-1 */
13644 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13645 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13646 sizeof (fw->xseq_1_reg) / 4, 32);
13647
13648 /* Receive sequence registers. */
13649
13650 /* RSEQ GP */
13651 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13652 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13653 16, 32);
13654 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13655 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13656 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13657 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13658 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13659 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13660 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13661 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13662 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13663 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13664 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13665 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13666 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13667 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13668
13669 /* RSEQ-0 */
13670 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13671 (void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13672 sizeof (fw->rseq_0_reg) / 4, 32);
13673
13674 /* RSEQ-1 */
13675 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13676 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13677 sizeof (fw->rseq_1_reg) / 4, 32);
13678
13679 /* RSEQ-2 */
13680 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13681 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13682 sizeof (fw->rseq_2_reg) / 4, 32);
13683
13684 /* Command DMA registers. */
13685
13686 WRT32_IO_REG(ha, io_base_addr, 0x7100);
13687 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13688 sizeof (fw->cmd_dma_reg) / 4, 32);
13689
13690 /* Queues. */
13691
13692 /* RequestQ0 */
13693 WRT32_IO_REG(ha, io_base_addr, 0x7200);
13694 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13695 8, 32);
13696 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13697
13698 /* ResponseQ0 */
13699 WRT32_IO_REG(ha, io_base_addr, 0x7300);
13700 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13701 8, 32);
13702 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13703
13704 /* RequestQ1 */
13705 WRT32_IO_REG(ha, io_base_addr, 0x7400);
13706 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13707 8, 32);
13708 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13709
13710 /* Transmit DMA registers. */
13711
13712 /* XMT0 */
13713 WRT32_IO_REG(ha, io_base_addr, 0x7600);
13714 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13715 16, 32);
13716 WRT32_IO_REG(ha, io_base_addr, 0x7610);
13717 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13718
13719 /* XMT1 */
13720 WRT32_IO_REG(ha, io_base_addr, 0x7620);
13721 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13722 16, 32);
13723 WRT32_IO_REG(ha, io_base_addr, 0x7630);
13724 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13725
13726 /* XMT2 */
13727 WRT32_IO_REG(ha, io_base_addr, 0x7640);
13728 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13729 16, 32);
13730 WRT32_IO_REG(ha, io_base_addr, 0x7650);
13731 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13732
13733 /* XMT3 */
13734 WRT32_IO_REG(ha, io_base_addr, 0x7660);
13735 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13736 16, 32);
13737 WRT32_IO_REG(ha, io_base_addr, 0x7670);
13738 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13739
13740 /* XMT4 */
13741 WRT32_IO_REG(ha, io_base_addr, 0x7680);
13742 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13743 16, 32);
13744 WRT32_IO_REG(ha, io_base_addr, 0x7690);
13745 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13746
13747 /* XMT Common */
13748 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13749 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13750 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13751
13752 /* Receive DMA registers. */
13753
13754 /* RCVThread0 */
13755 WRT32_IO_REG(ha, io_base_addr, 0x7700);
13756 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13757 ha->iobase + 0xC0, 16, 32);
13758 WRT32_IO_REG(ha, io_base_addr, 0x7710);
13759 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13760
13761 /* RCVThread1 */
13762 WRT32_IO_REG(ha, io_base_addr, 0x7720);
13763 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13764 ha->iobase + 0xC0, 16, 32);
13765 WRT32_IO_REG(ha, io_base_addr, 0x7730);
13766 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13767
13768 /* RISC registers. */
13769
13770 /* RISC GP */
13771 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13772 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13773 16, 32);
13774 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13775 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13776 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13777 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13778 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13779 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13780 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13781 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13782 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13783 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13784 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13785 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13786 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13787 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13788
13789 /* Local memory controller registers. */
13790
13791 /* LMC */
13792 WRT32_IO_REG(ha, io_base_addr, 0x3000);
13793 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13794 16, 32);
13795 WRT32_IO_REG(ha, io_base_addr, 0x3010);
13796 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13797 WRT32_IO_REG(ha, io_base_addr, 0x3020);
13798 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13799 WRT32_IO_REG(ha, io_base_addr, 0x3030);
13800 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13801 WRT32_IO_REG(ha, io_base_addr, 0x3040);
13802 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13803 WRT32_IO_REG(ha, io_base_addr, 0x3050);
13804 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13805 WRT32_IO_REG(ha, io_base_addr, 0x3060);
13806 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13807
13808 /* Fibre Protocol Module registers. */
13809
13810 /* FPM hardware */
13811 WRT32_IO_REG(ha, io_base_addr, 0x4000);
13812 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13813 16, 32);
13814 WRT32_IO_REG(ha, io_base_addr, 0x4010);
13815 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13816 WRT32_IO_REG(ha, io_base_addr, 0x4020);
13817 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13818 WRT32_IO_REG(ha, io_base_addr, 0x4030);
13819 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13820 WRT32_IO_REG(ha, io_base_addr, 0x4040);
13821 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13822 WRT32_IO_REG(ha, io_base_addr, 0x4050);
13823 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13824 WRT32_IO_REG(ha, io_base_addr, 0x4060);
13825 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13826 WRT32_IO_REG(ha, io_base_addr, 0x4070);
13827 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13828 WRT32_IO_REG(ha, io_base_addr, 0x4080);
13829 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13830 WRT32_IO_REG(ha, io_base_addr, 0x4090);
13831 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13832 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13833 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13834 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13835 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13836
13837 /* Frame Buffer registers. */
13838
13839 /* FB hardware */
13840 WRT32_IO_REG(ha, io_base_addr, 0x6000);
13841 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13842 16, 32);
13843 WRT32_IO_REG(ha, io_base_addr, 0x6010);
13844 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13845 WRT32_IO_REG(ha, io_base_addr, 0x6020);
13846 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13847 WRT32_IO_REG(ha, io_base_addr, 0x6030);
13848 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13849 WRT32_IO_REG(ha, io_base_addr, 0x6040);
13850 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13851 WRT32_IO_REG(ha, io_base_addr, 0x6100);
13852 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13853 WRT32_IO_REG(ha, io_base_addr, 0x6130);
13854 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13855 WRT32_IO_REG(ha, io_base_addr, 0x6150);
13856 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13857 WRT32_IO_REG(ha, io_base_addr, 0x6170);
13858 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13859 WRT32_IO_REG(ha, io_base_addr, 0x6190);
13860 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13861 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13862 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13863 }
13864
13865 /* Get the request queue */
13866 if (rval == QL_SUCCESS) {
13867 uint32_t cnt;
13868 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
13869
13870 /* Sync DMA buffer. */
13871 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13872 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13873 DDI_DMA_SYNC_FORKERNEL);
13874
13875 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13876 fw->req_q[cnt] = *w32++;
13877 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13878 }
13879 }
13880
13881 /* Get the response queue */
13882 if (rval == QL_SUCCESS) {
13883 uint32_t cnt;
13884 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
13885
13886 /* Sync DMA buffer. */
13887 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13888 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13889 DDI_DMA_SYNC_FORKERNEL);
13890
13891 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13892 fw->rsp_q[cnt] = *w32++;
13893 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13894 }
13895 }
13896
13897 /* Reset RISC. */
13898 ql_reset_chip(ha);
13899
13900 /* Memory. */
13901 if (rval == QL_SUCCESS) {
13902 /* Code RAM. */
13903 rval = ql_read_risc_ram(ha, 0x20000,
13904 sizeof (fw->code_ram) / 4, fw->code_ram);
13905 }
13906 if (rval == QL_SUCCESS) {
13907 /* External Memory. */
13908 rval = ql_read_risc_ram(ha, 0x100000,
13909 ha->fw_ext_memory_size / 4, fw->ext_mem);
13910 }
13911
13912 /* Get the extended trace buffer */
13913 if (rval == QL_SUCCESS) {
13914 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13915 (ha->fwexttracebuf.bp != NULL)) {
13916 uint32_t cnt;
13917 uint32_t *w32 = ha->fwexttracebuf.bp;
13918
13919 /* Sync DMA buffer. */
13920 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13921 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13922
13923 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13924 fw->ext_trace_buf[cnt] = *w32++;
13925 }
13926 }
13927 }
13928
13929 /* Get the FC event trace buffer */
13930 if (rval == QL_SUCCESS) {
13931 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13932 (ha->fwfcetracebuf.bp != NULL)) {
13933 uint32_t cnt;
13934 uint32_t *w32 = ha->fwfcetracebuf.bp;
13935
13936 /* Sync DMA buffer. */
13937 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13938 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13939
13940 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13941 fw->fce_trace_buf[cnt] = *w32++;
13942 }
13943 }
13944 }
13945
13946 if (rval != QL_SUCCESS) {
13947 EL(ha, "failed=%xh\n", rval);
13948 } else {
13949 /*EMPTY*/
13950 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13951 }
13952
13953 return (rval);
13954 }
13955
13956 /*
13957 * ql_25xx_binary_fw_dump
13958 *
13959 * Input:
13960 * ha: adapter state pointer.
13961 * fw: firmware dump context pointer.
13962 *
13963 * Returns:
13964 * ql local function return status code.
13965 *
13966 * Context:
13967 * Interrupt or Kernel context, no mailbox commands allowed.
13968 */
static int
ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture the RISC-to-host status before touching the chip. */
	fw->r2h_status = RD32_IO_REG(ha, risc2host);

	/*
	 * Pause RISC so register/RAM contents are stable while dumped.
	 * Poll up to 30000 * 100us (~3 seconds) for the paused bit.
	 */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				/* Progress note once per second of waiting. */
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	/*
	 * Register capture: each group below selects a 16-dword register
	 * window by writing its base to io_base_addr, then reads the
	 * window through the BAR at iobase + 0xC0.  ql_read_regs returns
	 * a pointer just past the words it stored, which is fed back in
	 * as the destination for the next window so successive windows
	 * land contiguously in the dump buffer.
	 */
	if (rval == QL_SUCCESS) {

		/* Host Interface registers */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/*
		 * PCIe registers.  Writing 1 to iobase + 0xC0 enables the
		 * PCIe view (3 dwords at 0xC4 plus 1 at 0xC0); restore
		 * with 0 afterwards.
		 */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers, read directly from the BAR. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers.  Each one is read indirectly: write a
		 * selector (0xB0n00000) to iobase + 0xF0, then read the
		 * value back from iobase + 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers: 16-bit wide, at iobase + 0x80. */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/*
		 * Queues.  Each queue dumps 8 dwords from the window at
		 * 0xC0 plus 7 more from 0xE4.
		 */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer so the host view is current. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy dwords, stored in little-endian order. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer so the host view is current. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy dwords, stored in little-endian order. */
		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC so its RAM can be read via mailbox-less access. */

	ql_reset_chip(ha);

	/* Memory. */

	if (rval == QL_SUCCESS) {
		/* Code RAM, at RISC address 0x20000. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory, at RISC address 0x100000. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the FC event trace buffer, if FCE tracing is enabled. */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer, if extended tracing is enabled. */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
14492
14493 /*
14494 * ql_81xx_binary_fw_dump
14495 *
14496 * Input:
14497 * ha: adapter state pointer.
14498 * fw: firmware dump context pointer.
14499 *
14500 * Returns:
14501 * ql local function return status code.
14502 *
14503 * Context:
14504 * Interrupt or Kernel context, no mailbox commands allowed.
14505 */
static int
ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Snapshot the RISC-to-host status before touching the chip. */
	fw->r2h_status = RD32_IO_REG(ha, risc2host);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Wait up to 3 seconds (30000 * 100us) for the pause. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/* Host Interface registers */

		/*
		 * Each register bank below is selected by writing its base
		 * into io_base_addr and then reading the bank's window at
		 * iobase + 0xC0.  ql_read_regs() returns a pointer just past
		 * the last word copied, so successive calls append into the
		 * same dump buffer via 'bp'.
		 */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT16_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Shadow registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		/*
		 * Shadow registers are read indirectly: write a selector
		 * (0xB0n00000 for shadow register n) to iobase + 0xF0, then
		 * read the value back from iobase + 0xFC.
		 */
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers. */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer so the host sees current device data. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy words out, normalizing to little-endian. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer so the host sees current device data. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy words out, normalizing to little-endian. */
		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */

	ql_reset_chip(ha);

	/* Memory. */

	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15035
15036 /*
15037 * ql_read_risc_ram
15038 * Reads RISC RAM one word at a time.
15039 * Risc interrupts must be disabled when this routine is called.
15040 *
15041 * Input:
15042 * ha: adapter state pointer.
15043 * risc_address: RISC code start address.
15044 * len: Number of words.
15045 * buf: buffer pointer.
15046 *
15047 * Returns:
15048 * ql local function return status code.
15049 *
15050 * Context:
15051 * Interrupt or Kernel context, no mailbox commands allowed.
15052 */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	/*
	 * One MBC_READ_RAM_EXTENDED mailbox command per word: the 32-bit
	 * RISC address is split across mailbox_in[1] (low 16 bits) and
	 * mailbox_in[8] (high 16 bits).  Interrupt completion is polled
	 * directly since RISC interrupts are disabled by the caller.
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
		/* Ring the doorbell appropriate for the chip family. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		}
		/* Poll up to 30 seconds (6000000 * 5us) for completion. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (INTERRUPT_PENDING(ha)) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, risc2host) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					/* Command complete status. */
					if (CFG_IST(ha, CFG_CTRL_24258081)) {
						/* 32-bit data word. */
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox_out[2]),
						    RD16_IO_REG(ha,
						    mailbox_out[3]));
					} else {
						/* 16-bit data word. */
						buf16[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command error; mbx0 has status. */
					rval = RD16_IO_REG(ha, mailbox_out[0]);
					break;
				}
				/*
				 * Some other interrupt source; clear it and
				 * keep waiting for the mailbox completion.
				 */
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_hw_intr(ha);
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Unconditionally clear the RISC interrupt for this word. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
15129
15130 /*
15131 * ql_read_regs
15132 * Reads adapter registers to buffer.
15133 *
15134 * Input:
15135 * ha: adapter state pointer.
15136 * buf: buffer pointer.
15137 * reg: start address.
15138 * count: number of registers.
15139 * wds: register size.
15140 *
15141 * Context:
15142 * Interrupt or Kernel context, no mailbox commands allowed.
15143 */
15144 static void *
15145 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15146 uint8_t wds)
15147 {
15148 uint32_t *bp32, *reg32;
15149 uint16_t *bp16, *reg16;
15150 uint8_t *bp8, *reg8;
15151
15152 switch (wds) {
15153 case 32:
15154 bp32 = buf;
15155 reg32 = reg;
15156 while (count--) {
15157 *bp32++ = RD_REG_DWORD(ha, reg32++);
15158 }
15159 return (bp32);
15160 case 16:
15161 bp16 = buf;
15162 reg16 = reg;
15163 while (count--) {
15164 *bp16++ = RD_REG_WORD(ha, reg16++);
15165 }
15166 return (bp16);
15167 case 8:
15168 bp8 = buf;
15169 reg8 = reg;
15170 while (count--) {
15171 *bp8++ = RD_REG_BYTE(ha, reg8++);
15172 }
15173 return (bp8);
15174 default:
15175 EL(ha, "Unknown word size=%d\n", wds);
15176 return (buf);
15177 }
15178 }
15179
15180 static int
15181 ql_save_config_regs(dev_info_t *dip)
15182 {
15183 ql_adapter_state_t *ha;
15184 int ret;
15185 ql_config_space_t chs;
15186 caddr_t prop = "ql-config-space";
15187
15188 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15189 if (ha == NULL) {
15190 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15191 ddi_get_instance(dip));
15192 return (DDI_FAILURE);
15193 }
15194
15195 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15196
15197 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15198 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15199 1) {
15200 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15201 return (DDI_SUCCESS);
15202 }
15203
15204 chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15205 chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15206 PCI_CONF_HEADER);
15207 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15208 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15209 PCI_BCNF_BCNTRL);
15210 }
15211
15212 chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15213 PCI_CONF_CACHE_LINESZ);
15214
15215 chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15216 PCI_CONF_LATENCY_TIMER);
15217
15218 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15219 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15220 PCI_BCNF_LATENCY_TIMER);
15221 }
15222
15223 chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15224 chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15225 chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15226 chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15227 chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15228 chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15229
15230 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15231 ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15232 (uchar_t *)&chs, sizeof (ql_config_space_t));
15233
15234 if (ret != DDI_PROP_SUCCESS) {
15235 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15236 QL_NAME, ddi_get_instance(dip), prop);
15237 return (DDI_FAILURE);
15238 }
15239
15240 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15241
15242 return (DDI_SUCCESS);
15243 }
15244
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Fetch the snapshot saved by ql_save_config_regs(). */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_FAILURE);
	}

	/*
	 * NOTE(review): the command register is restored before the BARs;
	 * presumably the device is kept quiescent by the caller while this
	 * runs -- confirm before reordering.
	 */
	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	/* Bridge control exists only in type-1 (bridge) headers. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	/* Secondary latency timer exists only in type-1 headers. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* One-shot snapshot: remove the property after a restore. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15307
15308 uint8_t
15309 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15310 {
15311 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15312 return (ddi_get8(ha->sbus_config_handle,
15313 (uint8_t *)(ha->sbus_config_base + off)));
15314 }
15315
15316 #ifdef KERNEL_32
15317 return (pci_config_getb(ha->pci_handle, off));
15318 #else
15319 return (pci_config_get8(ha->pci_handle, off));
15320 #endif
15321 }
15322
15323 uint16_t
15324 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15325 {
15326 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15327 return (ddi_get16(ha->sbus_config_handle,
15328 (uint16_t *)(ha->sbus_config_base + off)));
15329 }
15330
15331 #ifdef KERNEL_32
15332 return (pci_config_getw(ha->pci_handle, off));
15333 #else
15334 return (pci_config_get16(ha->pci_handle, off));
15335 #endif
15336 }
15337
15338 uint32_t
15339 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15340 {
15341 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15342 return (ddi_get32(ha->sbus_config_handle,
15343 (uint32_t *)(ha->sbus_config_base + off)));
15344 }
15345
15346 #ifdef KERNEL_32
15347 return (pci_config_getl(ha->pci_handle, off));
15348 #else
15349 return (pci_config_get32(ha->pci_handle, off));
15350 #endif
15351 }
15352
15353 void
15354 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15355 {
15356 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15357 ddi_put8(ha->sbus_config_handle,
15358 (uint8_t *)(ha->sbus_config_base + off), val);
15359 } else {
15360 #ifdef KERNEL_32
15361 pci_config_putb(ha->pci_handle, off, val);
15362 #else
15363 pci_config_put8(ha->pci_handle, off, val);
15364 #endif
15365 }
15366 }
15367
15368 void
15369 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15370 {
15371 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15372 ddi_put16(ha->sbus_config_handle,
15373 (uint16_t *)(ha->sbus_config_base + off), val);
15374 } else {
15375 #ifdef KERNEL_32
15376 pci_config_putw(ha->pci_handle, off, val);
15377 #else
15378 pci_config_put16(ha->pci_handle, off, val);
15379 #endif
15380 }
15381 }
15382
15383 void
15384 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15385 {
15386 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15387 ddi_put32(ha->sbus_config_handle,
15388 (uint32_t *)(ha->sbus_config_base + off), val);
15389 } else {
15390 #ifdef KERNEL_32
15391 pci_config_putl(ha->pci_handle, off, val);
15392 #else
15393 pci_config_put32(ha->pci_handle, off, val);
15394 #endif
15395 }
15396 }
15397
15398 /*
15399 * ql_halt
15400 * Waits for commands that are running to finish and
15401 * if they do not, commands are aborted.
15402 * Finally the adapter is reset.
15403 *
15404 * Input:
15405 * ha: adapter state pointer.
15406 * pwr: power state.
15407 *
15408 * Context:
15409 * Kernel context.
15410 */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* (3000 iterations * 10000us delay). */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			/* cnt == 0 means the 30 second wait expired. */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * Restart the scan from slot 1 if
					 * new commands became pending.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					/* Abort only this target's srbs. */
					sp = ha->outstanding_cmds[cnt];
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Going to full power-off: mark offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15495
15496 /*
15497 * ql_get_dma_mem
15498 * Function used to allocate dma memory.
15499 *
15500 * Input:
15501 * ha: adapter state pointer.
15502 * mem: pointer to dma memory object.
15503 * size: size of the request in bytes
15504 *
15505 * Returns:
15506 * qn local function return status code.
15507 *
15508 * Context:
15509 * Kernel context.
15510 */
15511 int
15512 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15513 mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15514 {
15515 int rval;
15516
15517 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15518
15519 mem->size = size;
15520 mem->type = allocation_type;
15521 mem->cookie_count = 1;
15522
15523 switch (alignment) {
15524 case QL_DMA_DATA_ALIGN:
15525 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15526 break;
15527 case QL_DMA_RING_ALIGN:
15528 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15529 break;
15530 default:
15531 EL(ha, "failed, unknown alignment type %x\n", alignment);
15532 break;
15533 }
15534
15535 if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15536 ql_free_phys(ha, mem);
15537 EL(ha, "failed, alloc_phys=%xh\n", rval);
15538 }
15539
15540 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15541
15542 return (rval);
15543 }
15544
15545 /*
15546 * ql_alloc_phys
15547 * Function used to allocate memory and zero it.
15548 * Memory is below 4 GB.
15549 *
15550 * Input:
15551 * ha: adapter state pointer.
15552 * mem: pointer to dma memory object.
15553 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15554 * mem->cookie_count number of segments allowed.
15555 * mem->type memory allocation type.
15556 * mem->size memory size.
15557 * mem->alignment memory alignment.
15558 *
15559 * Returns:
15560 * qn local function return status code.
15561 *
15562 * Context:
15563 * Kernel context.
15564 */
15565 int
15566 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15567 {
15568 size_t rlen;
15569 ddi_dma_attr_t dma_attr;
15570 ddi_device_acc_attr_t acc_attr = ql_dev_acc_attr;
15571
15572 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15573
15574 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15575 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15576
15577 dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15578 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15579
15580 /*
15581 * Workaround for SUN XMITS buffer must end and start on 8 byte
15582 * boundary. Else, hardware will overrun the buffer. Simple fix is
15583 * to make sure buffer has enough room for overrun.
15584 */
15585 if (mem->size & 7) {
15586 mem->size += 8 - (mem->size & 7);
15587 }
15588
15589 mem->flags = DDI_DMA_CONSISTENT;
15590
15591 /*
15592 * Allocate DMA memory for command.
15593 */
15594 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15595 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15596 DDI_SUCCESS) {
15597 EL(ha, "failed, ddi_dma_alloc_handle\n");
15598 mem->dma_handle = NULL;
15599 return (QL_MEMORY_ALLOC_FAILED);
15600 }
15601
15602 switch (mem->type) {
15603 case KERNEL_MEM:
15604 mem->bp = kmem_zalloc(mem->size, sleep);
15605 break;
15606 case BIG_ENDIAN_DMA:
15607 case LITTLE_ENDIAN_DMA:
15608 case NO_SWAP_DMA:
15609 if (mem->type == BIG_ENDIAN_DMA) {
15610 acc_attr.devacc_attr_endian_flags =
15611 DDI_STRUCTURE_BE_ACC;
15612 } else if (mem->type == NO_SWAP_DMA) {
15613 acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
15614 }
15615 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
15616 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15617 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
15618 &mem->acc_handle) == DDI_SUCCESS) {
15619 bzero(mem->bp, mem->size);
15620 /* ensure we got what we asked for (32bit) */
15621 if (dma_attr.dma_attr_addr_hi == NULL) {
15622 if (mem->cookie.dmac_notused != NULL) {
15623 EL(ha, "failed, ddi_dma_mem_alloc "
15624 "returned 64 bit DMA address\n");
15625 ql_free_phys(ha, mem);
15626 return (QL_MEMORY_ALLOC_FAILED);
15627 }
15628 }
15629 } else {
15630 mem->acc_handle = NULL;
15631 mem->bp = NULL;
15632 }
15633 break;
15634 default:
15635 EL(ha, "failed, unknown type=%xh\n", mem->type);
15636 mem->acc_handle = NULL;
15637 mem->bp = NULL;
15638 break;
15639 }
15640
15641 if (mem->bp == NULL) {
15642 EL(ha, "failed, ddi_dma_mem_alloc\n");
15643 ddi_dma_free_handle(&mem->dma_handle);
15644 mem->dma_handle = NULL;
15645 return (QL_MEMORY_ALLOC_FAILED);
15646 }
15647
15648 mem->flags |= DDI_DMA_RDWR;
15649
15650 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15651 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15652 ql_free_phys(ha, mem);
15653 return (QL_MEMORY_ALLOC_FAILED);
15654 }
15655
15656 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15657
15658 return (QL_SUCCESS);
15659 }
15660
15661 /*
15662 * ql_free_phys
15663 * Function used to free physical memory.
15664 *
15665 * Input:
15666 * ha: adapter state pointer.
15667 * mem: pointer to dma memory object.
15668 *
15669 * Context:
15670 * Kernel context.
15671 */
15672 void
15673 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15674 {
15675 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15676
15677 if (mem != NULL && mem->dma_handle != NULL) {
15678 ql_unbind_dma_buffer(ha, mem);
15679 switch (mem->type) {
15680 case KERNEL_MEM:
15681 if (mem->bp != NULL) {
15682 kmem_free(mem->bp, mem->size);
15683 }
15684 break;
15685 case LITTLE_ENDIAN_DMA:
15686 case BIG_ENDIAN_DMA:
15687 case NO_SWAP_DMA:
15688 if (mem->acc_handle != NULL) {
15689 ddi_dma_mem_free(&mem->acc_handle);
15690 mem->acc_handle = NULL;
15691 }
15692 break;
15693 default:
15694 break;
15695 }
15696 mem->bp = NULL;
15697 ddi_dma_free_handle(&mem->dma_handle);
15698 mem->dma_handle = NULL;
15699 }
15700
15701 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15702 }
15703
15704 /*
15705 * ql_alloc_dma_resouce.
15706 * Allocates DMA resource for buffer.
15707 *
15708 * Input:
15709 * ha: adapter state pointer.
15710 * mem: pointer to dma memory object.
15711 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15712 * mem->cookie_count number of segments allowed.
15713 * mem->type memory allocation type.
15714 * mem->size memory size.
15715 * mem->bp pointer to memory or struct buf
15716 *
15717 * Returns:
15718 * qn local function return status code.
15719 *
15720 * Context:
15721 * Kernel context.
15722 */
15723 int
15724 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15725 {
15726 ddi_dma_attr_t dma_attr;
15727
15728 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15729
15730 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15731 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15732 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15733
15734 /*
15735 * Allocate DMA handle for command.
15736 */
15737 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15738 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15739 DDI_SUCCESS) {
15740 EL(ha, "failed, ddi_dma_alloc_handle\n");
15741 mem->dma_handle = NULL;
15742 return (QL_MEMORY_ALLOC_FAILED);
15743 }
15744
15745 mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15746
15747 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15748 EL(ha, "failed, bind_dma_buffer\n");
15749 ddi_dma_free_handle(&mem->dma_handle);
15750 mem->dma_handle = NULL;
15751 return (QL_MEMORY_ALLOC_FAILED);
15752 }
15753
15754 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15755
15756 return (QL_SUCCESS);
15757 }
15758
15759 /*
15760 * ql_free_dma_resource
15761 * Frees DMA resources.
15762 *
15763 * Input:
15764 * ha: adapter state pointer.
15765 * mem: pointer to dma memory object.
15766 * mem->dma_handle DMA memory handle.
15767 *
15768 * Context:
15769 * Kernel context.
15770 */
15771 void
15772 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15773 {
15774 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15775
15776 ql_free_phys(ha, mem);
15777
15778 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15779 }
15780
15781 /*
15782 * ql_bind_dma_buffer
15783 * Binds DMA buffer.
15784 *
15785 * Input:
15786 * ha: adapter state pointer.
15787 * mem: pointer to dma memory object.
15788 * sleep: KM_SLEEP or KM_NOSLEEP.
15789 * mem->dma_handle DMA memory handle.
15790 * mem->cookie_count number of segments allowed.
15791 * mem->type memory allocation type.
15792 * mem->size memory size.
15793 * mem->bp pointer to memory or struct buf
15794 *
15795 * Returns:
15796 * mem->cookies pointer to list of cookies.
15797 * mem->cookie_count number of cookies.
15798 * status success = DDI_DMA_MAPPED
15799 * DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15800 * DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15801 * DDI_DMA_TOOBIG
15802 *
15803 * Context:
15804 * Kernel context.
15805 */
15806 static int
15807 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15808 {
15809 int rval;
15810 ddi_dma_cookie_t *cookiep;
15811 uint32_t cnt = mem->cookie_count;
15812
15813 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15814
15815 if (mem->type == STRUCT_BUF_MEMORY) {
15816 rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15817 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15818 DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15819 } else {
15820 rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15821 mem->size, mem->flags, (sleep == KM_SLEEP) ?
15822 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15823 &mem->cookie_count);
15824 }
15825
15826 if (rval == DDI_DMA_MAPPED) {
15827 if (mem->cookie_count > cnt) {
15828 (void) ddi_dma_unbind_handle(mem->dma_handle);
15829 EL(ha, "failed, cookie_count %d > %d\n",
15830 mem->cookie_count, cnt);
15831 rval = DDI_DMA_TOOBIG;
15832 } else {
15833 if (mem->cookie_count > 1) {
15834 if (mem->cookies = kmem_zalloc(
15835 sizeof (ddi_dma_cookie_t) *
15836 mem->cookie_count, sleep)) {
15837 *mem->cookies = mem->cookie;
15838 cookiep = mem->cookies;
15839 for (cnt = 1; cnt < mem->cookie_count;
15840 cnt++) {
15841 ddi_dma_nextcookie(
15842 mem->dma_handle,
15843 ++cookiep);
15844 }
15845 } else {
15846 (void) ddi_dma_unbind_handle(
15847 mem->dma_handle);
15848 EL(ha, "failed, kmem_zalloc\n");
15849 rval = DDI_DMA_NORESOURCES;
15850 }
15851 } else {
15852 /*
15853 * It has been reported that dmac_size at times
15854 * may be incorrect on sparc machines so for
15855 * sparc machines that only have one segment
15856 * use the buffer size instead.
15857 */
15858 mem->cookies = &mem->cookie;
15859 mem->cookies->dmac_size = mem->size;
15860 }
15861 }
15862 }
15863
15864 if (rval != DDI_DMA_MAPPED) {
15865 EL(ha, "failed=%xh\n", rval);
15866 } else {
15867 /*EMPTY*/
15868 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15869 }
15870
15871 return (rval);
15872 }
15873
15874 /*
15875 * ql_unbind_dma_buffer
15876 * Unbinds DMA buffer.
15877 *
15878 * Input:
15879 * ha: adapter state pointer.
15880 * mem: pointer to dma memory object.
15881 * mem->dma_handle DMA memory handle.
15882 * mem->cookies pointer to cookie list.
15883 * mem->cookie_count number of cookies.
15884 *
15885 * Context:
15886 * Kernel context.
15887 */
15888 /* ARGSUSED */
15889 static void
15890 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15891 {
15892 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15893
15894 (void) ddi_dma_unbind_handle(mem->dma_handle);
15895 if (mem->cookie_count > 1) {
15896 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15897 mem->cookie_count);
15898 mem->cookies = NULL;
15899 }
15900 mem->cookie_count = 0;
15901
15902 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15903 }
15904
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for a suspend/power transition: claims
 *	mailbox ownership, drains outstanding commands via ql_halt(),
 *	and disables ISP interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS or QL_FUNCTION_TIMEOUT if mailbox ownership could
 *	not be obtained within the timeout.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* cv wait bound: 32s here vs "30 seconds" below — TODO confirm. */
	clock_t timer = 32 * drv_usectohz(1000000);

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		/* Advertise that we want the mailbox when it frees up. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Let in-flight I/O complete before tearing anything down. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15974
15975 /*
15976 * ql_add_link_b
15977 * Add link to the end of the chain.
15978 *
15979 * Input:
15980 * head = Head of link list.
15981 * link = link to be added.
15982 * LOCK must be already obtained.
15983 *
15984 * Context:
15985 * Interrupt or Kernel context, no mailbox commands allowed.
15986 */
15987 void
15988 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15989 {
15990 /* at the end there isn't a next */
15991 link->next = NULL;
15992
15993 if ((link->prev = head->last) == NULL) {
15994 head->first = link;
15995 } else {
15996 head->last->next = link;
15997 }
15998
15999 head->last = link;
16000 link->head = head; /* the queue we're on */
16001 }
16002
16003 /*
16004 * ql_add_link_t
16005 * Add link to the beginning of the chain.
16006 *
16007 * Input:
16008 * head = Head of link list.
16009 * link = link to be added.
16010 * LOCK must be already obtained.
16011 *
16012 * Context:
16013 * Interrupt or Kernel context, no mailbox commands allowed.
16014 */
16015 void
16016 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16017 {
16018 link->prev = NULL;
16019
16020 if ((link->next = head->first) == NULL) {
16021 head->last = link;
16022 } else {
16023 head->first->prev = link;
16024 }
16025
16026 head->first = link;
16027 link->head = head; /* the queue we're on */
16028 }
16029
16030 /*
16031 * ql_remove_link
16032 * Remove a link from the chain.
16033 *
16034 * Input:
16035 * head = Head of link list.
16036 * link = link to be removed.
16037 * LOCK must be already obtained.
16038 *
16039 * Context:
16040 * Interrupt or Kernel context, no mailbox commands allowed.
16041 */
16042 void
16043 ql_remove_link(ql_head_t *head, ql_link_t *link)
16044 {
16045 if (link->prev != NULL) {
16046 if ((link->prev->next = link->next) == NULL) {
16047 head->last = link->prev;
16048 } else {
16049 link->next->prev = link->prev;
16050 }
16051 } else if ((head->first = link->next) == NULL) {
16052 head->last = NULL;
16053 } else {
16054 head->first->prev = NULL;
16055 }
16056
16057 /* not on a queue any more */
16058 link->prev = link->next = NULL;
16059 link->head = NULL;
16060 }
16061
16062 /*
16063 * ql_chg_endian
16064 * Change endianess of byte array.
16065 *
16066 * Input:
16067 * buf = array pointer.
16068 * size = size of array in bytes.
16069 *
16070 * Context:
16071 * Interrupt or Kernel context, no mailbox commands allowed.
16072 */
16073 void
16074 ql_chg_endian(uint8_t buf[], size_t size)
16075 {
16076 uint8_t byte;
16077 size_t cnt1;
16078 size_t cnt;
16079
16080 cnt1 = size - 1;
16081 for (cnt = 0; cnt < size / 2; cnt++) {
16082 byte = buf[cnt1];
16083 buf[cnt1] = buf[cnt];
16084 buf[cnt] = byte;
16085 cnt1--;
16086 }
16087 }
16088
16089 /*
16090 * ql_bstr_to_dec
16091 * Convert decimal byte string to number.
16092 *
16093 * Input:
16094 * s: byte string pointer.
16095 * ans: interger pointer for number.
16096 * size: number of ascii bytes.
16097 *
16098 * Returns:
16099 * success = number of ascii bytes processed.
16100 *
16101 * Context:
16102 * Kernel/Interrupt context.
16103 */
16104 static int
16105 ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
16106 {
16107 int mul, num, cnt, pos;
16108 char *str;
16109
16110 /* Calculate size of number. */
16111 if (size == 0) {
16112 for (str = s; *str >= '0' && *str <= '9'; str++) {
16113 size++;
16114 }
16115 }
16116
16117 *ans = 0;
16118 for (cnt = 0; *s != '\0' && size; size--, cnt++) {
16119 if (*s >= '0' && *s <= '9') {
16120 num = *s++ - '0';
16121 } else {
16122 break;
16123 }
16124
16125 for (mul = 1, pos = 1; pos < size; pos++) {
16126 mul *= 10;
16127 }
16128 *ans += num * mul;
16129 }
16130
16131 return (cnt);
16132 }
16133
16134 /*
16135 * ql_delay
16136 * Calls delay routine if threads are not suspended, otherwise, busy waits
16137 * Minimum = 1 tick = 10ms
16138 *
16139 * Input:
16140 * dly = delay time in microseconds.
16141 *
16142 * Context:
16143 * Kernel or Interrupt context, no mailbox commands allowed.
16144 */
16145 void
16146 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16147 {
16148 if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16149 drv_usecwait(usecs);
16150 } else {
16151 delay(drv_usectohz(usecs));
16152 }
16153 }
16154
16155 /*
16156 * ql_stall_drv
16157 * Stalls one or all driver instances, waits for 30 seconds.
16158 *
16159 * Input:
16160 * ha: adapter state pointer or NULL for all.
16161 * options: BIT_0 --> leave driver stalled on exit if
16162 * failed.
16163 *
16164 * Returns:
16165 * ql local function return status code.
16166 *
16167 * Context:
16168 * Kernel context.
16169 */
16170 int
16171 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16172 {
16173 ql_link_t *link;
16174 ql_adapter_state_t *ha2;
16175 uint32_t timer;
16176
16177 QL_PRINT_3(CE_CONT, "started\n");
16178
16179 /* Wait for 30 seconds for daemons unstall. */
16180 timer = 3000;
16181 link = ha == NULL ? ql_hba.first : &ha->hba;
16182 while (link != NULL && timer) {
16183 ha2 = link->base_address;
16184
16185 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16186
16187 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16188 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16189 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16190 ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16191 link = ha == NULL ? link->next : NULL;
16192 continue;
16193 }
16194
16195 ql_delay(ha2, 10000);
16196 timer--;
16197 link = ha == NULL ? ql_hba.first : &ha->hba;
16198 }
16199
16200 if (ha2 != NULL && timer == 0) {
16201 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16202 ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16203 "unstalled"));
16204 if (options & BIT_0) {
16205 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16206 }
16207 return (QL_FUNCTION_TIMEOUT);
16208 }
16209
16210 QL_PRINT_3(CE_CONT, "done\n");
16211
16212 return (QL_SUCCESS);
16213 }
16214
16215 /*
16216 * ql_restart_driver
16217 * Restarts one or all driver instances.
16218 *
16219 * Input:
16220 * ha: adapter state pointer or NULL for all.
16221 *
16222 * Context:
16223 * Kernel context.
16224 */
16225 void
16226 ql_restart_driver(ql_adapter_state_t *ha)
16227 {
16228 ql_link_t *link;
16229 ql_adapter_state_t *ha2;
16230 uint32_t timer;
16231
16232 QL_PRINT_3(CE_CONT, "started\n");
16233
16234 /* Tell all daemons to unstall. */
16235 link = ha == NULL ? ql_hba.first : &ha->hba;
16236 while (link != NULL) {
16237 ha2 = link->base_address;
16238
16239 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16240
16241 link = ha == NULL ? link->next : NULL;
16242 }
16243
16244 /* Wait for 30 seconds for all daemons unstall. */
16245 timer = 3000;
16246 link = ha == NULL ? ql_hba.first : &ha->hba;
16247 while (link != NULL && timer) {
16248 ha2 = link->base_address;
16249
16250 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16251 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16252 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
16253 QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
16254 ha2->instance, ha2->vp_index);
16255 ql_restart_queues(ha2);
16256 link = ha == NULL ? link->next : NULL;
16257 continue;
16258 }
16259
16260 QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
16261 ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
16262
16263 ql_delay(ha2, 10000);
16264 timer--;
16265 link = ha == NULL ? ql_hba.first : &ha->hba;
16266 }
16267
16268 QL_PRINT_3(CE_CONT, "done\n");
16269 }
16270
16271 /*
16272 * ql_setup_interrupts
16273 * Sets up interrupts based on the HBA's and platform's
16274 * capabilities (e.g., legacy / MSI / FIXED).
16275 *
16276 * Input:
16277 * ha = adapter state pointer.
16278 *
16279 * Returns:
16280 * DDI_SUCCESS or DDI_FAILURE.
16281 *
16282 * Context:
16283 * Kernel context.
16284 */
16285 static int
16286 ql_setup_interrupts(ql_adapter_state_t *ha)
16287 {
16288 int32_t rval = DDI_FAILURE;
16289 int32_t i;
16290 int32_t itypes = 0;
16291
16292 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16293
16294 /*
16295 * The Solaris Advanced Interrupt Functions (aif) are only
16296 * supported on s10U1 or greater.
16297 */
16298 if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16299 EL(ha, "interrupt framework is not supported or is "
16300 "disabled, using legacy\n");
16301 return (ql_legacy_intr(ha));
16302 } else if (ql_os_release_level == 10) {
16303 /*
16304 * See if the advanced interrupt functions (aif) are
16305 * in the kernel
16306 */
16307 void *fptr = (void *)&ddi_intr_get_supported_types;
16308
16309 if (fptr == NULL) {
16310 EL(ha, "aif is not supported, using legacy "
16311 "interrupts (rev)\n");
16312 return (ql_legacy_intr(ha));
16313 }
16314 }
16315
16316 /* See what types of interrupts this HBA and platform support */
16317 if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16318 DDI_SUCCESS) {
16319 EL(ha, "get supported types failed, rval=%xh, "
16320 "assuming FIXED\n", i);
16321 itypes = DDI_INTR_TYPE_FIXED;
16322 }
16323
16324 EL(ha, "supported types are: %xh\n", itypes);
16325
16326 if ((itypes & DDI_INTR_TYPE_MSIX) &&
16327 (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16328 EL(ha, "successful MSI-X setup\n");
16329 } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16330 (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16331 EL(ha, "successful MSI setup\n");
16332 } else {
16333 rval = ql_setup_fixed(ha);
16334 }
16335
16336 if (rval != DDI_SUCCESS) {
16337 EL(ha, "failed, aif, rval=%xh\n", rval);
16338 } else {
16339 /*EMPTY*/
16340 QL_PRINT_3(CE_CONT, "(%d): done\n");
16341 }
16342
16343 return (rval);
16344 }
16345
16346 /*
16347 * ql_setup_msi
16348 * Set up aif MSI interrupts
16349 *
16350 * Input:
16351 * ha = adapter state pointer.
16352 *
16353 * Returns:
16354 * DDI_SUCCESS or DDI_FAILURE.
16355 *
16356 * Context:
16357 * Kernel context.
16358 */
16359 static int
16360 ql_setup_msi(ql_adapter_state_t *ha)
16361 {
16362 int32_t count = 0;
16363 int32_t avail = 0;
16364 int32_t actual = 0;
16365 int32_t msitype = DDI_INTR_TYPE_MSI;
16366 int32_t ret;
16367 ql_ifunc_t itrfun[10] = {0};
16368
16369 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16370
16371 if (ql_disable_msi != 0) {
16372 EL(ha, "MSI is disabled by user\n");
16373 return (DDI_FAILURE);
16374 }
16375
16376 /* MSI support is only suported on 24xx HBA's. */
16377 if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
16378 EL(ha, "HBA does not support MSI\n");
16379 return (DDI_FAILURE);
16380 }
16381
16382 /* Get number of MSI interrupts the system supports */
16383 if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16384 DDI_SUCCESS) || count == 0) {
16385 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16386 return (DDI_FAILURE);
16387 }
16388
16389 /* Get number of available MSI interrupts */
16390 if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16391 DDI_SUCCESS) || avail == 0) {
16392 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16393 return (DDI_FAILURE);
16394 }
16395
16396 /* MSI requires only 1. */
16397 count = 1;
16398 itrfun[0].ifunc = &ql_isr_aif;
16399
16400 /* Allocate space for interrupt handles */
16401 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16402 ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16403
16404 ha->iflags |= IFLG_INTR_MSI;
16405
16406 /* Allocate the interrupts */
16407 if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
16408 &actual, 0)) != DDI_SUCCESS || actual < count) {
16409 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16410 "actual=%xh\n", ret, count, actual);
16411 ql_release_intr(ha);
16412 return (DDI_FAILURE);
16413 }
16414
16415 ha->intr_cnt = actual;
16416
16417 /* Get interrupt priority */
16418 if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16419 DDI_SUCCESS) {
16420 EL(ha, "failed, get_pri ret=%xh\n", ret);
16421 ql_release_intr(ha);
16422 return (ret);
16423 }
16424
16425 /* Add the interrupt handler */
16426 if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
16427 (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
16428 EL(ha, "failed, intr_add ret=%xh\n", ret);
16429 ql_release_intr(ha);
16430 return (ret);
16431 }
16432
16433 /* Setup mutexes */
16434 if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16435 EL(ha, "failed, mutex init ret=%xh\n", ret);
16436 ql_release_intr(ha);
16437 return (ret);
16438 }
16439
16440 /* Get the capabilities */
16441 (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16442
16443 /* Enable interrupts */
16444 if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16445 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16446 DDI_SUCCESS) {
16447 EL(ha, "failed, block enable, ret=%xh\n", ret);
16448 ql_destroy_mutex(ha);
16449 ql_release_intr(ha);
16450 return (ret);
16451 }
16452 } else {
16453 if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
16454 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16455 ql_destroy_mutex(ha);
16456 ql_release_intr(ha);
16457 return (ret);
16458 }
16459 }
16460
16461 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16462
16463 return (DDI_SUCCESS);
16464 }
16465
16466 /*
16467 * ql_setup_msix
16468 * Set up aif MSI-X interrupts
16469 *
16470 * Input:
16471 * ha = adapter state pointer.
16472 *
16473 * Returns:
16474 * DDI_SUCCESS or DDI_FAILURE.
16475 *
16476 * Context:
16477 * Kernel context.
16478 */
16479 static int
16480 ql_setup_msix(ql_adapter_state_t *ha)
16481 {
16482 uint16_t hwvect;
16483 int32_t count = 0;
16484 int32_t avail = 0;
16485 int32_t actual = 0;
16486 int32_t msitype = DDI_INTR_TYPE_MSIX;
16487 int32_t ret;
16488 uint32_t i;
16489 ql_ifunc_t itrfun[QL_MSIX_MAXAIF] = {0};
16490
16491 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16492
16493 if (ql_disable_msix != 0) {
16494 EL(ha, "MSI-X is disabled by user\n");
16495 return (DDI_FAILURE);
16496 }
16497
16498 /*
16499 * MSI-X support is only available on 24xx HBA's that have
16500 * rev A2 parts (revid = 3) or greater.
16501 */
16502 if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
16503 (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
16504 (ha->device_id == 0x8021))) {
16505 EL(ha, "HBA does not support MSI-X\n");
16506 return (DDI_FAILURE);
16507 }
16508
16509 if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
16510 EL(ha, "HBA does not support MSI-X (revid)\n");
16511 return (DDI_FAILURE);
16512 }
16513
16514 /* Per HP, these HP branded HBA's are not supported with MSI-X */
16515 if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
16516 ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
16517 EL(ha, "HBA does not support MSI-X (subdevid)\n");
16518 return (DDI_FAILURE);
16519 }
16520
16521 /* Get the number of 24xx/25xx MSI-X h/w vectors */
16522 hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
16523 ql_pci_config_get16(ha, 0x7e) :
16524 ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);
16525
16526 EL(ha, "pcie config space hwvect = %d\n", hwvect);
16527
16528 if (hwvect < QL_MSIX_MAXAIF) {
16529 EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
16530 QL_MSIX_MAXAIF, hwvect);
16531 return (DDI_FAILURE);
16532 }
16533
16534 /* Get number of MSI-X interrupts the platform h/w supports */
16535 if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16536 DDI_SUCCESS) || count == 0) {
16537 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16538 return (DDI_FAILURE);
16539 }
16540
16541 /* Get number of available system interrupts */
16542 if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16543 DDI_SUCCESS) || avail == 0) {
16544 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16545 return (DDI_FAILURE);
16546 }
16547
16548 /* Fill out the intr table */
16549 count = QL_MSIX_MAXAIF;
16550 itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
16551 itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;
16552
16553 /* Allocate space for interrupt handles */
16554 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
16555 if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
16556 ha->hsize = 0;
16557 EL(ha, "failed, unable to allocate htable space\n");
16558 return (DDI_FAILURE);
16559 }
16560
16561 ha->iflags |= IFLG_INTR_MSIX;
16562
16563 /* Allocate the interrupts */
16564 if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
16565 DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
16566 actual < QL_MSIX_MAXAIF) {
16567 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16568 "actual=%xh\n", ret, count, actual);
16569 ql_release_intr(ha);
16570 return (DDI_FAILURE);
16571 }
16572
16573 ha->intr_cnt = actual;
16574
16575 /* Get interrupt priority */
16576 if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16577 DDI_SUCCESS) {
16578 EL(ha, "failed, get_pri ret=%xh\n", ret);
16579 ql_release_intr(ha);
16580 return (ret);
16581 }
16582
16583 /* Add the interrupt handlers */
16584 for (i = 0; i < actual; i++) {
16585 if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
16586 (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
16587 EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
16588 actual, ret);
16589 ql_release_intr(ha);
16590 return (ret);
16591 }
16592 }
16593
16594 /*
16595 * duplicate the rest of the intr's
16596 * ddi_intr_dup_handler() isn't working on x86 just yet...
16597 */
16598 #ifdef __sparc
16599 for (i = actual; i < hwvect; i++) {
16600 if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
16601 &ha->htable[i])) != DDI_SUCCESS) {
16602 EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
16603 i, actual, ret);
16604 ql_release_intr(ha);
16605 return (ret);
16606 }
16607 }
16608 #endif
16609
16610 /* Setup mutexes */
16611 if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16612 EL(ha, "failed, mutex init ret=%xh\n", ret);
16613 ql_release_intr(ha);
16614 return (ret);
16615 }
16616
16617 /* Get the capabilities */
16618 (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16619
16620 /* Enable interrupts */
16621 if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16622 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16623 DDI_SUCCESS) {
16624 EL(ha, "failed, block enable, ret=%xh\n", ret);
16625 ql_destroy_mutex(ha);
16626 ql_release_intr(ha);
16627 return (ret);
16628 }
16629 } else {
16630 for (i = 0; i < ha->intr_cnt; i++) {
16631 if ((ret = ddi_intr_enable(ha->htable[i])) !=
16632 DDI_SUCCESS) {
16633 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16634 ql_destroy_mutex(ha);
16635 ql_release_intr(ha);
16636 return (ret);
16637 }
16638 }
16639 }
16640
16641 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16642
16643 return (DDI_SUCCESS);
16644 }
16645
16646 /*
16647 * ql_setup_fixed
16648 * Sets up aif FIXED interrupts
16649 *
16650 * Input:
16651 * ha = adapter state pointer.
16652 *
16653 * Returns:
16654 * DDI_SUCCESS or DDI_FAILURE.
16655 *
16656 * Context:
16657 * Kernel context.
16658 */
16659 static int
16660 ql_setup_fixed(ql_adapter_state_t *ha)
16661 {
16662 int32_t count = 0;
16663 int32_t actual = 0;
16664 int32_t ret;
16665 uint32_t i;
16666
16667 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16668
16669 /* Get number of fixed interrupts the system supports */
16670 if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
16671 &count)) != DDI_SUCCESS) || count == 0) {
16672 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16673 return (DDI_FAILURE);
16674 }
16675
16676 ha->iflags |= IFLG_INTR_FIXED;
16677
16678 /* Allocate space for interrupt handles */
16679 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16680 ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16681
16682 /* Allocate the interrupts */
16683 if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
16684 0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
16685 actual < count) {
16686 EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
16687 "actual=%xh\n", ret, count, actual);
16688 ql_release_intr(ha);
16689 return (DDI_FAILURE);
16690 }
16691
16692 ha->intr_cnt = actual;
16693
16694 /* Get interrupt priority */
16695 if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16696 DDI_SUCCESS) {
16697 EL(ha, "failed, get_pri ret=%xh\n", ret);
16698 ql_release_intr(ha);
16699 return (ret);
16700 }
16701
16702 /* Add the interrupt handlers */
16703 for (i = 0; i < ha->intr_cnt; i++) {
16704 if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
16705 (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
16706 EL(ha, "failed, intr_add ret=%xh\n", ret);
16707 ql_release_intr(ha);
16708 return (ret);
16709 }
16710 }
16711
16712 /* Setup mutexes */
16713 if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16714 EL(ha, "failed, mutex init ret=%xh\n", ret);
16715 ql_release_intr(ha);
16716 return (ret);
16717 }
16718
16719 /* Enable interrupts */
16720 for (i = 0; i < ha->intr_cnt; i++) {
16721 if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
16722 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16723 ql_destroy_mutex(ha);
16724 ql_release_intr(ha);
16725 return (ret);
16726 }
16727 }
16728
16729 EL(ha, "using FIXED interupts\n");
16730
16731 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16732
16733 return (DDI_SUCCESS);
16734 }
16735
16736 /*
16737 * ql_disable_intr
16738 * Disables interrupts
16739 *
16740 * Input:
16741 * ha = adapter state pointer.
16742 *
16743 * Returns:
16744 *
16745 * Context:
16746 * Kernel context.
16747 */
static void
ql_disable_intr(ql_adapter_state_t *ha)
{
	uint32_t i, rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Void return: disable failures below are logged via EL() but
	 * cannot be propagated to the caller.
	 */
	if (!(ha->iflags & IFLG_INTR_AIF)) {

		/* Disable legacy interrupts */
		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);

	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {

		/* Remove AIF block interrupts (MSI) */
		/* Block-capable MSI/MSI-X: all vectors disabled in one call. */
		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
		    != DDI_SUCCESS) {
			EL(ha, "failed intr block disable, rval=%x\n", rval);
		}

	} else {

		/* Remove AIF non-block interrupts (fixed). */
		/* Disable each vector individually; continue on error. */
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed intr disable, intr#=%xh, "
				    "rval=%xh\n", i, rval);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16783
16784 /*
16785 * ql_release_intr
16786 * Releases aif legacy interrupt resources
16787 *
16788 * Input:
16789 * ha = adapter state pointer.
16790 *
16791 * Returns:
16792 *
16793 * Context:
16794 * Kernel context.
16795 */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to do for the legacy (non-AIF) interrupt path. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/*
		 * Recompute the allocated handle count from hsize; this may
		 * exceed intr_cnt if setup failed part-way through.
		 */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				/* Slot never allocated; skip it. */
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Only the first intr_cnt vectors ever had a
			 * handler added (see setup routines).
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all AIF bookkeeping so a retry starts clean. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16837
16838 /*
16839 * ql_legacy_intr
16840 * Sets up legacy interrupts.
16841 *
 * NB: Only to be used if AIF (Advanced Interrupt Framework)
 * is NOT in the kernel.
16844 *
16845 * Input:
16846 * ha = adapter state pointer.
16847 *
16848 * Returns:
16849 * DDI_SUCCESS or DDI_FAILURE.
16850 *
16851 * Context:
16852 * Kernel context.
16853 */
16854 static int
16855 ql_legacy_intr(ql_adapter_state_t *ha)
16856 {
16857 int rval = DDI_SUCCESS;
16858
16859 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16860
16861 /* Setup mutexes */
16862 if (ql_init_mutex(ha) != DDI_SUCCESS) {
16863 EL(ha, "failed, mutex init\n");
16864 return (DDI_FAILURE);
16865 }
16866
16867 /* Setup standard/legacy interrupt handler */
16868 if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16869 (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16870 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16871 QL_NAME, ha->instance);
16872 ql_destroy_mutex(ha);
16873 rval = DDI_FAILURE;
16874 }
16875
16876 if (rval == DDI_SUCCESS) {
16877 ha->iflags |= IFLG_INTR_LEGACY;
16878 EL(ha, "using legacy interrupts\n");
16879 }
16880
16881 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16882
16883 return (rval);
16884 }
16885
16886 /*
16887 * ql_init_mutex
 *	Initializes mutexes and condition variables.
16889 *
16890 * Input:
16891 * ha = adapter state pointer.
16892 *
16893 * Returns:
16894 * DDI_SUCCESS or DDI_FAILURE.
16895 *
16896 * Context:
16897 * Kernel context.
16898 */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int ret;
	void *intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Choose the interrupt cookie all mutexes are initialized with:
	 * the AIF priority from ddi_intr_get_pri() when AIF vectors were
	 * allocated, otherwise the legacy iblock cookie.
	 */
	if (ha->iflags & IFLG_INTR_AIF) {
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	/* NOTE: ql_destroy_mutex must tear these down in reverse order. */

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16963
16964 /*
16965 * ql_destroy_mutex
 *	Destroys mutexes and condition variables.
16967 *
16968 * Input:
16969 * ha = adapter state pointer.
16970 *
16971 * Returns:
16972 *
16973 * Context:
16974 * Kernel context.
16975 */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Destroy in strict reverse order of ql_init_mutex(). */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16999
17000 /*
17001 * ql_fwmodule_resolve
17002 * Loads and resolves external firmware module and symbols
17003 *
17004 * Input:
17005 * ha: adapter state pointer.
17006 *
17007 * Returns:
17008 * ql local function return status code:
17009 * QL_SUCCESS - external f/w module module and symbols resolved
17010 * QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17011 * QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17012 * QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17013 * Context:
17014 * Kernel context.
17015 *
17016 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time. We
 * could switch to a tighter scope around actual download (and add an extra
17018 * ddi_modopen for module opens that occur before root is mounted).
17019 *
17020 */
17021 uint32_t
17022 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17023 {
17024 int8_t module[128];
17025 int8_t fw_version[128];
17026 uint32_t rval = QL_SUCCESS;
17027 caddr_t code, code02;
17028 uint8_t *p_ucfw;
17029 uint16_t *p_usaddr, *p_uslen;
17030 uint32_t *p_uiaddr, *p_uilen, *p_uifw;
17031 uint32_t *p_uiaddr02, *p_uilen02;
17032 struct fw_table *fwt;
17033 extern struct fw_table fw_table[];
17034
17035 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17036
17037 if (ha->fw_module != NULL) {
17038 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17039 ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17040 ha->fw_subminor_version);
17041 return (rval);
17042 }
17043
17044 /* make sure the fw_class is in the fw_table of supported classes */
17045 for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17046 if (fwt->fw_class == ha->fw_class)
17047 break; /* match */
17048 }
17049 if (fwt->fw_version == NULL) {
17050 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17051 "in driver's fw_table", QL_NAME, ha->instance,
17052 ha->fw_class);
17053 return (QL_FW_NOT_SUPPORTED);
17054 }
17055
17056 /*
17057 * open the module related to the fw_class
17058 */
17059 (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17060 ha->fw_class);
17061
17062 ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17063 if (ha->fw_module == NULL) {
17064 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17065 QL_NAME, ha->instance, module);
17066 return (QL_FWMODLOAD_FAILED);
17067 }
17068
17069 /*
17070 * resolve the fw module symbols, data types depend on fw_class
17071 */
17072
17073 switch (ha->fw_class) {
17074 case 0x2200:
17075 case 0x2300:
17076 case 0x6322:
17077
17078 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17079 NULL)) == NULL) {
17080 rval = QL_FWSYM_NOT_FOUND;
17081 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17082 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
17083 "risc_code_addr01", NULL)) == NULL) {
17084 rval = QL_FWSYM_NOT_FOUND;
17085 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17086 } else if ((p_uslen = ddi_modsym(ha->fw_module,
17087 "risc_code_length01", NULL)) == NULL) {
17088 rval = QL_FWSYM_NOT_FOUND;
17089 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17090 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17091 "firmware_version", NULL)) == NULL) {
17092 rval = QL_FWSYM_NOT_FOUND;
17093 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17094 }
17095
17096 if (rval == QL_SUCCESS) {
17097 ha->risc_fw[0].code = code;
17098 ha->risc_fw[0].addr = *p_usaddr;
17099 ha->risc_fw[0].length = *p_uslen;
17100
17101 (void) snprintf(fw_version, sizeof (fw_version),
17102 "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17103 }
17104 break;
17105
17106 case 0x2400:
17107 case 0x2500:
17108 case 0x8100:
17109
17110 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17111 NULL)) == NULL) {
17112 rval = QL_FWSYM_NOT_FOUND;
17113 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17114 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17115 "risc_code_addr01", NULL)) == NULL) {
17116 rval = QL_FWSYM_NOT_FOUND;
17117 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17118 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17119 "risc_code_length01", NULL)) == NULL) {
17120 rval = QL_FWSYM_NOT_FOUND;
17121 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17122 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17123 "firmware_version", NULL)) == NULL) {
17124 rval = QL_FWSYM_NOT_FOUND;
17125 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17126 }
17127
17128 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17129 NULL)) == NULL) {
17130 rval = QL_FWSYM_NOT_FOUND;
17131 EL(ha, "failed, f/w module %d rc02 symbol\n", module);
17132 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17133 "risc_code_addr02", NULL)) == NULL) {
17134 rval = QL_FWSYM_NOT_FOUND;
17135 EL(ha, "failed, f/w module %d rca02 symbol\n", module);
17136 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17137 "risc_code_length02", NULL)) == NULL) {
17138 rval = QL_FWSYM_NOT_FOUND;
17139 EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
17140 }
17141
17142 if (rval == QL_SUCCESS) {
17143 ha->risc_fw[0].code = code;
17144 ha->risc_fw[0].addr = *p_uiaddr;
17145 ha->risc_fw[0].length = *p_uilen;
17146 ha->risc_fw[1].code = code02;
17147 ha->risc_fw[1].addr = *p_uiaddr02;
17148 ha->risc_fw[1].length = *p_uilen02;
17149
17150 (void) snprintf(fw_version, sizeof (fw_version),
17151 "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17152 }
17153 break;
17154
17155 default:
17156 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17157 rval = QL_FW_NOT_SUPPORTED;
17158 }
17159
17160 if (rval != QL_SUCCESS) {
17161 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17162 "module %s (%x)", QL_NAME, ha->instance, module, rval);
17163 if (ha->fw_module != NULL) {
17164 (void) ddi_modclose(ha->fw_module);
17165 ha->fw_module = NULL;
17166 }
17167 } else {
17168 /*
17169 * check for firmware version mismatch between module and
17170 * compiled in fw_table version.
17171 */
17172
17173 if (strcmp(fwt->fw_version, fw_version) != 0) {
17174
17175 /*
17176 * If f/w / driver version mismatches then
17177 * return a successful status -- however warn
17178 * the user that this is NOT recommended.
17179 */
17180
17181 cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17182 "mismatch for %x: driver-%s module-%s", QL_NAME,
17183 ha->instance, ha->fw_class, fwt->fw_version,
17184 fw_version);
17185
17186 ha->cfg_flags |= CFG_FW_MISMATCH;
17187 } else {
17188 ha->cfg_flags &= ~CFG_FW_MISMATCH;
17189 }
17190 }
17191
17192 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17193
17194 return (rval);
17195 }
17196
17197 /*
17198 * ql_port_state
17199 * Set the state on all adapter ports.
17200 *
17201 * Input:
17202 * ha: parent adapter state pointer.
17203 * state: port state.
17204 * flags: task daemon flags to set.
17205 *
17206 * Context:
17207 * Interrupt or Kernel context, no mailbox commands allowed.
17208 */
void
ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
{
	ql_adapter_state_t *vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	TASK_DAEMON_LOCK(ha);
	/* Walk the physical adapter and every virtual port hung off it. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (FC_PORT_STATE_MASK(vha->state) != state) {
			/* Preserve the speed bits unless going offline. */
			vha->state = state != FC_STATE_OFFLINE ?
			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
			vha->task_daemon_flags |= flags;
		}
	}
	/* Only the LOOP_DOWN bit is propagated to the physical adapter. */
	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
17229
17230 /*
17231 * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17232 *
17233 * Input: Pointer to the adapter state structure.
17234 * Returns: Success or Failure.
17235 * Context: Kernel context.
17236 */
17237 int
17238 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17239 {
17240 int rval = DDI_SUCCESS;
17241
17242 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17243
17244 ha->el_trace_desc =
17245 (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17246
17247 if (ha->el_trace_desc == NULL) {
17248 cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17249 QL_NAME, ha->instance);
17250 rval = DDI_FAILURE;
17251 } else {
17252 ha->el_trace_desc->next = 0;
17253 ha->el_trace_desc->trace_buffer =
17254 (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17255
17256 if (ha->el_trace_desc->trace_buffer == NULL) {
17257 cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17258 QL_NAME, ha->instance);
17259 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17260 rval = DDI_FAILURE;
17261 } else {
17262 ha->el_trace_desc->trace_buffer_size =
17263 EL_TRACE_BUF_SIZE;
17264 mutex_init(&ha->el_trace_desc->mutex, NULL,
17265 MUTEX_DRIVER, NULL);
17266 }
17267 }
17268
17269 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17270
17271 return (rval);
17272 }
17273
17274 /*
17275 * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17276 *
17277 * Input: Pointer to the adapter state structure.
17278 * Returns: Success or Failure.
17279 * Context: Kernel context.
17280 */
17281 int
17282 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17283 {
17284 int rval = DDI_SUCCESS;
17285
17286 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17287
17288 if (ha->el_trace_desc == NULL) {
17289 cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17290 QL_NAME, ha->instance);
17291 rval = DDI_FAILURE;
17292 } else {
17293 if (ha->el_trace_desc->trace_buffer != NULL) {
17294 kmem_free(ha->el_trace_desc->trace_buffer,
17295 ha->el_trace_desc->trace_buffer_size);
17296 }
17297 mutex_destroy(&ha->el_trace_desc->mutex);
17298 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17299 }
17300
17301 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17302
17303 return (rval);
17304 }
17305
17306 /*
17307 * els_cmd_text - Return a pointer to a string describing the command
17308 *
17309 * Input: els_cmd = the els command opcode.
17310 * Returns: pointer to a string.
17311 * Context: Kernel context.
17312 */
17313 char *
17314 els_cmd_text(int els_cmd)
17315 {
17316 cmd_table_t *entry = &els_cmd_tbl[0];
17317
17318 return (cmd_text(entry, els_cmd));
17319 }
17320
17321 /*
17322 * mbx_cmd_text - Return a pointer to a string describing the command
17323 *
17324 * Input: mbx_cmd = the mailbox command opcode.
17325 * Returns: pointer to a string.
17326 * Context: Kernel context.
17327 */
17328 char *
17329 mbx_cmd_text(int mbx_cmd)
17330 {
17331 cmd_table_t *entry = &mbox_cmd_tbl[0];
17332
17333 return (cmd_text(entry, mbx_cmd));
17334 }
17335
17336 /*
17337 * cmd_text Return a pointer to a string describing the command
17338 *
17339 * Input: entry = the command table
17340 * cmd = the command.
17341 * Returns: pointer to a string.
17342 * Context: Kernel context.
17343 */
17344 char *
17345 cmd_text(cmd_table_t *entry, int cmd)
17346 {
17347 for (; entry->cmd != 0; entry++) {
17348 if (entry->cmd == cmd) {
17349 break;
17350 }
17351 }
17352 return (entry->string);
17353 }
17354
17355 /*
17356 * ql_els_24xx_mbox_cmd_iocb - els request indication.
17357 *
17358 * Input: ha = adapter state pointer.
17359 * srb = scsi request block pointer.
17360 * arg = els passthru entry iocb pointer.
17361 * Returns:
17362 * Context: Kernel context.
17363 */
void
ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
{
	els_descriptor_t els_desc;

	/* Extract the ELS information */
	/* Fill els_desc from the FC packet attached to the srb. */
	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);

	/* Construct the passthru entry */
	/* arg is the caller-provided els_passthru_entry_t iocb. */
	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);

	/* Ensure correct endianness */
	/* ELS payloads must be big endian in host memory. */
	ql_isp_els_handle_cmd_endian(ha, srb);
}
17378
17379 /*
17380 * ql_fca_isp_els_request - Extract into an els descriptor the info required
17381 * to build an els_passthru iocb from an fc packet.
17382 *
17383 * Input: ha = adapter state pointer.
17384 * pkt = fc packet pointer
17385 * els_desc = els descriptor pointer
17386 * Returns:
17387 * Context: Kernel context.
17388 */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t els;

	/* Pull the ELS command code from the start of the command payload. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 *
	 * NOTE(review): assumes the command buffer is bound by a single
	 * DMA cookie -- confirm against the FCA packet DMA setup.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
17429
17430 /*
17431 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17432 * using the els descriptor.
17433 *
17434 * Input: ha = adapter state pointer.
17435 * els_desc = els descriptor pointer.
17436 * els_entry = els passthru entry iocb pointer.
17437 * Returns:
17438 * Context: Kernel context.
17439 */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t *ptr32;

	/*
	 * Construct command packet.
	 *
	 * All stores go through ddi_put*() with the adapter's access
	 * handle so the iocb image has the endianness the ISP expects.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* D_ID and S_ID are written byte-wise: al_pa, area, domain. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/* Load transmit data segments and count. */
	/*
	 * NOTE(review): ptr32 walks consecutive 32-bit words starting at
	 * xmt_dseg_0_address and continues straight into the receive
	 * dseg fields -- this depends on the els_passthru_entry_t layout
	 * placing them contiguously; confirm against ql_iocb.h.
	 */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17487
17488 /*
17489 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17490 * in host memory.
17491 *
17492 * Input: ha = adapter state pointer.
17493 * srb = scsi request block
17494 * Returns:
17495 * Context: Kernel context.
17496 */
17497 void
17498 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17499 {
17500 ls_code_t els;
17501 fc_packet_t *pkt;
17502 uint8_t *ptr;
17503
17504 pkt = srb->pkt;
17505
17506 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17507 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17508
17509 ptr = (uint8_t *)pkt->pkt_cmd;
17510
17511 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17512 }
17513
17514 /*
17515 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17516 * in host memory.
17517 * Input: ha = adapter state pointer.
17518 * srb = scsi request block
17519 * Returns:
17520 * Context: Kernel context.
17521 */
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t els;
	fc_packet_t *pkt;
	uint8_t *ptr;

	pkt = srb->pkt;

	/*
	 * The opcode is read from the *command* buffer to know which
	 * response layout to swap; the swap itself is applied to the
	 * response buffer below.
	 */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	ptr = (uint8_t *)pkt->pkt_resp;
	/*
	 * NOTE(review): the local opcode copy is byte-swapped here,
	 * unlike the cmd-side variant which uses els.ls_code directly --
	 * confirm this asymmetry is intentional for response processing.
	 */
	BIG_ENDIAN_32(&els);
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17538
17539 /*
17540 * ql_isp_els_handle_endian - els requests/responses must be in big endian
17541 * in host memory.
17542 * Input: ha = adapter state pointer.
17543 * ptr = els request/response buffer pointer.
17544 * ls_code = els command code.
17545 * Returns:
17546 * Context: Kernel context.
17547 */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * The BIG_ENDIAN_* macros swap in place at ptr; ptr is then
	 * stepped to the next field.  The byte offsets below must match
	 * the on-the-wire ELS payload layout exactly.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Other ELS codes are passed through unswapped. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17606
17607 /*
17608 * ql_n_port_plogi
17609 * In N port 2 N port topology where an N Port has logged in with the
17610 * firmware because it has the N_Port login initiative, we send up
17611 * a plogi by proxy which stimulates the login procedure to continue.
17612 *
17613 * Input:
17614 * ha = adapter state pointer.
17615 * Returns:
17616 *
17617 * Context:
17618 * Kernel context.
17619 */
17620 static int
17621 ql_n_port_plogi(ql_adapter_state_t *ha)
17622 {
17623 int rval;
17624 ql_tgt_t *tq;
17625 ql_head_t done_q = { NULL, NULL };
17626
17627 rval = QL_SUCCESS;
17628
17629 if (ha->topology & QL_N_PORT) {
17630 /* if we're doing this the n_port_handle must be good */
17631 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17632 tq = ql_loop_id_to_queue(ha,
17633 ha->n_port->n_port_handle);
17634 if (tq != NULL) {
17635 (void) ql_send_plogi(ha, tq, &done_q);
17636 } else {
17637 EL(ha, "n_port_handle = %x, tq = %x\n",
17638 ha->n_port->n_port_handle, tq);
17639 }
17640 } else {
17641 EL(ha, "n_port_handle = %x, tq = %x\n",
17642 ha->n_port->n_port_handle, tq);
17643 }
17644 if (done_q.first != NULL) {
17645 ql_done(done_q.first);
17646 }
17647 }
17648 return (rval);
17649 }
17650
17651 /*
17652 * Compare two WWNs. The NAA is omitted for comparison.
17653 *
17654 * Note particularly that the indentation used in this
17655 * function isn't according to Sun recommendations. It
17656 * is indented to make reading a bit easy.
17657 *
17658 * Return Values:
17659 * if first == second return 0
17660 * if first > second return 1
17661 * if first < second return -1
17662 */
17663 int
17664 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17665 {
17666 la_wwn_t t1, t2;
17667 int rval;
17668
17669 EL(ha, "WWPN=%08x%08x\n",
17670 BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17671 EL(ha, "WWPN=%08x%08x\n",
17672 BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17673 /*
17674 * Fibre Channel protocol is big endian, so compare
17675 * as big endian values
17676 */
17677 t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17678 t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17679
17680 t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17681 t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17682
17683 if (t1.i_wwn[0] == t2.i_wwn[0]) {
17684 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17685 rval = 0;
17686 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17687 rval = 1;
17688 } else {
17689 rval = -1;
17690 }
17691 } else {
17692 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17693 rval = 1;
17694 } else {
17695 rval = -1;
17696 }
17697 }
17698 return (rval);
17699 }
17700
17701 /*
17702 * ql_wait_for_td_stop
17703 * Wait for task daemon to stop running. Internal command timeout
17704 * is approximately 30 seconds, so it may help in some corner
17705 * cases to wait that long
17706 *
17707 * Input:
17708 * ha = adapter state pointer.
17709 *
17710 * Returns:
17711 * DDI_SUCCESS or DDI_FAILURE.
17712 *
17713 * Context:
17714 * Kernel context.
17715 */
17716
17717 static int
17718 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17719 {
17720 int rval = DDI_FAILURE;
17721 UINT16 wait_cnt;
17722
17723 for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17724 /* The task daemon clears the stop flag on exit. */
17725 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17726 if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17727 ddi_in_panic()) {
17728 drv_usecwait(10000);
17729 } else {
17730 delay(drv_usectohz(10000));
17731 }
17732 } else {
17733 rval = DDI_SUCCESS;
17734 break;
17735 }
17736 }
17737 return (rval);
17738 }
17739
17740 /*
17741 * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17742 *
17743 * Input: Pointer to the adapter state structure.
17744 * Returns: Success or Failure.
17745 * Context: Kernel context.
17746 */
17747 int
17748 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17749 {
17750 int rval = DDI_SUCCESS;
17751
17752 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17753
17754 ha->nvram_cache =
17755 (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17756 KM_SLEEP);
17757
17758 if (ha->nvram_cache == NULL) {
17759 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17760 " descriptor", QL_NAME, ha->instance);
17761 rval = DDI_FAILURE;
17762 } else {
17763 if (CFG_IST(ha, CFG_CTRL_24258081)) {
17764 ha->nvram_cache->size = sizeof (nvram_24xx_t);
17765 } else {
17766 ha->nvram_cache->size = sizeof (nvram_t);
17767 }
17768 ha->nvram_cache->cache =
17769 (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17770 if (ha->nvram_cache->cache == NULL) {
17771 cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17772 QL_NAME, ha->instance);
17773 kmem_free(ha->nvram_cache,
17774 sizeof (nvram_cache_desc_t));
17775 ha->nvram_cache = 0;
17776 rval = DDI_FAILURE;
17777 } else {
17778 mutex_init(&ha->nvram_cache->mutex, NULL,
17779 MUTEX_DRIVER, NULL);
17780 ha->nvram_cache->valid = 0;
17781 }
17782 }
17783
17784 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17785
17786 return (rval);
17787 }
17788
17789 /*
17790 * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17791 *
17792 * Input: Pointer to the adapter state structure.
17793 * Returns: Success or Failure.
17794 * Context: Kernel context.
17795 */
17796 int
17797 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17798 {
17799 int rval = DDI_SUCCESS;
17800
17801 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17802
17803 if (ha->nvram_cache == NULL) {
17804 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17805 QL_NAME, ha->instance);
17806 rval = DDI_FAILURE;
17807 } else {
17808 if (ha->nvram_cache->cache != NULL) {
17809 kmem_free(ha->nvram_cache->cache,
17810 ha->nvram_cache->size);
17811 }
17812 mutex_destroy(&ha->nvram_cache->mutex);
17813 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17814 }
17815
17816 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17817
17818 return (rval);
17819 }
17820
17821 /*
17822 * ql_process_idc_event - Handle an Inter-Driver Communication async event.
17823 *
17824 * Input: Pointer to the adapter state structure.
17825 * Returns: void
17826 * Context: Kernel context.
17827 */
static void
ql_process_idc_event(ql_adapter_state_t *ha)
{
	int	rval;

	/* idc_mb[0] is the async event code captured from the mailbox. */
	switch (ha->idc_mb[0]) {
	case MBA_IDC_NOTIFICATION:
		/*
		 * The informational opcode (idc_mb[2]) can be a
		 * defined value or the mailbox command being executed
		 * on another function which stimulated this IDC message.
		 */
		/*
		 * The adapter-state lock is taken here and MUST be
		 * released on every path through the inner switch —
		 * each arm below is responsible for its own unlock.
		 */
		ADAPTER_STATE_LOCK(ha);
		switch (ha->idc_mb[2]) {
		case IDC_OPC_DRV_START:
			/*
			 * A peer function finished its flash access or
			 * MPI restart; drop the matching reference
			 * counts taken by the cases further below.
			 */
			if (ha->idc_flash_acc != 0) {
				ha->idc_flash_acc--;
				if (ha->idc_flash_acc == 0) {
					/* Last flash user; release HW lock. */
					ha->idc_flash_acc_timer = 0;
					GLOBAL_HW_UNLOCK();
				}
			}
			if (ha->idc_restart_cnt != 0) {
				ha->idc_restart_cnt--;
				if (ha->idc_restart_cnt == 0) {
					/*
					 * All restarts finished: clear the
					 * stall and let queued I/O resume.
					 */
					ha->idc_restart_timer = 0;
					ADAPTER_STATE_UNLOCK(ha);
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~DRIVER_STALL;
					TASK_DAEMON_UNLOCK(ha);
					ql_restart_queues(ha);
				} else {
					ADAPTER_STATE_UNLOCK(ha);
				}
			} else {
				ADAPTER_STATE_UNLOCK(ha);
			}
			break;
		case IDC_OPC_FLASH_ACC:
			/*
			 * Peer wants flash access: hold the global HW
			 * lock on first entry and arm a 30s timer in
			 * case the matching DRV_START never arrives.
			 */
			ha->idc_flash_acc_timer = 30;
			if (ha->idc_flash_acc == 0) {
				GLOBAL_HW_LOCK();
			}
			ha->idc_flash_acc++;
			ADAPTER_STATE_UNLOCK(ha);
			break;
		case IDC_OPC_RESTART_MPI:
			/* MPI restart in progress: stall the driver. */
			ha->idc_restart_timer = 30;
			ha->idc_restart_cnt++;
			ADAPTER_STATE_UNLOCK(ha);
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= DRIVER_STALL;
			TASK_DAEMON_UNLOCK(ha);
			break;
		case IDC_OPC_PORT_RESET_MBC:
		case IDC_OPC_SET_PORT_CONFIG_MBC:
			/*
			 * Port reset/reconfig on a peer: stall and also
			 * drain our outstanding commands first.
			 */
			ha->idc_restart_timer = 30;
			ha->idc_restart_cnt++;
			ADAPTER_STATE_UNLOCK(ha);
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= DRIVER_STALL;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_wait_outstanding(ha);
			break;
		default:
			ADAPTER_STATE_UNLOCK(ha);
			EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
			    ha->idc_mb[2]);
			break;
		}
		/*
		 * If there is a timeout value associated with this IDC
		 * notification then there is an implied requirement
		 * that we return an ACK.
		 */
		if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
			rval = ql_idc_ack(ha);
			if (rval != QL_SUCCESS) {
				/* Log only; no recovery path for a lost ACK. */
				EL(ha, "idc_ack status=%xh %xh\n", rval,
				    ha->idc_mb[2]);
			}
		}
		break;
	case MBA_IDC_COMPLETE:
		/*
		 * We don't ACK completions, only these require action.
		 */
		switch (ha->idc_mb[2]) {
		case IDC_OPC_PORT_RESET_MBC:
		case IDC_OPC_SET_PORT_CONFIG_MBC:
			/*
			 * Mirror of the DRV_START restart accounting:
			 * when the last pending restart completes,
			 * un-stall and restart the queues.
			 */
			ADAPTER_STATE_LOCK(ha);
			if (ha->idc_restart_cnt != 0) {
				ha->idc_restart_cnt--;
				if (ha->idc_restart_cnt == 0) {
					ha->idc_restart_timer = 0;
					ADAPTER_STATE_UNLOCK(ha);
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~DRIVER_STALL;
					TASK_DAEMON_UNLOCK(ha);
					ql_restart_queues(ha);
				} else {
					ADAPTER_STATE_UNLOCK(ha);
				}
			} else {
				ADAPTER_STATE_UNLOCK(ha);
			}
			break;
		default:
			break; /* Don't care... */
		}
		break;
	case MBA_IDC_TIME_EXTENDED:
		/* Peer extended its IDC timeout; informational only. */
		QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
		    "%xh\n", ha->instance, ha->idc_mb[2]);
		break;
	default:
		EL(ha, "Inconsistent IDC event =%xh %xh\n", ha->idc_mb[0],
		    ha->idc_mb[2]);
		/*
		 * NOTE(review): this path never took ADAPTER_STATE_LOCK,
		 * yet it unlocks here — looks like an unlock-without-lock
		 * if the lock macro maps to a plain mutex_exit(). Left
		 * as-is; confirm against the ADAPTER_STATE_LOCK/UNLOCK
		 * macro definitions before changing.
		 */
		ADAPTER_STATE_UNLOCK(ha);
		break;
	}
}