1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 */
27 /*
28 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 * Copyright (c) 2016 by Delphix. All rights reserved.
30 */
31
32 #pragma ident "Copyright 2015 QLogic Corporation; ql_api.c"
33
34 /*
35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36 *
37 * ***********************************************************************
38 * * **
39 * * NOTICE **
40 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
41 * * ALL RIGHTS RESERVED **
42 * * **
43 * ***********************************************************************
44 *
45 */
46
47 #include <ql_apps.h>
48 #include <ql_api.h>
49 #include <ql_debug.h>
50 #include <ql_init.h>
51 #include <ql_iocb.h>
52 #include <ql_ioctl.h>
53 #include <ql_isr.h>
54 #include <ql_mbx.h>
55 #include <ql_nx.h>
56 #include <ql_xioctl.h>
57 #include <ql_fm.h>
58
59 /*
60 * Solaris external defines.
61 */
62 extern pri_t minclsyspri;
63 extern pri_t maxclsyspri;
64
65 /*
66 * dev_ops functions prototypes
67 */
68 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
69 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
70 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
71 static int ql_power(dev_info_t *, int, int);
72 static int ql_quiesce(dev_info_t *);
73
74 /*
75 * FCA functions prototypes exported by means of the transport table
76 */
77 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
78 fc_fca_bind_info_t *);
79 static void ql_unbind_port(opaque_t);
80 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
81 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
82 static int ql_els_send(opaque_t, fc_packet_t *);
83 static int ql_get_cap(opaque_t, char *, void *);
84 static int ql_set_cap(opaque_t, char *, void *);
85 static int ql_getmap(opaque_t, fc_lilpmap_t *);
86 static int ql_transport(opaque_t, fc_packet_t *);
87 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
88 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
89 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
90 static int ql_abort(opaque_t, fc_packet_t *, int);
91 static int ql_reset(opaque_t, uint32_t);
92 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
93 static opaque_t ql_get_device(opaque_t, fc_portid_t);
94
95 /*
96 * FCA Driver Support Function Prototypes.
97 */
98 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
99 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
100 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
101 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
102 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
103 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
104 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
105 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
106 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
116 static int ql_login_port(ql_adapter_state_t *, port_id_t);
117 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
118 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
119 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint64_t);
120 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
121 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
122 ql_srb_t *);
123 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
124 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
125 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
126 ql_srb_t *);
127 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
128 static void ql_task_daemon(void *);
129 static void ql_task_thread(ql_adapter_state_t *);
130 static void ql_idle_check(ql_adapter_state_t *);
131 static void ql_unsol_callback(ql_srb_t *);
132 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
133 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
134 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
135 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
136 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
137 static int ql_handle_rscn_update(ql_adapter_state_t *);
138 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
139 fc_unsol_buf_t *);
140 static void ql_timer(void *);
141 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
142 static void ql_watchdog(ql_adapter_state_t *);
143 static void ql_wdg_tq_list(ql_adapter_state_t *, ql_tgt_t *);
144 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *);
145 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
146 static void ql_iidma(ql_adapter_state_t *);
147 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
148 static void ql_loop_resync(ql_adapter_state_t *);
149 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
150 static int ql_kstat_update(kstat_t *, int);
151 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
152 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
153 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
154 static size_t ql_81xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
155 static size_t ql_8021_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
156 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
157 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
158 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
159 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
160 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
161 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
162 void *);
163 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
164 uint8_t);
165 static int ql_save_config_regs(dev_info_t *);
166 static int ql_restore_config_regs(dev_info_t *);
167 static void ql_halt(ql_adapter_state_t *, int);
168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
170 static int ql_suspend_adapter(ql_adapter_state_t *);
171 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
172 static int ql_setup_interrupts(ql_adapter_state_t *);
173 static int ql_setup_msi(ql_adapter_state_t *);
174 static int ql_setup_msix(ql_adapter_state_t *);
175 static int ql_setup_fixed(ql_adapter_state_t *);
176 static void ql_release_intr(ql_adapter_state_t *);
177 static int ql_legacy_intr(ql_adapter_state_t *);
178 static int ql_init_mutex(ql_adapter_state_t *);
179 static void ql_destroy_mutex(ql_adapter_state_t *);
180 static void ql_fca_isp_els_request(ql_adapter_state_t *, ql_request_q_t *,
181 fc_packet_t *, els_descriptor_t *);
182 static void ql_isp_els_request_ctor(els_descriptor_t *,
183 els_passthru_entry_t *);
184 static int ql_n_port_plogi(ql_adapter_state_t *);
185 static int ql_create_queues(ql_adapter_state_t *);
186 static int ql_create_rsp_queue(ql_adapter_state_t *, uint16_t);
187 static void ql_delete_queues(ql_adapter_state_t *);
188 static int ql_multi_queue_support(ql_adapter_state_t *);
189 static int ql_map_mem_bar(ql_adapter_state_t *, ddi_acc_handle_t *, caddr_t *,
190 uint32_t, uint32_t);
191 static void ql_completion_thread(void *);
192 static void ql_process_comp_queue(void *);
193 static int ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *);
194 static void ql_idc(ql_adapter_state_t *);
195 static int ql_83xx_binary_fw_dump(ql_adapter_state_t *, ql_83xx_fw_dump_t *);
196 static size_t ql_83xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
197 static caddr_t ql_str_ptr(ql_adapter_state_t *, caddr_t, uint32_t *);
198 static int ql_27xx_binary_fw_dump(ql_adapter_state_t *);
199 static size_t ql_27xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
200 static uint32_t ql_2700_dmp_parse_template(ql_adapter_state_t *, ql_dt_hdr_t *,
201 uint8_t *, uint32_t);
202 static int ql_2700_dt_riob1(ql_adapter_state_t *, ql_dt_riob1_t *, uint8_t *,
203 uint8_t *);
204 static void ql_2700_dt_wiob1(ql_adapter_state_t *, ql_dt_wiob1_t *, uint8_t *,
205 uint8_t *);
206 static int ql_2700_dt_riob2(ql_adapter_state_t *, ql_dt_riob2_t *, uint8_t *,
207 uint8_t *);
208 static void ql_2700_dt_wiob2(ql_adapter_state_t *, ql_dt_wiob2_t *, uint8_t *,
209 uint8_t *);
210 static int ql_2700_dt_rpci(ql_adapter_state_t *, ql_dt_rpci_t *, uint8_t *,
211 uint8_t *);
212 static void ql_2700_dt_wpci(ql_adapter_state_t *, ql_dt_wpci_t *, uint8_t *,
213 uint8_t *);
214 static int ql_2700_dt_rram(ql_adapter_state_t *, ql_dt_rram_t *, uint8_t *,
215 uint8_t *);
216 static int ql_2700_dt_gque(ql_adapter_state_t *, ql_dt_gque_t *, uint8_t *,
217 uint8_t *);
218 static int ql_2700_dt_gfce(ql_adapter_state_t *, ql_dt_gfce_t *, uint8_t *,
219 uint8_t *);
220 static void ql_2700_dt_prisc(ql_adapter_state_t *, ql_dt_prisc_t *, uint8_t *,
221 uint8_t *);
222 static void ql_2700_dt_rrisc(ql_adapter_state_t *, ql_dt_rrisc_t *, uint8_t *,
223 uint8_t *);
224 static void ql_2700_dt_dint(ql_adapter_state_t *, ql_dt_dint_t *, uint8_t *,
225 uint8_t *);
226 static int ql_2700_dt_ghbd(ql_adapter_state_t *, ql_dt_ghbd_t *, uint8_t *,
227 uint8_t *);
228 static int ql_2700_dt_scra(ql_adapter_state_t *, ql_dt_scra_t *, uint8_t *,
229 uint8_t *);
230 static int ql_2700_dt_rrreg(ql_adapter_state_t *, ql_dt_rrreg_t *, uint8_t *,
231 uint8_t *);
232 static void ql_2700_dt_wrreg(ql_adapter_state_t *, ql_dt_wrreg_t *, uint8_t *,
233 uint8_t *);
234 static int ql_2700_dt_rrram(ql_adapter_state_t *, ql_dt_rrram_t *, uint8_t *,
235 uint8_t *);
236 static int ql_2700_dt_rpcic(ql_adapter_state_t *, ql_dt_rpcic_t *, uint8_t *,
237 uint8_t *);
238 static int ql_2700_dt_gques(ql_adapter_state_t *, ql_dt_gques_t *, uint8_t *,
239 uint8_t *);
240 static int ql_2700_dt_wdmp(ql_adapter_state_t *, ql_dt_wdmp_t *, uint8_t *,
241 uint8_t *);
242 static int ql_2700_dump_ram(ql_adapter_state_t *, uint16_t, uint32_t, uint32_t,
243 uint8_t *);
244
245 /*
246 * Global data
247 */
248 static uint8_t ql_enable_pm = 1;
249 static int ql_flash_sbus_fpga = 0;
250 uint32_t ql_os_release_level;
251 uint32_t ql_disable_aif = 0;
252 uint32_t ql_disable_intx = 0;
253 uint32_t ql_disable_msi = 0;
254 uint32_t ql_disable_msix = 0;
255 uint32_t ql_enable_ets = 0;
256 uint16_t ql_osc_wait_count = 1000;
257 uint32_t ql_task_cb_dly = 64;
258 uint32_t qlc_disable_load = 0;
259
260 /* Timer routine variables. */
261 static timeout_id_t ql_timer_timeout_id = NULL;
262 static clock_t ql_timer_ticks;
263
264 /* Soft state head pointer. */
265 void *ql_state = NULL;
266
267 /* Head adapter link. */
268 ql_head_t ql_hba = {
269 NULL,
270 NULL
271 };
272
273 /* Global hba index */
274 uint32_t ql_gfru_hba_index = 1;
275
276 /*
277 * Some IP defines and globals
278 */
279 uint32_t ql_ip_buffer_count = 128;
280 uint32_t ql_ip_low_water = 10;
281 uint8_t ql_ip_fast_post_count = 5;
282 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */
283
/*
 * Device AL_PA to Device Head Queue index array.
 * Indexed by AL_PA value; yields the device head queue index.
 * Table contents are fixed by the FC-AL loop-ID/AL_PA mapping —
 * do not edit by hand.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};

/*
 * Device loop_id to ALPA array.
 * Inverse of the mapping above: indexed by loop ID, yields the AL_PA.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
330
/*
 * 2200 register offsets
 * Offsets of 0xff mark registers/mailboxes not present on this chip.
 */
static reg_off_t reg_off_2200 = {
	0x00, /* flash_address */
	0x02, /* flash_data */
	0x06, /* ctrl_status */
	0x08, /* ictrl */
	0x0a, /* istatus */
	0x0c, /* semaphore */
	0x0e, /* nvram */
	0x18, /* req_in */
	0x18, /* req_out */
	0x1a, /* resp_in */
	0x1a, /* resp_out */
	0xff, /* risc2host - n/a */
	24, /* Number of mailboxes */

	/* Mailbox in register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	/* Mailbox out register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	0x96, /* fpm_diag_config */
	0xa4, /* pcr */
	0xb0, /* mctr */
	0xb8, /* fb_cmd */
	0xc0, /* hccr */
	0xcc, /* gpiod */
	0xce, /* gpioe */
	0xff, /* host_to_host_sema - n/a */
	0xff, /* pri_req_in - n/a */
	0xff, /* pri_req_out - n/a */
	0xff, /* atio_req_in - n/a */
	0xff, /* atio_req_out - n/a */
	0xff, /* io_base_addr - n/a */
	0xff, /* nx_host_int - n/a */
	0xff /* nx_risc_int - n/a */
};
377
/*
 * 2300 register offsets
 * Offsets of 0xff mark registers not present on this chip.
 */
static reg_off_t reg_off_2300 = {
	0x00, /* flash_address */
	0x02, /* flash_data */
	0x06, /* ctrl_status */
	0x08, /* ictrl */
	0x0a, /* istatus */
	0x0c, /* semaphore */
	0x0e, /* nvram */
	0x10, /* req_in */
	0x12, /* req_out */
	0x14, /* resp_in */
	0x16, /* resp_out */
	0x18, /* risc2host */
	32, /* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	/* Mailbox out register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	0x96, /* fpm_diag_config */
	0xa4, /* pcr */
	0xb0, /* mctr */
	0x80, /* fb_cmd */
	0xc0, /* hccr */
	0xcc, /* gpiod */
	0xce, /* gpioe */
	0x1c, /* host_to_host_sema */
	0xff, /* pri_req_in - n/a */
	0xff, /* pri_req_out - n/a */
	0xff, /* atio_req_in - n/a */
	0xff, /* atio_req_out - n/a */
	0xff, /* io_base_addr - n/a */
	0xff, /* nx_host_int - n/a */
	0xff /* nx_risc_int - n/a */
};
422
/*
 * 2400/2500 register offsets
 * Offsets of 0xff mark registers not present on this chip.
 */
reg_off_t reg_off_2400_2500 = {
	0x00, /* flash_address */
	0x04, /* flash_data */
	0x08, /* ctrl_status */
	0x0c, /* ictrl */
	0x10, /* istatus */
	0xff, /* semaphore - n/a */
	0xff, /* nvram - n/a */
	0x1c, /* req_in */
	0x20, /* req_out */
	0x24, /* resp_in */
	0x28, /* resp_out */
	0x44, /* risc2host */
	32, /* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	/* Mailbox out register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	0xff, /* fpm_diag_config - n/a */
	0xff, /* pcr - n/a */
	0xff, /* mctr - n/a */
	0xff, /* fb_cmd - n/a */
	0x48, /* hccr */
	0x4c, /* gpiod */
	0x50, /* gpioe */
	0xff, /* host_to_host_sema - n/a */
	0x2c, /* pri_req_in */
	0x30, /* pri_req_out */
	0x3c, /* atio_req_in */
	0x40, /* atio_req_out */
	0x54, /* io_base_addr */
	0xff, /* nx_host_int - n/a */
	0xff /* nx_risc_int - n/a */
};
467
/*
 * P3 (8021) register offsets
 * Offsets of 0xff mark registers not present on this chip.
 */
static reg_off_t reg_off_8021 = {
	0x00, /* flash_address */
	0x04, /* flash_data */
	0x08, /* ctrl_status */
	0x0c, /* ictrl */
	0x10, /* istatus */
	0xff, /* semaphore - n/a */
	0xff, /* nvram - n/a */
	0xff, /* req_in - n/a */
	0x0, /* req_out */
	0x100, /* resp_in */
	0x200, /* resp_out */
	0x500, /* risc2host */
	32, /* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
	0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
	0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
	0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,

	/* Mailbox out register offsets 0 - 31 */
	0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
	0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
	0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
	0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,

	0xff, /* fpm_diag_config - n/a */
	0xff, /* pcr - n/a */
	0xff, /* mctr - n/a */
	0xff, /* fb_cmd - n/a */
	0x48, /* hccr */
	0x4c, /* gpiod */
	0x50, /* gpioe */
	0xff, /* host_to_host_sema - n/a */
	0x2c, /* pri_req_in */
	0x30, /* pri_req_out */
	0x3c, /* atio_req_in */
	0x40, /* atio_req_out */
	0x54, /* io_base_addr */
	0x380, /* nx_host_int */
	0x504 /* nx_risc_int */
};
512
/*
 * 2700/8300 register offsets
 * Offsets of 0xff mark registers not present on this chip.
 */
static reg_off_t reg_off_2700_8300 = {
	0x00, /* flash_address */
	0x04, /* flash_data */
	0x08, /* ctrl_status */
	0x0c, /* ictrl */
	0x10, /* istatus */
	0xff, /* semaphore - n/a */
	0xff, /* nvram - n/a */
	0xff, /* req_in - n/a */
	0xff, /* req_out - n/a */
	0xff, /* resp_in - n/a */
	0xff, /* resp_out - n/a */
	0x44, /* risc2host */
	32, /* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	/* Mailbox out register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	0xff, /* fpm_diag_config - n/a */
	0xff, /* pcr - n/a */
	0xff, /* mctr - n/a */
	0xff, /* fb_cmd - n/a */
	0x48, /* hccr */
	0x4c, /* gpiod */
	0x50, /* gpioe */
	0x58, /* host_to_host_sema */
	0xff, /* pri_req_in - n/a */
	0xff, /* pri_req_out - n/a */
	0xff, /* atio_req_in - n/a */
	0xff, /* atio_req_out - n/a */
	0x54, /* io_base_addr */
	0xff, /* nx_host_int - n/a */
	0xff /* nx_risc_int - n/a */
};
557
/* mutex for protecting variables shared by all instances of the driver */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;
kmutex_t ql_global_timer_mutex;

/* DMA access attribute structure. */
ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* device data is little-endian */
	DDI_STRICTORDER_ACC
};

/*
 * I/O DMA attributes structures.
 * The two templates differ only in the high DMA address bound
 * (64-bit vs. 32-bit addressing); per-adapter copies are made in
 * ql_attach() and may be further adjusted in _init().
 */
ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
601
/*
 * Static declarations of cb_ops entry point functions...
 * Only open, close and ioctl are implemented; all other character/block
 * entry points are nodev (unsupported).
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};

/*
 * Static declarations of dev_ops entry point functions...
 * NOTE: fc_fca_init() in _init() may tweak this structure before
 * mod_install().
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};
639
/* ELS command code to text converter */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string, from QL_VERSION in the build headers. */
char ql_driver_version[] = QL_VERSION;

/* Number of extended-logging trace entries. */
uint32_t ql_log_entries = QL_LOG_ENTRIES;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
664
665 /* ************************************************************************ */
666 /* Loadable Module Routines. */
667 /* ************************************************************************ */
668
669 /*
670 * _init
671 * Initializes a loadable module. It is called before any other
672 * routine in a loadable module.
673 *
674 * Returns:
675 * 0 = success
676 *
677 * Context:
678 * Kernel context.
679 */
680 int
681 _init(void)
682 {
683 uint16_t w16;
684 int rval = 0;
685
686 if (qlc_disable_load) {
687 cmn_err(CE_WARN, "%s load disabled", QL_NAME);
688 return (EINVAL);
689 }
690
691 /* Get OS major release level. */
692 for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
693 if (utsname.release[w16] == '.') {
694 w16++;
695 break;
696 }
697 }
698 if (w16 < sizeof (utsname.release)) {
699 (void) ql_bstr_to_dec(&utsname.release[w16],
700 &ql_os_release_level, 0);
701 } else {
702 ql_os_release_level = 0;
703 }
704 if (ql_os_release_level < 6) {
705 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
706 QL_NAME, ql_os_release_level);
707 rval = EINVAL;
708 }
709 if (ql_os_release_level == 6) {
710 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
711 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
712 }
713
714 if (rval == 0) {
715 rval = ddi_soft_state_init(&ql_state,
716 sizeof (ql_adapter_state_t), 0);
717 }
718 if (rval == 0) {
719 /* allow the FC Transport to tweak the dev_ops */
720 fc_fca_init(&ql_devops);
721
722 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
723 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
724 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
725 mutex_init(&ql_global_timer_mutex, NULL, MUTEX_DRIVER, NULL);
726 rval = mod_install(&modlinkage);
727 if (rval != 0) {
728 mutex_destroy(&ql_global_timer_mutex);
729 mutex_destroy(&ql_global_el_mutex);
730 mutex_destroy(&ql_global_hw_mutex);
731 mutex_destroy(&ql_global_mutex);
732 ddi_soft_state_fini(&ql_state);
733 }
734 }
735
736 if (rval != 0) {
737 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
738 QL_NAME);
739 }
740
741 return (rval);
742 }
743
744 /*
745 * _fini
746 * Prepares a module for unloading. It is called when the system
747 * wants to unload a module. If the module determines that it can
748 * be unloaded, then _fini() returns the value returned by
749 * mod_remove(). Upon successful return from _fini() no other
750 * routine in the module will be called before _init() is called.
751 *
752 * Returns:
753 * 0 = success
754 *
755 * Context:
756 * Kernel context.
757 */
758 int
759 _fini(void)
760 {
761 int rval;
762
763 rval = mod_remove(&modlinkage);
764 if (rval == 0) {
765 mutex_destroy(&ql_global_timer_mutex);
766 mutex_destroy(&ql_global_el_mutex);
767 mutex_destroy(&ql_global_hw_mutex);
768 mutex_destroy(&ql_global_mutex);
769 ddi_soft_state_fini(&ql_state);
770 }
771
772 return (rval);
773 }
774
775 /*
776 * _info
777 * Returns information about loadable module.
778 *
779 * Input:
780 * modinfo = pointer to module information structure.
781 *
782 * Returns:
783 * Value returned by mod_info().
784 *
785 * Context:
786 * Kernel context.
787 */
788 int
789 _info(struct modinfo *modinfop)
790 {
791 return (mod_info(&modlinkage, modinfop));
792 }
793
794 /* ************************************************************************ */
795 /* dev_ops functions */
796 /* ************************************************************************ */
797
798 /*
799 * ql_getinfo
800 * Returns the pointer associated with arg when cmd is
801 * set to DDI_INFO_DEVT2DEVINFO, or it should return the
802 * instance number associated with arg when cmd is set
803 * to DDI_INFO_DEV2INSTANCE.
804 *
805 * Input:
806 * dip = Do not use.
807 * cmd = command argument.
808 * arg = command specific argument.
809 * resultp = pointer to where request information is stored.
810 *
811 * Returns:
812 * DDI_SUCCESS or DDI_FAILURE.
813 *
814 * Context:
815 * Kernel context.
816 */
817 /* ARGSUSED */
818 static int
819 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
820 {
821 ql_adapter_state_t *ha;
822 int minor;
823 int rval = DDI_FAILURE;
824
825 minor = (int)(getminor((dev_t)arg));
826 ha = ddi_get_soft_state(ql_state, minor);
827 if (ha == NULL) {
828 QL_PRINT_2(ha, "failed, unknown minor=%d\n",
829 getminor((dev_t)arg));
830 *resultp = NULL;
831 return (rval);
832 }
833
834 QL_PRINT_3(ha, "started\n");
835
836 switch (cmd) {
837 case DDI_INFO_DEVT2DEVINFO:
838 *resultp = ha->dip;
839 rval = DDI_SUCCESS;
840 break;
841 case DDI_INFO_DEVT2INSTANCE:
842 *resultp = (void *)(uintptr_t)(ha->instance);
843 rval = DDI_SUCCESS;
844 break;
845 default:
846 EL(ha, "failed, unsupported cmd=%d\n", cmd);
847 rval = DDI_FAILURE;
848 break;
849 }
850
851 QL_PRINT_3(ha, "done\n");
852
853 return (rval);
854 }
855
856 /*
857 * ql_attach
858 * Configure and attach an instance of the driver
859 * for a port.
860 *
861 * Input:
862 * dip = pointer to device information structure.
863 * cmd = attach type.
864 *
865 * Returns:
866 * DDI_SUCCESS or DDI_FAILURE.
867 *
868 * Context:
869 * Kernel context.
870 */
871 static int
872 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
873 {
874 off_t regsize;
875 uint32_t size;
876 int rval, *ptr;
877 uint_t progress = 0;
878 char *buf, taskq_name[32];
879 ushort_t caps_ptr, cap;
880 fc_fca_tran_t *tran;
881 ql_adapter_state_t *ha = NULL;
882 int instance = ddi_get_instance(dip);
883
884 static char *pmcomps[] = {
885 NULL,
886 PM_LEVEL_D3_STR, /* Device OFF */
887 PM_LEVEL_D0_STR, /* Device ON */
888 };
889
890 QL_PRINT_3(NULL, "started, instance=%d, cmd=%xh\n",
891 ddi_get_instance(dip), cmd);
892
893 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
894
895 switch (cmd) {
896 case DDI_ATTACH:
897 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
898 QL_NAME, instance, QL_VERSION);
899
900 /* Correct OS version? */
901 if (ql_os_release_level != 11) {
902 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
903 "11", QL_NAME, instance);
904 goto attach_failed;
905 }
906
907 /* Hardware is installed in a DMA-capable slot? */
908 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
909 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
910 instance);
911 goto attach_failed;
912 }
913
914 /* Allocate our per-device-instance structure */
915 if (ddi_soft_state_zalloc(ql_state,
916 instance) != DDI_SUCCESS) {
917 cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
918 QL_NAME, instance);
919 goto attach_failed;
920 }
921
922 ha = ddi_get_soft_state(ql_state, instance);
923 if (ha == NULL) {
924 cmn_err(CE_WARN, "%s(%d): can't get soft state",
925 QL_NAME, instance);
926 goto attach_failed;
927 }
928 ha->dip = dip;
929 ha->instance = instance;
930 ha->hba.base_address = ha;
931 ha->pha = ha;
932
933 ha->bit32_io_dma_attr = ql_32bit_io_dma_attr;
934 ha->bit64_io_dma_attr = ql_64bit_io_dma_attr;
935
936 (void) ql_el_trace_alloc(ha);
937
938 progress |= QL_SOFT_STATE_ALLOCED;
939
940 /* Get extended logging and dump flags. */
941 ql_common_properties(ha);
942
943 qlc_fm_init(ha);
944 progress |= QL_FCA_INIT_FM;
945
946 ha->io_dma_attr = ha->bit32_io_dma_attr;
947
948 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
949 "sbus") == 0) {
950 EL(ha, "%s SBUS card detected\n", QL_NAME);
951 ha->cfg_flags |= CFG_SBUS_CARD;
952 }
953
954 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
955 DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
956
957 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
958 QL_UB_LIMIT, KM_SLEEP);
959
960 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
961 KM_SLEEP);
962
963 (void) ddi_pathname(dip, buf);
964 ha->devpath = kmem_zalloc(strlen(buf) + 1, KM_SLEEP);
965 if (ha->devpath == NULL) {
966 EL(ha, "devpath mem alloc failed\n");
967 } else {
968 (void) strcpy(ha->devpath, buf);
969 EL(ha, "devpath is: %s\n", ha->devpath);
970 }
971
972 if (CFG_IST(ha, CFG_SBUS_CARD)) {
973 /*
974 * For cards where PCI is mapped to sbus e.g. Ivory.
975 *
976 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
977 * : 0x100 - 0x3FF PCI IO space for 2200
978 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
979 * : 0x100 - 0x3FF PCI IO Space for fpga
980 */
981 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
982 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
983 DDI_SUCCESS) {
984 cmn_err(CE_WARN, "%s(%d): Unable to map device"
985 " registers", QL_NAME, instance);
986 goto attach_failed;
987 }
988 if (ddi_regs_map_setup(dip, 1,
989 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
990 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
991 DDI_SUCCESS) {
992 /* We should not fail attach here */
993 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
994 QL_NAME, instance);
995 ha->sbus_fpga_iobase = NULL;
996 }
997 progress |= QL_REGS_MAPPED;
998
999 /*
1000 * We should map config space before adding interrupt
1001 * So that the chip type (2200 or 2300) can be
1002 * determined before the interrupt routine gets a
1003 * chance to execute.
1004 */
1005 if (ddi_regs_map_setup(dip, 0,
1006 (caddr_t *)&ha->sbus_config_base, 0, 0x100,
1007 &ql_dev_acc_attr, &ha->sbus_config_handle) !=
1008 DDI_SUCCESS) {
1009 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
1010 "config registers", QL_NAME, instance);
1011 goto attach_failed;
1012 }
1013 progress |= QL_CONFIG_SPACE_SETUP;
1014 } else {
1015 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
1016 rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1017 DDI_PROP_DONTPASS, "reg", &ptr, &size);
1018 if (rval != DDI_PROP_SUCCESS) {
1019 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
1020 "address registers", QL_NAME, instance);
1021 goto attach_failed;
1022 } else {
1023 ha->pci_bus_addr = ptr[0];
1024 ha->pci_function_number = (uint8_t)
1025 (ha->pci_bus_addr >> 8 & 7);
1026 ddi_prop_free(ptr);
1027 }
1028
1029 /*
1030 * We should map config space before adding interrupt
1031 * So that the chip type (2200 or 2300) can be
1032 * determined before the interrupt routine gets a
1033 * chance to execute.
1034 */
1035 if (pci_config_setup(ha->dip, &ha->pci_handle) !=
1036 DDI_SUCCESS) {
1037 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
1038 "config space", QL_NAME, instance);
1039 goto attach_failed;
1040 }
1041 progress |= QL_CONFIG_SPACE_SETUP;
1042
1043 /*
1044 * Setup the ISP2200 registers address mapping to be
1045 * accessed by this particular driver.
1046 * 0x0 Configuration Space
1047 * 0x1 I/O Space
1048 * 0x2 32-bit Memory Space address
1049 * 0x3 64-bit Memory Space address
1050 */
1051 size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
1052 2 : 1;
1053
1054 if (qlc_fm_check_acc_handle(ha, ha->pci_handle)
1055 != DDI_FM_OK) {
1056 qlc_fm_report_err_impact(ha,
1057 QL_FM_EREPORT_ACC_HANDLE_CHECK);
1058 goto attach_failed;
1059 }
1060
1061 if (ddi_dev_regsize(dip, size, ®size) !=
1062 DDI_SUCCESS ||
1063 ddi_regs_map_setup(dip, size, &ha->iobase,
1064 0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1065 DDI_SUCCESS) {
1066 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1067 "failed", QL_NAME, instance);
1068 goto attach_failed;
1069 }
1070 progress |= QL_REGS_MAPPED;
1071
1072 if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1073 != DDI_FM_OK) {
1074 qlc_fm_report_err_impact(ha,
1075 QL_FM_EREPORT_ACC_HANDLE_CHECK);
1076 goto attach_failed;
1077 }
1078
1079 /*
1080 * We need I/O space mappings for 23xx HBAs for
1081 * loading flash (FCode). The chip has a bug due to
1082 * which loading flash fails through mem space
1083 * mappings in PCI-X mode.
1084 */
1085 if (size == 1) {
1086 ha->iomap_iobase = ha->iobase;
1087 ha->iomap_dev_handle = ha->dev_handle;
1088 } else {
1089 if (ddi_dev_regsize(dip, 1, ®size) !=
1090 DDI_SUCCESS ||
1091 ddi_regs_map_setup(dip, 1,
1092 &ha->iomap_iobase, 0, regsize,
1093 &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1094 DDI_SUCCESS) {
1095 cmn_err(CE_WARN, "%s(%d): regs_map_"
1096 "setup(I/O) failed", QL_NAME,
1097 instance);
1098 goto attach_failed;
1099 }
1100 progress |= QL_IOMAP_IOBASE_MAPPED;
1101
1102 if (qlc_fm_check_acc_handle(ha,
1103 ha->iomap_dev_handle) != DDI_FM_OK) {
1104 qlc_fm_report_err_impact(ha,
1105 QL_FM_EREPORT_ACC_HANDLE_CHECK);
1106 goto attach_failed;
1107 }
1108 }
1109 }
1110
1111 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1112 PCI_CONF_SUBSYSID);
1113 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1114 PCI_CONF_SUBVENID);
1115 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1116 PCI_CONF_VENID);
1117 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1118 PCI_CONF_DEVID);
1119 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1120 PCI_CONF_REVID);
1121
1122 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1123 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1124 ha->subven_id, ha->subsys_id);
1125
1126 switch (ha->device_id) {
1127 case 0x2300:
1128 case 0x2312:
1129 case 0x2322:
1130 case 0x6312:
1131 case 0x6322:
1132 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1133 ha->function_number = 1;
1134 }
1135 if (ha->device_id == 0x2322 ||
1136 ha->device_id == 0x6322) {
1137 ha->cfg_flags |= CFG_CTRL_63XX;
1138 ha->fw_class = 0x6322;
1139 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1140 } else {
1141 ha->cfg_flags |= CFG_CTRL_23XX;
1142 ha->fw_class = 0x2300;
1143 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1144 }
1145 ha->reg_off = ®_off_2300;
1146 ha->interrupt_count = 1;
1147 ha->osc_max_cnt = 1024;
1148 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1149 goto attach_failed;
1150 }
1151 ha->fcp_cmd = ql_command_iocb;
1152 ha->ip_cmd = ql_ip_iocb;
1153 ha->ms_cmd = ql_ms_iocb;
1154 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1155 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1156 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1157 } else {
1158 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1159 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1160 }
1161 break;
1162
1163 case 0x2200:
1164 ha->cfg_flags |= CFG_CTRL_22XX;
1165 ha->reg_off = ®_off_2200;
1166 ha->interrupt_count = 1;
1167 ha->osc_max_cnt = 1024;
1168 ha->fw_class = 0x2200;
1169 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1170 goto attach_failed;
1171 }
1172 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1173 ha->fcp_cmd = ql_command_iocb;
1174 ha->ip_cmd = ql_ip_iocb;
1175 ha->ms_cmd = ql_ms_iocb;
1176 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1177 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1178 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1179 } else {
1180 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1181 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1182 }
1183 break;
1184
1185 case 0x2422:
1186 case 0x2432:
1187 case 0x5422:
1188 case 0x5432:
1189 case 0x8432:
1190 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1191 ha->function_number = 1;
1192 }
1193 ha->cfg_flags |= CFG_CTRL_24XX;
1194 if (ha->device_id == 0x8432) {
1195 ha->cfg_flags |= CFG_CTRL_MENLO;
1196 } else {
1197 ha->flags |= VP_ENABLED;
1198 ha->max_vports = MAX_24_VIRTUAL_PORTS;
1199 }
1200
1201 ha->reg_off = ®_off_2400_2500;
1202 ha->interrupt_count = 2;
1203 ha->osc_max_cnt = 2048;
1204 ha->fw_class = 0x2400;
1205 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1206 goto attach_failed;
1207 }
1208 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1209 ha->fcp_cmd = ql_command_24xx_iocb;
1210 ha->ip_cmd = ql_ip_24xx_iocb;
1211 ha->ms_cmd = ql_ms_24xx_iocb;
1212 ha->els_cmd = ql_els_24xx_iocb;
1213 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1214 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1215 break;
1216
1217 case 0x2522:
1218 case 0x2532:
1219 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1220 ha->function_number = 1;
1221 }
1222 ha->cfg_flags |= CFG_CTRL_25XX;
1223 ha->flags |= VP_ENABLED;
1224 ha->max_vports = MAX_25_VIRTUAL_PORTS;
1225 ha->reg_off = ®_off_2400_2500;
1226 ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1227 ha->interrupt_count = 2;
1228 ha->osc_max_cnt = 2048;
1229 ha->fw_class = 0x2500;
1230 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1231 goto attach_failed;
1232 }
1233 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1234 ha->fcp_cmd = ql_command_24xx_iocb;
1235 ha->ms_cmd = ql_ms_24xx_iocb;
1236 ha->els_cmd = ql_els_24xx_iocb;
1237 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1238 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1239 if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1240 ha->flags |= MULTI_QUEUE;
1241 }
1242 break;
1243
1244 case 0x2031:
1245 /* Get queue pointer memory mapped registers */
1246 if (ddi_dev_regsize(dip, 3, ®size) != DDI_SUCCESS ||
1247 ddi_regs_map_setup(dip, 3, &ha->mbar,
1248 0, regsize, &ql_dev_acc_attr,
1249 &ha->mbar_dev_handle) != DDI_SUCCESS) {
1250 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1251 "(mbar) failed", QL_NAME, instance);
1252 goto attach_failed;
1253 }
1254 ha->mbar_size = (uint32_t)regsize;
1255
1256 if (ha->pci_function_number != 0 &&
1257 ha->pci_function_number != 2) {
1258 ha->function_number = 1;
1259 }
1260 ha->cfg_flags |= CFG_CTRL_83XX;
1261 ha->flags |= VP_ENABLED | MULTI_QUEUE;
1262 ha->max_vports = MAX_83_VIRTUAL_PORTS;
1263 ha->reg_off = ®_off_2700_8300;
1264 ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1265 ha->interrupt_count = 2;
1266 ha->osc_max_cnt = 2048;
1267 ha->fw_class = 0x8301fc;
1268 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1269 goto attach_failed;
1270 }
1271 ha->risc_dump_size = QL_83XX_FW_DUMP_SIZE;
1272 ha->fcp_cmd = ql_command_24xx_iocb;
1273 ha->ms_cmd = ql_ms_24xx_iocb;
1274 ha->els_cmd = ql_els_24xx_iocb;
1275 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1276 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1277 break;
1278
1279 case 0x2071:
1280 case 0x2261:
1281 case 0x2271:
1282 /* Get queue pointer memory mapped registers */
1283 if (ddi_dev_regsize(dip, 3, ®size) != DDI_SUCCESS ||
1284 ddi_regs_map_setup(dip, 3, &ha->mbar,
1285 0, regsize, &ql_dev_acc_attr,
1286 &ha->mbar_dev_handle) != DDI_SUCCESS) {
1287 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1288 "(mbar) failed", QL_NAME, instance);
1289 goto attach_failed;
1290 }
1291 ha->mbar_size = (uint32_t)regsize;
1292
1293 ha->function_number = ha->pci_function_number;
1294 ha->cfg_flags |= CFG_CTRL_27XX;
1295 ha->flags |= VP_ENABLED | MULTI_QUEUE |
1296 QUEUE_SHADOW_PTRS;
1297 ha->max_vports = MAX_27_VIRTUAL_PORTS;
1298 ha->reg_off = ®_off_2700_8300;
1299 ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1300 ha->interrupt_count = 2;
1301 ha->osc_max_cnt = 2048;
1302 ha->fw_class = 0x2700;
1303 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1304 goto attach_failed;
1305 }
1306 ha->risc_dump_size = QL_27XX_FW_DUMP_SIZE;
1307 ha->fcp_cmd = ql_command_24xx_iocb;
1308 ha->ms_cmd = ql_ms_24xx_iocb;
1309 ha->els_cmd = ql_els_24xx_iocb;
1310 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1311 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1312 break;
1313
1314 case 0x8001:
1315 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1316 ha->function_number = 1;
1317 }
1318 ha->cfg_flags |= CFG_CTRL_81XX;
1319 ha->flags |= VP_ENABLED;
1320 ha->max_vports = MAX_81XX_VIRTUAL_PORTS;
1321 ha->reg_off = ®_off_2400_2500;
1322 ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1323 ha->interrupt_count = 2;
1324 ha->osc_max_cnt = 2048;
1325 ha->fw_class = 0x8100;
1326 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1327 goto attach_failed;
1328 }
1329 ha->risc_dump_size = QL_81XX_FW_DUMP_SIZE;
1330 ha->fcp_cmd = ql_command_24xx_iocb;
1331 ha->ms_cmd = ql_ms_24xx_iocb;
1332 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1333 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1334 if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1335 ha->flags |= MULTI_QUEUE;
1336 }
1337 break;
1338
1339 case 0x8021:
1340 if (ha->pci_function_number & BIT_0) {
1341 ha->function_number = 1;
1342 }
1343 ha->cfg_flags |= CFG_CTRL_82XX;
1344 ha->flags |= VP_ENABLED;
1345 ha->max_vports = MAX_8021_VIRTUAL_PORTS;
1346 ha->reg_off = ®_off_8021;
1347 ha->interrupt_count = 2;
1348 ha->osc_max_cnt = 2048;
1349 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1350 ha->fcp_cmd = ql_command_24xx_iocb;
1351 ha->ms_cmd = ql_ms_24xx_iocb;
1352 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1353 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1354 ha->io_dma_attr.dma_attr_flags |=
1355 DDI_DMA_RELAXED_ORDERING;
1356
1357 ha->nx_pcibase = ha->iobase;
1358 ha->iobase += 0xBC000 + (ha->pci_function_number << 11);
1359 ha->iomap_iobase += 0xBC000 +
1360 (ha->pci_function_number << 11);
1361
1362 /* map doorbell */
1363 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS ||
1364 ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1365 0, regsize, &ql_dev_acc_attr,
1366 &ha->db_dev_handle) !=
1367 DDI_SUCCESS) {
1368 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1369 "(doorbell) failed", QL_NAME, instance);
1370 goto attach_failed;
1371 }
1372 progress |= QL_DB_IOBASE_MAPPED;
1373
1374 if (qlc_fm_check_acc_handle(ha, ha->db_dev_handle)
1375 != DDI_FM_OK) {
1376 qlc_fm_report_err_impact(ha,
1377 QL_FM_EREPORT_ACC_HANDLE_CHECK);
1378 goto attach_failed;
1379 }
1380
1381 ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1382 (ha->pci_function_number << 12));
1383 ha->db_read = ha->nx_pcibase + (512 * 1024) +
1384 (ha->pci_function_number * 8);
1385
1386 ql_8021_update_crb_int_ptr(ha);
1387 ql_8021_set_drv_active(ha);
1388 break;
1389
1390 default:
1391 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1392 QL_NAME, instance, ha->device_id);
1393 goto attach_failed;
1394 }
1395
1396 ha->outstanding_cmds = kmem_zalloc(
1397 sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt,
1398 KM_SLEEP);
1399
1400 /* Setup interrupts */
1401 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1402 cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1403 "rval=%xh", QL_NAME, instance, rval);
1404 goto attach_failed;
1405 }
1406
1407 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1408
1409 /* Setup hba buffer. */
1410 if (ql_create_queues(ha) != QL_SUCCESS) {
1411 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1412 "alloc failed", QL_NAME, instance);
1413 goto attach_failed;
1414 }
1415 progress |= QL_HBA_BUFFER_SETUP;
1416
1417 /* Allocate resource for QLogic IOCTL */
1418 (void) ql_alloc_xioctl_resource(ha);
1419
1420
1421 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1422 cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1423 QL_NAME, instance);
1424 goto attach_failed;
1425 }
1426
1427 progress |= QL_NVRAM_CACHE_CREATED;
1428
1429 if (ql_plogi_params_desc_ctor(ha) != DDI_SUCCESS) {
1430 cmn_err(CE_WARN, "%s(%d): can't setup plogi params",
1431 QL_NAME, instance);
1432 goto attach_failed;
1433 }
1434
1435 progress |= QL_PLOGI_PARAMS_CREATED;
1436
1437 /*
1438 * Allocate an N Port information structure
1439 * for use when in P2P topology.
1440 */
1441 ha->n_port = (ql_n_port_info_t *)
1442 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1443 if (ha->n_port == NULL) {
1444 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1445 QL_NAME, instance);
1446 goto attach_failed;
1447 }
1448
1449 progress |= QL_N_PORT_INFO_CREATED;
1450
1451 /*
1452 * Determine support for Power Management
1453 */
1454 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1455
1456 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1457 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1458 if (cap == PCI_CAP_ID_PM) {
1459 ha->pm_capable = 1;
1460 break;
1461 }
1462 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1463 PCI_CAP_NEXT_PTR);
1464 }
1465
1466 if (ha->pm_capable) {
1467 /*
1468 * Enable PM for 2200 based HBAs only.
1469 */
1470 if (ha->device_id != 0x2200) {
1471 ha->pm_capable = 0;
1472 }
1473 }
1474
1475 if (ha->pm_capable) {
1476 ha->pm_capable = ql_enable_pm;
1477 }
1478
1479 if (ha->pm_capable) {
1480 /*
1481 * Initialize power management bookkeeping;
1482 * components are created idle.
1483 */
1484 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1485 pmcomps[0] = buf;
1486
1487 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1488 if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1489 dip, "pm-components", pmcomps,
1490 sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1491 DDI_PROP_SUCCESS) {
1492 cmn_err(CE_WARN, "%s(%d): failed to create"
1493 " pm-components property", QL_NAME,
1494 instance);
1495
1496 /* Initialize adapter. */
1497 ha->power_level = PM_LEVEL_D0;
1498 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1499 cmn_err(CE_WARN, "%s(%d): failed to"
1500 " initialize adapter", QL_NAME,
1501 instance);
1502 goto attach_failed;
1503 }
1504 } else {
1505 ha->power_level = PM_LEVEL_D3;
1506 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1507 PM_LEVEL_D0) != DDI_SUCCESS) {
1508 cmn_err(CE_WARN, "%s(%d): failed to"
1509 " raise power or initialize"
1510 " adapter", QL_NAME, instance);
1511 }
1512 }
1513 } else {
1514 /* Initialize adapter. */
1515 ha->power_level = PM_LEVEL_D0;
1516 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1517 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1518 " adapter", QL_NAME, instance);
1519 }
1520 }
1521
1522 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1523 ha->fw_subminor_version == 0) {
1524 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1525 QL_NAME, ha->instance);
1526 } else {
1527 int rval, rval1;
1528 char ver_fmt[256];
1529
1530 rval1 = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1531 "Firmware version %d.%d.%d", ha->fw_major_version,
1532 ha->fw_minor_version, ha->fw_subminor_version);
1533
1534 if (CFG_IST(ha, CFG_CTRL_81XX)) {
1535 rval = (int)snprintf(ver_fmt + rval1,
1536 (size_t)sizeof (ver_fmt),
1537 ", MPI fw version %d.%d.%d",
1538 ha->mpi_fw_major_version,
1539 ha->mpi_fw_minor_version,
1540 ha->mpi_fw_subminor_version);
1541
1542 if (ha->subsys_id == 0x17B ||
1543 ha->subsys_id == 0x17D) {
1544 (void) snprintf(ver_fmt + rval1 + rval,
1545 (size_t)sizeof (ver_fmt),
1546 ", PHY fw version %d.%d.%d",
1547 ha->phy_fw_major_version,
1548 ha->phy_fw_minor_version,
1549 ha->phy_fw_subminor_version);
1550 }
1551 }
1552 cmn_err(CE_NOTE, "!%s(%d): %s",
1553 QL_NAME, ha->instance, ver_fmt);
1554 }
1555
1556 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1557 "controller", KSTAT_TYPE_RAW,
1558 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1559 if (ha->k_stats == NULL) {
1560 cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1561 QL_NAME, instance);
1562 goto attach_failed;
1563 }
1564 progress |= QL_KSTAT_CREATED;
1565
1566 ha->adapter_stats->version = 1;
1567 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1568 ha->k_stats->ks_private = ha;
1569 ha->k_stats->ks_update = ql_kstat_update;
1570 ha->k_stats->ks_ndata = 1;
1571 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1572 kstat_install(ha->k_stats);
1573
1574 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1575 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1576 cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1577 QL_NAME, instance);
1578 goto attach_failed;
1579 }
1580 progress |= QL_MINOR_NODE_CREATED;
1581
1582 /* Allocate a transport structure for this instance */
1583 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1584 if (tran == NULL) {
1585 cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1586 QL_NAME, instance);
1587 goto attach_failed;
1588 }
1589
1590 progress |= QL_FCA_TRAN_ALLOCED;
1591
1592 /* fill in the structure */
1593 tran->fca_numports = 1;
1594 tran->fca_version = FCTL_FCA_MODREV_5;
1595 tran->fca_num_npivports = ha->max_vports ?
1596 ha->max_vports - 1 : 0;
1597 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1598 tran->fca_perm_pwwn.raw_wwn, 8);
1599
1600 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1601 ha->io_dma_attr = ha->bit64_io_dma_attr;
1602 ha->fcsm_cmd_dma_attr = ha->bit64_io_dma_attr;
1603 ha->fcsm_rsp_dma_attr = ha->bit64_io_dma_attr;
1604 ha->fcip_cmd_dma_attr = ha->bit64_io_dma_attr;
1605 ha->fcip_rsp_dma_attr = ha->bit64_io_dma_attr;
1606 ha->fcp_cmd_dma_attr = ha->bit64_io_dma_attr;
1607 ha->fcp_rsp_dma_attr = ha->bit64_io_dma_attr;
1608 ha->fcp_data_dma_attr = ha->bit64_io_dma_attr;
1609 } else {
1610 ha->io_dma_attr = ha->bit32_io_dma_attr;
1611 ha->fcsm_cmd_dma_attr = ha->bit32_io_dma_attr;
1612 ha->fcsm_rsp_dma_attr = ha->bit32_io_dma_attr;
1613 ha->fcip_cmd_dma_attr = ha->bit32_io_dma_attr;
1614 ha->fcip_rsp_dma_attr = ha->bit32_io_dma_attr;
1615 ha->fcp_cmd_dma_attr = ha->bit32_io_dma_attr;
1616 ha->fcp_rsp_dma_attr = ha->bit32_io_dma_attr;
1617 ha->fcp_data_dma_attr = ha->bit32_io_dma_attr;
1618 }
1619 ha->fcsm_cmd_dma_attr.dma_attr_sgllen = QL_FCSM_CMD_SGLLEN;
1620 ha->fcsm_rsp_dma_attr.dma_attr_sgllen = QL_FCSM_RSP_SGLLEN;
1621 ha->fcip_cmd_dma_attr.dma_attr_sgllen = QL_FCIP_CMD_SGLLEN;
1622 ha->fcip_rsp_dma_attr.dma_attr_sgllen = QL_FCIP_RSP_SGLLEN;
1623 ha->fcp_cmd_dma_attr.dma_attr_sgllen = QL_FCP_CMD_SGLLEN;
1624 ha->fcp_rsp_dma_attr.dma_attr_sgllen = QL_FCP_RSP_SGLLEN;
1625 if (CFG_IST(ha, CFG_CTRL_82XX)) {
1626 ha->io_dma_attr.dma_attr_flags |=
1627 DDI_DMA_RELAXED_ORDERING;
1628 ha->fcsm_cmd_dma_attr.dma_attr_flags |=
1629 DDI_DMA_RELAXED_ORDERING;
1630 ha->fcsm_rsp_dma_attr.dma_attr_flags |=
1631 DDI_DMA_RELAXED_ORDERING;
1632 ha->fcip_cmd_dma_attr.dma_attr_flags |=
1633 DDI_DMA_RELAXED_ORDERING;
1634 ha->fcip_rsp_dma_attr.dma_attr_flags |=
1635 DDI_DMA_RELAXED_ORDERING;
1636 ha->fcp_cmd_dma_attr.dma_attr_flags |=
1637 DDI_DMA_RELAXED_ORDERING;
1638 ha->fcp_rsp_dma_attr.dma_attr_flags |=
1639 DDI_DMA_RELAXED_ORDERING;
1640 ha->fcp_data_dma_attr.dma_attr_flags |=
1641 DDI_DMA_RELAXED_ORDERING;
1642 }
1643
1644 /* Specify the amount of space needed in each packet */
1645 tran->fca_pkt_size = sizeof (ql_srb_t);
1646
1647 /* command limits are usually dictated by hardware */
1648 tran->fca_cmd_max = ha->osc_max_cnt;
1649
1650 /* dmaattr are static, set elsewhere. */
1651 tran->fca_dma_attr = &ha->io_dma_attr;
1652 tran->fca_dma_fcp_cmd_attr = &ha->fcp_cmd_dma_attr;
1653 tran->fca_dma_fcp_rsp_attr = &ha->fcp_rsp_dma_attr;
1654 tran->fca_dma_fcp_data_attr = &ha->fcp_data_dma_attr;
1655 tran->fca_dma_fcsm_cmd_attr = &ha->fcsm_cmd_dma_attr;
1656 tran->fca_dma_fcsm_rsp_attr = &ha->fcsm_rsp_dma_attr;
1657 tran->fca_dma_fcip_cmd_attr = &ha->fcip_cmd_dma_attr;
1658 tran->fca_dma_fcip_rsp_attr = &ha->fcip_rsp_dma_attr;
1659 tran->fca_acc_attr = &ql_dev_acc_attr;
1660 tran->fca_iblock = &(ha->iblock_cookie);
1661
1662 /* the remaining values are simply function vectors */
1663 tran->fca_bind_port = ql_bind_port;
1664 tran->fca_unbind_port = ql_unbind_port;
1665 tran->fca_init_pkt = ql_init_pkt;
1666 tran->fca_un_init_pkt = ql_un_init_pkt;
1667 tran->fca_els_send = ql_els_send;
1668 tran->fca_get_cap = ql_get_cap;
1669 tran->fca_set_cap = ql_set_cap;
1670 tran->fca_getmap = ql_getmap;
1671 tran->fca_transport = ql_transport;
1672 tran->fca_ub_alloc = ql_ub_alloc;
1673 tran->fca_ub_free = ql_ub_free;
1674 tran->fca_ub_release = ql_ub_release;
1675 tran->fca_abort = ql_abort;
1676 tran->fca_reset = ql_reset;
1677 tran->fca_port_manage = ql_port_manage;
1678 tran->fca_get_device = ql_get_device;
1679
1680 EL(ha, "Transport interface setup. FCA version %d\n",
1681 tran->fca_version);
1682
1683 /* give it to the FC transport */
1684 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1685 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1686 instance);
1687 goto attach_failed;
1688 }
1689 progress |= QL_FCA_ATTACH_DONE;
1690
1691 /* Stash the structure so it can be freed at detach */
1692 ha->tran = tran;
1693
1694 /* Acquire global state lock. */
1695 GLOBAL_STATE_LOCK();
1696
1697 /* Add adapter structure to link list. */
1698 ql_add_link_b(&ql_hba, &ha->hba);
1699
1700 /* Determine and populate HBA fru info */
1701 ql_setup_fruinfo(ha);
1702
1703 /* Release global state lock. */
1704 GLOBAL_STATE_UNLOCK();
1705
1706 /* Start one second driver timer. */
1707 GLOBAL_TIMER_LOCK();
1708 if (ql_timer_timeout_id == NULL) {
1709 ql_timer_ticks = drv_usectohz(1000000);
1710 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1711 ql_timer_ticks);
1712 }
1713 GLOBAL_TIMER_UNLOCK();
1714
1715 /* Setup task_daemon thread. */
1716 (void) snprintf(taskq_name, sizeof (taskq_name),
1717 "qlc_%d_driver_thread", instance);
1718 ha->driver_thread_taskq = ddi_taskq_create(NULL, taskq_name, 1,
1719 TASKQ_DEFAULTPRI, 0);
1720 (void) ddi_taskq_dispatch(ha->driver_thread_taskq,
1721 ql_task_daemon, ha, DDI_SLEEP);
1722 ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
1723
1724 (void) snprintf(taskq_name, sizeof (taskq_name),
1725 "qlc_%d_comp_thd", instance);
1726 ha->completion_taskq = ddi_taskq_create(0, taskq_name,
1727 ha->completion_thds, maxclsyspri, 0);
1728 for (size = 0; size < ha->completion_thds; size++) {
1729 (void) ddi_taskq_dispatch(ha->completion_taskq,
1730 ql_completion_thread, ha, DDI_SLEEP);
1731 }
1732
1733 progress |= QL_TASK_DAEMON_STARTED;
1734
1735 ddi_report_dev(dip);
1736
1737 /* Disable link reset in panic path */
1738 ha->lip_on_panic = 1;
1739
1740 rval = DDI_SUCCESS;
1741 break;
1742
1743 attach_failed:
1744 if (progress & QL_FCA_INIT_FM) {
1745 qlc_fm_fini(ha);
1746 progress &= ~QL_FCA_INIT_FM;
1747 }
1748
1749 if (progress & QL_FCA_ATTACH_DONE) {
1750 (void) fc_fca_detach(dip);
1751 progress &= ~QL_FCA_ATTACH_DONE;
1752 }
1753
1754 if (progress & QL_FCA_TRAN_ALLOCED) {
1755 kmem_free(tran, sizeof (fc_fca_tran_t));
1756 progress &= ~QL_FCA_TRAN_ALLOCED;
1757 }
1758
1759 if (progress & QL_MINOR_NODE_CREATED) {
1760 ddi_remove_minor_node(dip, "devctl");
1761 progress &= ~QL_MINOR_NODE_CREATED;
1762 }
1763
1764 if (progress & QL_KSTAT_CREATED) {
1765 kstat_delete(ha->k_stats);
1766 progress &= ~QL_KSTAT_CREATED;
1767 }
1768
1769 if (progress & QL_N_PORT_INFO_CREATED) {
1770 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1771 progress &= ~QL_N_PORT_INFO_CREATED;
1772 }
1773
1774 if (progress & QL_PLOGI_PARAMS_CREATED) {
1775 (void) ql_plogi_params_desc_dtor(ha);
1776 progress &= ~QL_PLOGI_PARAMS_CREATED;
1777 }
1778
1779 if (progress & QL_NVRAM_CACHE_CREATED) {
1780 (void) ql_nvram_cache_desc_dtor(ha);
1781 progress &= ~QL_NVRAM_CACHE_CREATED;
1782 }
1783
1784 if (progress & QL_TASK_DAEMON_STARTED) {
1785 if (ha->driver_thread_taskq) {
1786 while (ha->task_daemon_flags &
1787 TASK_DAEMON_ALIVE_FLG) {
1788 /* Delay for 1 tick (10 ms). */
1789 ql_awaken_task_daemon(ha, NULL,
1790 TASK_DAEMON_STOP_FLG, 0);
1791 delay(1);
1792 }
1793 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1794
1795 ddi_taskq_destroy(ha->driver_thread_taskq);
1796 ha->driver_thread_taskq = NULL;
1797 }
1798 if (ha->completion_taskq) {
1799 ADAPTER_STATE_LOCK(ha);
1800 ha->flags |= COMP_THD_TERMINATE;
1801 ADAPTER_STATE_UNLOCK(ha);
1802
1803 do {
1804 COMP_Q_LOCK(ha);
1805 cv_broadcast(&ha->cv_comp_thread);
1806 COMP_Q_UNLOCK(ha);
1807 ql_delay(ha, 10000);
1808 } while (ha->comp_thds_active != 0);
1809
1810 ddi_taskq_destroy(ha->completion_taskq);
1811 ha->completion_taskq = NULL;
1812 }
1813 progress &= ~QL_TASK_DAEMON_STARTED;
1814 }
1815
1816 if (progress & QL_DB_IOBASE_MAPPED) {
1817 ql_8021_clr_drv_active(ha);
1818 ddi_regs_map_free(&ha->db_dev_handle);
1819 progress &= ~QL_DB_IOBASE_MAPPED;
1820 }
1821 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1822 ddi_regs_map_free(&ha->iomap_dev_handle);
1823 progress &= ~QL_IOMAP_IOBASE_MAPPED;
1824 }
1825 if (progress & QL_REGS_MAPPED) {
1826 if (ha->mbar_dev_handle) {
1827 ddi_regs_map_free(&ha->mbar_dev_handle);
1828 ha->mbar_dev_handle = 0;
1829 }
1830 }
1831
1832 if (progress & QL_CONFIG_SPACE_SETUP) {
1833 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1834 ddi_regs_map_free(&ha->sbus_config_handle);
1835 } else {
1836 pci_config_teardown(&ha->pci_handle);
1837 }
1838 progress &= ~QL_CONFIG_SPACE_SETUP;
1839 }
1840
1841 if (progress & QL_INTR_ADDED) {
1842 ql_disable_intr(ha);
1843 ql_release_intr(ha);
1844 progress &= ~QL_INTR_ADDED;
1845 }
1846
1847 if (progress & QL_MUTEX_CV_INITED) {
1848 ql_destroy_mutex(ha);
1849 progress &= ~QL_MUTEX_CV_INITED;
1850 }
1851
1852 if (progress & QL_HBA_BUFFER_SETUP) {
1853 ql_delete_queues(ha);
1854 progress &= ~QL_HBA_BUFFER_SETUP;
1855 }
1856
1857 if (progress & QL_REGS_MAPPED) {
1858 ddi_regs_map_free(&ha->dev_handle);
1859 if (ha->sbus_fpga_iobase != NULL) {
1860 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1861 }
1862 progress &= ~QL_REGS_MAPPED;
1863 }
1864
1865 if (progress & QL_SOFT_STATE_ALLOCED) {
1866
1867 ql_fcache_rel(ha->fcache);
1868
1869 kmem_free(ha->adapter_stats,
1870 sizeof (*ha->adapter_stats));
1871
1872 kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1873 QL_UB_LIMIT);
1874
1875 if (ha->outstanding_cmds != NULL) {
1876 kmem_free(ha->outstanding_cmds,
1877 sizeof (*ha->outstanding_cmds) *
1878 ha->osc_max_cnt);
1879 }
1880
1881 if (ha->devpath != NULL) {
1882 kmem_free(ha->devpath,
1883 strlen(ha->devpath) + 1);
1884 }
1885
1886 kmem_free(ha->dev, sizeof (*ha->dev) *
1887 DEVICE_HEAD_LIST_SIZE);
1888
1889 if (ha->xioctl != NULL) {
1890 ql_free_xioctl_resource(ha);
1891 }
1892
1893 if (ha->fw_module != NULL) {
1894 (void) ddi_modclose(ha->fw_module);
1895 }
1896 (void) ql_el_trace_dealloc(ha);
1897
1898 ddi_soft_state_free(ql_state, instance);
1899 progress &= ~QL_SOFT_STATE_ALLOCED;
1900 }
1901
1902 ddi_prop_remove_all(dip);
1903 rval = DDI_FAILURE;
1904 break;
1905
1906 case DDI_RESUME:
1907 rval = DDI_FAILURE;
1908
1909 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1910 if (ha == NULL) {
1911 cmn_err(CE_WARN, "%s(%d): can't get soft state",
1912 QL_NAME, instance);
1913 break;
1914 }
1915
1916 ha->power_level = PM_LEVEL_D3;
1917 if (ha->pm_capable) {
1918 /*
1919 * Get ql_power to do power on initialization
1920 */
1921 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1922 PM_LEVEL_D0) != DDI_SUCCESS) {
1923 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1924 " power", QL_NAME, instance);
1925 }
1926 }
1927
1928 /*
1929 * There is a bug in DR that prevents PM framework
1930 * from calling ql_power.
1931 */
1932 if (ha->power_level == PM_LEVEL_D3) {
1933 ha->power_level = PM_LEVEL_D0;
1934
1935 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1936 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1937 " adapter", QL_NAME, instance);
1938 }
1939
1940 /* Wake up task_daemon. */
1941 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1942 0);
1943 }
1944
1945 /* Restart driver timer. */
1946 GLOBAL_TIMER_LOCK();
1947 if (ql_timer_timeout_id == NULL) {
1948 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1949 ql_timer_ticks);
1950 }
1951 GLOBAL_TIMER_LOCK();
1952
1953 /* Wake up command start routine. */
1954 ADAPTER_STATE_LOCK(ha);
1955 ha->flags &= ~ADAPTER_SUSPENDED;
1956 ADAPTER_STATE_UNLOCK(ha);
1957
1958 rval = DDI_SUCCESS;
1959
1960 /* Restart IP if it was running. */
1961 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1962 (void) ql_initialize_ip(ha);
1963 ql_isp_rcvbuf(ha);
1964 }
1965 break;
1966
1967 default:
1968 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1969 " %x", QL_NAME, ddi_get_instance(dip), cmd);
1970 rval = DDI_FAILURE;
1971 break;
1972 }
1973
1974 kmem_free(buf, MAXPATHLEN);
1975
1976 if (rval != DDI_SUCCESS) {
1977 /*EMPTY*/
1978 QL_PRINT_2(ha, "failed instance=%d, rval = %xh\n",
1979 ddi_get_instance(dip), rval);
1980 } else {
1981 /*EMPTY*/
1982 QL_PRINT_3(ha, "done\n");
1983 }
1984
1985 return (rval);
1986 }
1987
1988 /*
1989 * ql_detach
1990 * Used to remove all the states associated with a given
1991 * instances of a device node prior to the removal of that
1992 * instance from the system.
1993 *
1994 * Input:
1995 * dip = pointer to device information structure.
1996 * cmd = type of detach.
1997 *
1998 * Returns:
1999 * DDI_SUCCESS or DDI_FAILURE.
2000 *
2001 * Context:
2002 * Kernel context.
2003 */
static int
ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	ql_adapter_state_t *ha, *vha;
	ql_tgt_t *tq;
	uint16_t index;
	ql_link_t *link;
	char *buf;
	timeout_id_t timer_id = NULL;
	int suspend, rval = DDI_SUCCESS;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(NULL, "no adapter, instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(ha, "started, cmd=%xh\n", cmd);

	/*
	 * NOTE(review): buf is allocated here and freed at the bottom but
	 * is never written or read in this function — looks like a leftover
	 * from ql_attach()'s pattern; candidate for removal (verify no
	 * macro such as EL/QL_PRINT references it indirectly).
	 */
	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * Mark the adapter suspended first so no new commands are
		 * started while teardown proceeds.
		 */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
		ADAPTER_STATE_UNLOCK(ha);

		/* Wait for task thread to see suspend flag. */
		while (!(ha->task_daemon_flags & TASK_DAEMON_STALLED_FLG) &&
		    ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
			ql_awaken_task_daemon(ha, NULL, 0, 0);
			/* Delay for 1 tick (10 milliseconds). */
			delay(1);
		}

		/* Stop and destroy the driver task daemon taskq. */
		if (ha->driver_thread_taskq) {
			while (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
				/* Delay for 1 tick (10 milliseconds). */
				ql_awaken_task_daemon(ha, NULL,
				    TASK_DAEMON_STOP_FLG, 0);
				delay(1);
			}
			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;

			ddi_taskq_destroy(ha->driver_thread_taskq);
			ha->driver_thread_taskq = NULL;
		}

		/*
		 * Tell the completion threads to terminate, then wait for
		 * all of them to exit before destroying their taskq.
		 */
		if (ha->completion_taskq) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= COMP_THD_TERMINATE;
			ADAPTER_STATE_UNLOCK(ha);

			do {
				COMP_Q_LOCK(ha);
				cv_broadcast(&ha->cv_comp_thread);
				COMP_Q_UNLOCK(ha);
				ql_delay(ha, 10000);
			} while (ha->comp_thds_active != 0);

			ddi_taskq_destroy(ha->completion_taskq);
			ha->completion_taskq = NULL;
		}

		/* Disable driver timer if no adapters. */
		GLOBAL_TIMER_LOCK();
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_TIMER_UNLOCK();

		/* untimeout() must be called without the timer lock held. */
		if (timer_id) {
			(void) untimeout(timer_id);
		}

		/* Unlink this HBA from the global adapter list. */
		GLOBAL_STATE_LOCK();
		ql_remove_link(&ql_hba, &ha->hba);
		GLOBAL_STATE_UNLOCK();

		if (ha->pm_capable) {
			if (pm_lower_power(dip, QL_POWER_COMPONENT,
			    PM_LEVEL_D3) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s(%d): failed to lower the"
				    " power", QL_NAME, ha->instance);
			}
		}

		/*
		 * If pm_lower_power shutdown the adapter, there
		 * isn't much else to do
		 */
		if (ha->power_level != PM_LEVEL_D3) {
			ql_halt(ha, PM_LEVEL_D3);
		}

		/* Remove virtual ports. */
		while ((vha = ha->vp_next) != NULL) {
			ql_vport_destroy(vha);
		}

		/* Free target queues. */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			link = ha->dev[index].first;
			while (link != NULL) {
				tq = link->base_address;
				/* Advance before ql_dev_free releases tq. */
				link = link->next;
				ql_dev_free(ha, tq);
			}
		}

		/*
		 * Free unsolicited buffers.
		 * If we are here then there are no ULPs still
		 * alive that wish to talk to ql so free up
		 * any SRB_IP_UB_UNUSED buffers that are
		 * lingering around
		 */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			fc_unsol_buf_t *ubp = ha->ub_array[index];

			if (ubp != NULL) {
				ql_srb_t *sp = ubp->ub_fca_private;

				sp->flags |= SRB_UB_FREE_REQUESTED;

				/*
				 * Poll (dropping the UB lock each pass) until
				 * the buffer is back in the FCA and not in a
				 * callback or acquired by a ULP.
				 */
				while (!(sp->flags & SRB_UB_IN_FCA) ||
				    (sp->flags & (SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED))) {
					QL_UB_UNLOCK(ha);
					delay(drv_usectohz(100000));
					QL_UB_LOCK(ha);
				}
				ha->ub_array[index] = NULL;

				QL_UB_UNLOCK(ha);
				ql_free_unsolicited_buffer(ha, ubp);
				QL_UB_LOCK(ha);
			}
		}
		QL_UB_UNLOCK(ha);

		/* Free any saved RISC code. */
		if (ha->risc_code != NULL) {
			kmem_free(ha->risc_code, ha->risc_code_size);
			ha->risc_code = NULL;
			ha->risc_code_size = 0;
		}

		/* Release the firmware module, if one was loaded. */
		if (ha->fw_module != NULL) {
			(void) ddi_modclose(ha->fw_module);
			ha->fw_module = NULL;
		}

		/* Free resources. */
		ddi_prop_remove_all(dip);
		(void) fc_fca_detach(dip);
		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
		ddi_remove_minor_node(dip, "devctl");
		if (ha->k_stats != NULL) {
			kstat_delete(ha->k_stats);
		}

		/* Tear down register mappings per bus attachment type. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ddi_regs_map_free(&ha->sbus_config_handle);
		} else {
			if (CFG_IST(ha, CFG_CTRL_82XX)) {
				ql_8021_clr_drv_active(ha);
				ddi_regs_map_free(&ha->db_dev_handle);
			}
			if (ha->iomap_dev_handle != ha->dev_handle) {
				ddi_regs_map_free(&ha->iomap_dev_handle);
			}
			pci_config_teardown(&ha->pci_handle);
		}

		ql_disable_intr(ha);
		ql_release_intr(ha);

		ql_free_xioctl_resource(ha);

		ql_destroy_mutex(ha);

		ql_delete_queues(ha);
		ql_free_phys(ha, &ha->fwexttracebuf);
		ql_free_phys(ha, &ha->fwfcetracebuf);

		ddi_regs_map_free(&ha->dev_handle);
		if (ha->sbus_fpga_iobase != NULL) {
			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
		}
		if (ha->mbar_dev_handle != NULL) {
			ddi_regs_map_free(&ha->mbar_dev_handle);
		}

		/* Release flash/VPD caches and per-adapter allocations. */
		ql_fcache_rel(ha->fcache);
		if (ha->vcache != NULL) {
			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
		}

		if (ha->pi_attrs != NULL) {
			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
		}

		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));

		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);

		kmem_free(ha->outstanding_cmds,
		    sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt);

		if (ha->n_port != NULL) {
			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
		}

		if (ha->devpath != NULL) {
			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
		}

		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);

		(void) ql_plogi_params_desc_dtor(ha);

		(void) ql_nvram_cache_desc_dtor(ha);

		(void) qlc_fm_fini(ha);

		EL(ha, "detached\n");

		/* Trace log must outlive the EL() call above. */
		(void) ql_el_trace_dealloc(ha);

		ddi_soft_state_free(ql_state, (int)ha->instance);

		rval = DDI_SUCCESS;

		break;

	case DDI_SUSPEND:
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= ADAPTER_SUSPENDED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Disable driver timer if last adapter. */
		GLOBAL_TIMER_LOCK();
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_TIMER_UNLOCK();

		if (timer_id) {
			(void) untimeout(timer_id);
		}

		if (ha->flags & IP_INITIALIZED) {
			(void) ql_shutdown_ip(ha);
		}

		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
			/* Suspend failed; undo the suspended state. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
			    QL_NAME, ha->instance, suspend);

			/* Restart IP if it was running. */
			if (ha->flags & IP_ENABLED &&
			    !(ha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(ha);
				ql_isp_rcvbuf(ha);
			}
			rval = DDI_FAILURE;
			break;
		}

		EL(ha, "suspended\n");

		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	kmem_free(buf, MAXPATHLEN);

	if (rval != DDI_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
2303
2304 /*
2305 * ql_power
2306 * Power a device attached to the system.
2307 *
2308 * Input:
2309 * dip = pointer to device information structure.
2310 * component = device.
2311 * level = power level.
2312 *
2313 * Returns:
2314 * DDI_SUCCESS or DDI_FAILURE.
2315 *
2316 * Context:
2317 * Kernel context.
2318 */
2319 /* ARGSUSED */
2320 static int
2321 ql_power(dev_info_t *dip, int component, int level)
2322 {
2323 int rval = DDI_FAILURE;
2324 off_t csr;
2325 uint8_t saved_pm_val;
2326 ql_adapter_state_t *ha;
2327 char *buf;
2328 char *path;
2329
2330 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2331 if (ha == NULL || ha->pm_capable == 0) {
2332 QL_PRINT_2(ha, "no hba or PM not supported\n");
2333 return (rval);
2334 }
2335
2336 QL_PRINT_10(ha, "started\n");
2337
2338 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2339 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2340
2341 if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2342 level != PM_LEVEL_D3)) {
2343 EL(ha, "invalid, component=%xh or level=%xh\n",
2344 component, level);
2345 return (rval);
2346 }
2347
2348 GLOBAL_HW_LOCK();
2349 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2350 GLOBAL_HW_UNLOCK();
2351
2352 (void) snprintf(buf, MAXPATHLEN,
2353 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2354 ddi_pathname(dip, path));
2355
2356 switch (level) {
2357 case PM_LEVEL_D0: /* power up to D0 state - fully on */
2358
2359 QL_PM_LOCK(ha);
2360 if (ha->power_level == PM_LEVEL_D0) {
2361 QL_PM_UNLOCK(ha);
2362 rval = DDI_SUCCESS;
2363 break;
2364 }
2365
2366 /*
2367 * Enable interrupts now
2368 */
2369 saved_pm_val = ha->power_level;
2370 ha->power_level = PM_LEVEL_D0;
2371 QL_PM_UNLOCK(ha);
2372
2373 GLOBAL_HW_LOCK();
2374
2375 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2376
2377 /*
2378 * Delay after reset, for chip to recover.
2379 * Otherwise causes system PANIC
2380 */
2381 drv_usecwait(200000);
2382
2383 GLOBAL_HW_UNLOCK();
2384
2385 if (ha->config_saved) {
2386 ha->config_saved = 0;
2387 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2388 QL_PM_LOCK(ha);
2389 ha->power_level = saved_pm_val;
2390 QL_PM_UNLOCK(ha);
2391 cmn_err(CE_WARN, "%s failed to restore "
2392 "config regs", buf);
2393 break;
2394 }
2395 }
2396
2397 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2398 cmn_err(CE_WARN, "%s adapter initialization failed",
2399 buf);
2400 }
2401
2402 /* Wake up task_daemon. */
2403 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2404 TASK_DAEMON_SLEEPING_FLG, 0);
2405
2406 /* Restart IP if it was running. */
2407 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2408 (void) ql_initialize_ip(ha);
2409 ql_isp_rcvbuf(ha);
2410 }
2411
2412 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2413 ha->instance, QL_NAME);
2414
2415 rval = DDI_SUCCESS;
2416 break;
2417
2418 case PM_LEVEL_D3: /* power down to D3 state - off */
2419
2420 QL_PM_LOCK(ha);
2421
2422 if (ha->pm_busy || ((ha->task_daemon_flags &
2423 TASK_DAEMON_SLEEPING_FLG) == 0)) {
2424 QL_PM_UNLOCK(ha);
2425 break;
2426 }
2427
2428 if (ha->power_level == PM_LEVEL_D3) {
2429 rval = DDI_SUCCESS;
2430 QL_PM_UNLOCK(ha);
2431 break;
2432 }
2433 QL_PM_UNLOCK(ha);
2434
2435 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2436 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2437 " config regs", QL_NAME, ha->instance, buf);
2438 break;
2439 }
2440 ha->config_saved = 1;
2441
2442 /*
2443 * Don't enable interrupts. Running mailbox commands with
2444 * interrupts enabled could cause hangs since pm_run_scan()
2445 * runs out of a callout thread and on single cpu systems
2446 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2447 * would not get to run.
2448 */
2449 TASK_DAEMON_LOCK(ha);
2450 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2451 TASK_DAEMON_UNLOCK(ha);
2452
2453 ql_halt(ha, PM_LEVEL_D3);
2454
2455 /*
2456 * Setup ql_intr to ignore interrupts from here on.
2457 */
2458 QL_PM_LOCK(ha);
2459 ha->power_level = PM_LEVEL_D3;
2460 QL_PM_UNLOCK(ha);
2461
2462 /*
2463 * Wait for ISR to complete.
2464 */
2465 INTR_LOCK(ha);
2466 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2467 INTR_UNLOCK(ha);
2468
2469 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2470 ha->instance, QL_NAME);
2471
2472 rval = DDI_SUCCESS;
2473 break;
2474 }
2475
2476 kmem_free(buf, MAXPATHLEN);
2477 kmem_free(path, MAXPATHLEN);
2478
2479 QL_PRINT_10(ha, "done\n");
2480
2481 return (rval);
2482 }
2483
2484 /*
2485 * ql_quiesce
2486 * quiesce a device attached to the system.
2487 *
2488 * Input:
2489 * dip = pointer to device information structure.
2490 *
2491 * Returns:
2492 * DDI_SUCCESS
2493 *
2494 * Context:
2495 * Kernel context.
2496 */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t *ha;
	uint32_t timer;
	uint32_t stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(NULL, "no adapter, instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * 82xx (NetXtreme II-class) path: issue mailbox commands via
		 * the NX doorbell and poll risc2host for completion.
		 * BIT_15 signals an interrupt is pending; a status byte
		 * below 0x12 is treated as mailbox completion
		 * (NOTE(review): exact status semantics come from the
		 * firmware interface spec — confirm against ql_isr.c).
		 */
		ql_8021_clr_hw_intr(ha);
		ql_8021_clr_fw_intr(ha);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_TOGGLE_INTERRUPT);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		/* Poll up to 20000 * 100us = 2 seconds. */
		for (timer = 0; timer < 20000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				ql_8021_clr_hw_intr(ha);
				if ((stat & 0xff) < 0x12) {
					ql_8021_clr_fw_intr(ha);
					break;
				}
				ql_8021_clr_fw_intr(ha);
			}
			drv_usecwait(100);
		}
		/* Mask the legacy interrupt, then stop the firmware. */
		ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		for (timer = 0; timer < 20000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				ql_8021_clr_hw_intr(ha);
				if ((stat & 0xff) < 0x12) {
					ql_8021_clr_fw_intr(ha);
					break;
				}
				ql_8021_clr_fw_intr(ha);
			}
			drv_usecwait(100);
		}
	} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		/*
		 * 24xx-class path: stop firmware via the mailbox registers
		 * and the host interrupt doorbell, then hard-reset the ISP.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT16_IO_REG(ha, mailbox_in[2], 0);
		WRT16_IO_REG(ha, mailbox_in[3], 0);
		WRT16_IO_REG(ha, mailbox_in[4], 0);
		WRT16_IO_REG(ha, mailbox_in[5], 0);
		WRT16_IO_REG(ha, mailbox_in[6], 0);
		WRT16_IO_REG(ha, mailbox_in[7], 0);
		WRT16_IO_REG(ha, mailbox_in[8], 0);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/* Poll up to 30000 * 100us = 3 seconds for completion. */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		if (CFG_IST(ha, CFG_MWB_4096_SUPPORT)) {
			WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
			    MWB_4096_BYTES);
		} else {
			WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN);
		}
		drv_usecwait(100);

	} else {
		/* Legacy 2xxx path: quiet the RISC directly. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
2598
2599 /* ************************************************************************ */
2600 /* Fibre Channel Adapter (FCA) Transport Functions. */
2601 /* ************************************************************************ */
2602
2603 /*
2604 * ql_bind_port
2605 * Handling port binding. The FC Transport attempts to bind an FCA port
2606 * when it is ready to start transactions on the port. The FC Transport
2607 * will call the fca_bind_port() function specified in the fca_transport
2608 * structure it receives. The FCA must fill in the port_info structure
2609 * passed in the call and also stash the information for future calls.
2610 *
2611 * Input:
2612 * dip = pointer to FCA information structure.
2613 * port_info = pointer to port information structure.
2614 * bind_info = pointer to bind information structure.
2615 *
2616 * Returns:
2617 * NULL = failure
2618 *
2619 * Context:
2620 * Kernel context.
2621 */
2622 static opaque_t
2623 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2624 fc_fca_bind_info_t *bind_info)
2625 {
2626 ql_adapter_state_t *ha, *vha;
2627 opaque_t fca_handle = NULL;
2628 port_id_t d_id;
2629 int port_npiv = bind_info->port_npiv;
2630 uchar_t *port_nwwn = bind_info->port_nwwn.raw_wwn;
2631 uchar_t *port_pwwn = bind_info->port_pwwn.raw_wwn;
2632
2633 /* get state info based on the dip */
2634 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2635 if (ha == NULL) {
2636 QL_PRINT_2(ha, "no adapter, instance=%d\n",
2637 ddi_get_instance(dip));
2638 return (NULL);
2639 }
2640 QL_PRINT_10(ha, "started\n");
2641
2642 /* Verify port number is supported. */
2643 if (port_npiv != 0) {
2644 if (!(ha->flags & VP_ENABLED)) {
2645 QL_PRINT_2(ha, "FC_NPIV_NOT_SUPPORTED\n");
2646 port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2647 return (NULL);
2648 }
2649 if (!(ha->flags & POINT_TO_POINT)) {
2650 QL_PRINT_2(ha, "FC_NPIV_WRONG_TOPOLOGY\n");
2651 port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2652 return (NULL);
2653 }
2654 if (!(ha->flags & FDISC_ENABLED)) {
2655 QL_PRINT_2(ha, "switch does not support "
2656 "FDISC\n");
2657 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2658 return (NULL);
2659 }
2660 if (bind_info->port_num >= ha->max_vports) {
2661 QL_PRINT_2(ha, "port number=%d "
2662 "FC_OUTOFBOUNDS\n", bind_info->port_num);
2663 port_info->pi_error = FC_OUTOFBOUNDS;
2664 return (NULL);
2665 }
2666 } else if (bind_info->port_num != 0) {
2667 QL_PRINT_2(ha, "failed, port number=%d is not "
2668 "supported\n", bind_info->port_num);
2669 port_info->pi_error = FC_OUTOFBOUNDS;
2670 return (NULL);
2671 }
2672
2673 /* Locate port context. */
2674 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2675 if (vha->vp_index == bind_info->port_num) {
2676 break;
2677 }
2678 }
2679
2680 /* If virtual port does not exist. */
2681 if (vha == NULL) {
2682 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2683 }
2684
2685 /* make sure this port isn't already bound */
2686 if (vha->flags & FCA_BOUND) {
2687 port_info->pi_error = FC_ALREADY;
2688 } else {
2689 if (vha->vp_index != 0) {
2690 bcopy(port_nwwn,
2691 vha->loginparams.node_ww_name.raw_wwn, 8);
2692 bcopy(port_pwwn,
2693 vha->loginparams.nport_ww_name.raw_wwn, 8);
2694 }
2695 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2696 if (ql_vport_enable(vha) != QL_SUCCESS) {
2697 QL_PRINT_2(ha, "failed to enable "
2698 "virtual port=%d\n",
2699 vha->vp_index);
2700 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2701 return (NULL);
2702 }
2703 cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2704 "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2705 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2706 QL_NAME, ha->instance, vha->vp_index,
2707 port_pwwn[0], port_pwwn[1], port_pwwn[2],
2708 port_pwwn[3], port_pwwn[4], port_pwwn[5],
2709 port_pwwn[6], port_pwwn[7],
2710 port_nwwn[0], port_nwwn[1], port_nwwn[2],
2711 port_nwwn[3], port_nwwn[4], port_nwwn[5],
2712 port_nwwn[6], port_nwwn[7]);
2713 }
2714
2715 /* stash the bind_info supplied by the FC Transport */
2716 vha->bind_info.port_handle = bind_info->port_handle;
2717 vha->bind_info.port_statec_cb = bind_info->port_statec_cb;
2718 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2719
2720 /* Set port's source ID. */
2721 port_info->pi_s_id.port_id = vha->d_id.b24;
2722
2723 /* copy out the default login parameters */
2724 bcopy((void *)&vha->loginparams,
2725 (void *)&port_info->pi_login_params,
2726 sizeof (la_els_logi_t));
2727
2728 /* Set port's hard address if enabled. */
2729 port_info->pi_hard_addr.hard_addr = 0;
2730 if (bind_info->port_num == 0) {
2731 d_id.b24 = ha->d_id.b24;
2732 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2733 if (ha->init_ctrl_blk.cb24.
2734 firmware_options_1[0] & BIT_0) {
2735 d_id.b.al_pa = ql_index_to_alpa[ha->
2736 init_ctrl_blk.cb24.
2737 hard_address[0]];
2738 port_info->pi_hard_addr.hard_addr =
2739 d_id.b24;
2740 }
2741 } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2742 BIT_0) {
2743 d_id.b.al_pa = ql_index_to_alpa[ha->
2744 init_ctrl_blk.cb.hard_address[0]];
2745 port_info->pi_hard_addr.hard_addr = d_id.b24;
2746 }
2747
2748 /* Set the node id data */
2749 if (ql_get_rnid_params(ha,
2750 sizeof (port_info->pi_rnid_params.params),
2751 (caddr_t)&port_info->pi_rnid_params.params) ==
2752 QL_SUCCESS) {
2753 port_info->pi_rnid_params.status = FC_SUCCESS;
2754 } else {
2755 port_info->pi_rnid_params.status = FC_FAILURE;
2756 }
2757
2758 /* Populate T11 FC-HBA details */
2759 ql_populate_hba_fru_details(ha, port_info);
2760 ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2761 KM_SLEEP);
2762 if (ha->pi_attrs != NULL) {
2763 bcopy(&port_info->pi_attrs, ha->pi_attrs,
2764 sizeof (fca_port_attrs_t));
2765 }
2766 } else {
2767 port_info->pi_rnid_params.status = FC_FAILURE;
2768 if (ha->pi_attrs != NULL) {
2769 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2770 sizeof (fca_port_attrs_t));
2771 }
2772 }
2773
2774 /* Generate handle for this FCA. */
2775 fca_handle = (opaque_t)vha;
2776
2777 ADAPTER_STATE_LOCK(ha);
2778 vha->flags |= FCA_BOUND;
2779 ADAPTER_STATE_UNLOCK(ha);
2780 /* Set port's current state. */
2781 port_info->pi_port_state = vha->state;
2782 }
2783
2784 QL_PRINT_10(ha, "done, pi_port_state=%xh, "
2785 "pi_s_id.port_id=%xh\n",
2786 port_info->pi_port_state, port_info->pi_s_id.port_id);
2787
2788 return (fca_handle);
2789 }
2790
2791 /*
2792 * ql_unbind_port
2793 * To unbind a Fibre Channel Adapter from an FC Port driver.
2794 *
2795 * Input:
2796 * fca_handle = handle setup by ql_bind_port().
2797 *
2798 * Context:
2799 * Kernel context.
2800 */
2801 static void
2802 ql_unbind_port(opaque_t fca_handle)
2803 {
2804 ql_adapter_state_t *ha;
2805 ql_tgt_t *tq;
2806 uint32_t flgs;
2807
2808 ha = ql_fca_handle_to_state(fca_handle);
2809 if (ha == NULL) {
2810 /*EMPTY*/
2811 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2812 (void *)fca_handle);
2813 } else {
2814 QL_PRINT_10(ha, "started\n");
2815
2816 if (!(ha->flags & FCA_BOUND)) {
2817 /*EMPTY*/
2818 QL_PRINT_2(ha, "port already unbound\n");
2819 } else {
2820 if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2821 (void) ql_vport_control(ha, (uint8_t)
2822 (CFG_IST(ha, CFG_FC_TYPE) ?
2823 VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2824 if ((tq = ql_loop_id_to_queue(ha,
2825 FL_PORT_24XX_HDL)) != NULL) {
2826 (void) ql_logout_fabric_port(ha, tq);
2827 }
2828 flgs = FCA_BOUND | VP_ENABLED;
2829 } else {
2830 flgs = FCA_BOUND;
2831 }
2832 ADAPTER_STATE_LOCK(ha);
2833 ha->flags &= ~flgs;
2834 ADAPTER_STATE_UNLOCK(ha);
2835 }
2836
2837 QL_PRINT_10(ha, "done\n");
2838 }
2839 }
2840
2841 /*
2842 * ql_init_pkt
2843 * Initialize FCA portion of packet.
2844 *
2845 * Input:
2846 * fca_handle = handle setup by ql_bind_port().
2847 * pkt = pointer to fc_packet.
2848 *
2849 * Returns:
2850 * FC_SUCCESS - the packet has successfully been initialized.
2851 * FC_UNBOUND - the fca_handle specified is not bound.
2852 * FC_NOMEM - the FCA failed initialization due to an allocation error.
2853 * FC_FAILURE - the FCA failed initialization for undisclosed reasons
2854 *
2855 * Context:
2856 * Kernel context.
2857 */
2858 /* ARGSUSED */
2859 static int
2860 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2861 {
2862 ql_adapter_state_t *ha;
2863 ql_srb_t *sp;
2864 int rval = FC_SUCCESS;
2865
2866 ha = ql_fca_handle_to_state(fca_handle);
2867 if (ha == NULL) {
2868 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2869 (void *)fca_handle);
2870 return (FC_UNBOUND);
2871 }
2872 QL_PRINT_3(ha, "started\n");
2873
2874 sp = (ql_srb_t *)pkt->pkt_fca_private;
2875 sp->flags = 0;
2876 sp->handle = 0;
2877
2878 /* init cmd links */
2879 sp->cmd.base_address = sp;
2880 sp->cmd.prev = NULL;
2881 sp->cmd.next = NULL;
2882 sp->cmd.head = NULL;
2883
2884 /* init watchdog links */
2885 sp->wdg.base_address = sp;
2886 sp->wdg.prev = NULL;
2887 sp->wdg.next = NULL;
2888 sp->wdg.head = NULL;
2889 sp->pkt = pkt;
2890 sp->ha = ha;
2891 sp->magic_number = QL_FCA_BRAND;
2892 sp->sg_dma.dma_handle = NULL;
2893 #ifndef __sparc
2894 if (CFG_IST(ha, CFG_CTRL_82XX)) {
2895 /* Setup DMA for scatter gather list. */
2896 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2897 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2898 sp->sg_dma.max_cookie_count = 1;
2899 sp->sg_dma.alignment = 64;
2900 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2901 rval = FC_NOMEM;
2902 }
2903 }
2904 #endif /* __sparc */
2905
2906 QL_PRINT_3(ha, "done\n");
2907
2908 return (rval);
2909 }
2910
2911 /*
2912 * ql_un_init_pkt
2913 * Release all local resources bound to packet.
2914 *
2915 * Input:
2916 * fca_handle = handle setup by ql_bind_port().
2917 * pkt = pointer to fc_packet.
2918 *
2919 * Returns:
2920 * FC_SUCCESS - the packet has successfully been invalidated.
2921 * FC_UNBOUND - the fca_handle specified is not bound.
2922 * FC_BADPACKET - the packet has not been initialized or has
2923 * already been freed by this FCA.
2924 *
2925 * Context:
2926 * Kernel context.
2927 */
2928 static int
2929 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2930 {
2931 ql_adapter_state_t *ha;
2932 int rval;
2933 ql_srb_t *sp;
2934
2935 ha = ql_fca_handle_to_state(fca_handle);
2936 if (ha == NULL) {
2937 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2938 (void *)fca_handle);
2939 return (FC_UNBOUND);
2940 }
2941 QL_PRINT_3(ha, "started\n");
2942
2943 sp = (ql_srb_t *)pkt->pkt_fca_private;
2944
2945 if (sp->magic_number != QL_FCA_BRAND) {
2946 EL(ha, "failed, FC_BADPACKET\n");
2947 rval = FC_BADPACKET;
2948 } else {
2949 sp->magic_number = NULL;
2950 ql_free_phys(ha, &sp->sg_dma);
2951 rval = FC_SUCCESS;
2952 }
2953
2954 QL_PRINT_3(ha, "done\n");
2955
2956 return (rval);
2957 }
2958
2959 /*
2960 * ql_els_send
2961 * Issue a extended link service request.
2962 *
2963 * Input:
2964 * fca_handle = handle setup by ql_bind_port().
2965 * pkt = pointer to fc_packet.
2966 *
2967 * Returns:
2968 * FC_SUCCESS - the command was successful.
2969 * FC_ELS_FREJECT - the command was rejected by a Fabric.
2970 * FC_ELS_PREJECT - the command was rejected by an N-port.
2971 * FC_TRANSPORT_ERROR - a transport error occurred.
2972 * FC_UNBOUND - the fca_handle specified is not bound.
2973 * FC_ELS_BAD - the FCA can not issue the requested ELS.
2974 *
2975 * Context:
2976 * Kernel context.
2977 */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t *ha;
	int rval;
	clock_t timer = drv_usectohz(30000000);
	ls_code_t els;
	la_els_rjt_t rjt;
	ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(ha, "started\n");

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (DRIVER_SUSPENDED(ha)) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Response header swaps source/destination of the command. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear any stale SRB state before marking this an ELS packet. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	QL_PRINT_10(ha, "els.ls_code=%xh, d_id=%xh\n", els.ls_code,
	    pkt->pkt_cmd_fhdr.d_id);

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch to the per-ELS handler based on the link-service code. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/* Nothing to send for RJT/ACC; complete immediately. */
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		/* Unsupported ELS: answer with a locally-built LS_RJT. */
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_io_comp(sp);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "rval=%x, ls_code=%xh sent to d_id=%xh, sp=%ph\n",
		    rval, els.ls_code, pkt->pkt_cmd_fhdr.d_id, sp);
	} else {
		/*EMPTY*/
		QL_PRINT_10(ha, "done\n");
	}
	return (rval);
}
3146
3147 /*
3148 * ql_get_cap
3149 * Export FCA hardware and software capabilities.
3150 *
3151 * Input:
3152 * fca_handle = handle setup by ql_bind_port().
3153 * cap = pointer to the capabilities string.
3154 * ptr = buffer pointer for return capability.
3155 *
3156 * Returns:
3157 * FC_CAP_ERROR - no such capability
3158 * FC_CAP_FOUND - the capability was returned and cannot be set
3159 * FC_CAP_SETTABLE - the capability was returned and can be set
3160 * FC_UNBOUND - the fca_handle specified is not bound.
3161 *
3162 * Context:
3163 * Kernel context.
3164 */
3165 static int
3166 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
3167 {
3168 ql_adapter_state_t *ha;
3169 int rval;
3170 uint32_t *rptr = (uint32_t *)ptr;
3171
3172 ha = ql_fca_handle_to_state(fca_handle);
3173 if (ha == NULL) {
3174 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3175 (void *)fca_handle);
3176 return (FC_UNBOUND);
3177 }
3178 QL_PRINT_3(ha, "started\n");
3179
3180 if (strcmp(cap, FC_NODE_WWN) == 0) {
3181 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
3182 ptr, 8);
3183 rval = FC_CAP_FOUND;
3184 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3185 bcopy((void *)&ha->loginparams, ptr,
3186 sizeof (la_els_logi_t));
3187 rval = FC_CAP_FOUND;
3188 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3189 *rptr = (uint32_t)QL_UB_LIMIT;
3190 rval = FC_CAP_FOUND;
3191 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
3192
3193 dev_info_t *psydip = NULL;
3194 #ifdef __sparc
3195 /*
3196 * Disable streaming for certain 2 chip adapters
3197 * below Psycho to handle Psycho byte hole issue.
3198 */
3199 if (ha->flags & MULTI_CHIP_ADAPTER &&
3200 !CFG_IST(ha, CFG_SBUS_CARD)) {
3201 for (psydip = ddi_get_parent(ha->dip); psydip;
3202 psydip = ddi_get_parent(psydip)) {
3203 if (strcmp(ddi_driver_name(psydip),
3204 "pcipsy") == 0) {
3205 break;
3206 }
3207 }
3208 }
3209 #endif /* __sparc */
3210
3211 if (psydip) {
3212 *rptr = (uint32_t)FC_NO_STREAMING;
3213 EL(ha, "No Streaming\n");
3214 } else {
3215 *rptr = (uint32_t)FC_ALLOW_STREAMING;
3216 EL(ha, "Allow Streaming\n");
3217 }
3218 rval = FC_CAP_FOUND;
3219 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3220 *rptr = ha->loginparams.common_service.rx_bufsize;
3221 rval = FC_CAP_FOUND;
3222 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3223 *rptr = FC_RESET_RETURN_ALL;
3224 rval = FC_CAP_FOUND;
3225 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
3226 *rptr = FC_NO_DVMA_SPACE;
3227 rval = FC_CAP_FOUND;
3228 } else {
3229 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3230 rval = FC_CAP_ERROR;
3231 }
3232
3233 QL_PRINT_3(ha, "done\n");
3234
3235 return (rval);
3236 }
3237
3238 /*
3239 * ql_set_cap
3240 * Allow the FC Transport to set FCA capabilities if possible.
3241 *
3242 * Input:
3243 * fca_handle = handle setup by ql_bind_port().
3244 * cap = pointer to the capabilities string.
3245 * ptr = buffer pointer for capability.
3246 *
3247 * Returns:
3248 * FC_CAP_ERROR - no such capability
3249 * FC_CAP_FOUND - the capability cannot be set by the FC Transport.
3250 * FC_CAP_SETTABLE - the capability was successfully set.
3251 * FC_UNBOUND - the fca_handle specified is not bound.
3252 *
3253 * Context:
3254 * Kernel context.
3255 */
3256 /* ARGSUSED */
3257 static int
3258 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
3259 {
3260 ql_adapter_state_t *ha;
3261 int rval;
3262
3263 ha = ql_fca_handle_to_state(fca_handle);
3264 if (ha == NULL) {
3265 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3266 (void *)fca_handle);
3267 return (FC_UNBOUND);
3268 }
3269 QL_PRINT_3(ha, "started\n");
3270
3271 if (strcmp(cap, FC_NODE_WWN) == 0) {
3272 rval = FC_CAP_FOUND;
3273 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3274 rval = FC_CAP_FOUND;
3275 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3276 rval = FC_CAP_FOUND;
3277 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3278 rval = FC_CAP_FOUND;
3279 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3280 rval = FC_CAP_FOUND;
3281 } else {
3282 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3283 rval = FC_CAP_ERROR;
3284 }
3285
3286 QL_PRINT_3(ha, "done\n");
3287
3288 return (rval);
3289 }
3290
3291 /*
3292 * ql_getmap
3293 * Request of Arbitrated Loop (AL-PA) map.
3294 *
3295 * Input:
3296 * fca_handle = handle setup by ql_bind_port().
3297 * mapbuf= buffer pointer for map.
3298 *
 * Returns:
 *	FC_OLDPORT - the specified port is not operating in loop mode.
 *	FC_OFFLINE - the specified port is not online.
 *	FC_NOMAP - there is no loop map available for this port.
 *	FC_UNBOUND - the fca_handle specified is not bound.
 *	FC_TRAN_BUSY - the driver stayed suspended past the wait timeout.
 *	FC_SUCCESS - a valid map has been placed in mapbuf.
3305 *
3306 * Context:
3307 * Kernel context.
3308 */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer = drv_usectohz(30000000);
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started\n");

	/* Fill in the caller-visible map header before asking firmware. */
	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (DRIVER_SUSPENDED(ha)) {
		/* Ask the task daemon to wake us when it resumes. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/*
	 * The firmware copies the map (length byte followed by the
	 * AL_PA list) directly into the caller's buffer starting at
	 * lilp_length.
	 */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this as an
		 * offline condition, let's wait a few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NO_MAP\n");
		rval = FC_NO_MAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n",
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(ha, "done\n");
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
3377
3378 /*
3379 * ql_transport
3380 * Issue an I/O request. Handles all regular requests.
3381 *
3382 * Input:
3383 * fca_handle = handle setup by ql_bind_port().
3384 * pkt = pointer to fc_packet.
3385 *
3386 * Returns:
3387 * FC_SUCCESS - the packet was accepted for transport.
3388 * FC_TRANSPORT_ERROR - a transport error occurred.
3389 * FC_BADPACKET - the packet to be transported had not been
3390 * initialized by this FCA.
3391 * FC_UNBOUND - the fca_handle specified is not bound.
3392 *
3393 * Context:
3394 * Kernel context.
3395 */
3396 static int
3397 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
3398 {
3399 ql_adapter_state_t *ha;
3400 int rval = FC_TRANSPORT_ERROR;
3401 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
3402
3403 /* Verify proper command. */
3404 ha = ql_cmd_setup(fca_handle, pkt, &rval);
3405 if (ha == NULL) {
3406 QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3407 rval, fca_handle);
3408 return (rval);
3409 }
3410 QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
3411
3412 /* Reset SRB flags. */
3413 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
3414 SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_UB_CALLBACK |
3415 SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
3416 SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
3417 SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
3418 SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
3419 SRB_MS_PKT | SRB_ELS_PKT);
3420
3421 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3422 pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
3423 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3424 pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
3425 pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
3426
3427 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3428 case R_CTL_COMMAND:
3429 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
3430 sp->flags |= SRB_FCP_CMD_PKT;
3431 rval = ql_fcp_scsi_cmd(ha, pkt, sp);
3432 } else {
3433 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3434 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3435 rval = FC_TRANSPORT_ERROR;
3436 }
3437 break;
3438
3439 default:
3440 /* Setup response header and buffer. */
3441 if (pkt->pkt_rsplen) {
3442 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3443 }
3444
3445 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3446 case R_CTL_UNSOL_DATA:
3447 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3448 if (CFG_IST(ha, CFG_FCIP_SUPPORT) &&
3449 ha->vp_index == 0) {
3450 sp->flags |= SRB_IP_PKT;
3451 rval = ql_fcp_ip_cmd(ha, pkt, sp);
3452 } else {
3453 cmn_err(CE_NOTE, "%s(%d) FC-IP is not "
3454 "supported on this adapter\n",
3455 QL_NAME, ha->instance);
3456 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3457 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3458 rval = FC_TRANSPORT_ERROR;
3459 }
3460 }
3461 break;
3462
3463 case R_CTL_UNSOL_CONTROL:
3464 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3465 sp->flags |= SRB_GENERIC_SERVICES_PKT;
3466 rval = ql_fc_services(ha, pkt);
3467 } else {
3468 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3469 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3470 rval = FC_TRANSPORT_ERROR;
3471 }
3472 break;
3473
3474 case R_CTL_SOLICITED_DATA:
3475 case R_CTL_STATUS:
3476 default:
3477 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3478 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3479 rval = FC_TRANSPORT_ERROR;
3480 EL(ha, "unknown, r_ctl=%xh\n",
3481 pkt->pkt_cmd_fhdr.r_ctl);
3482 break;
3483 }
3484 }
3485
3486 if (rval != FC_SUCCESS) {
3487 EL(ha, "failed, rval = %xh\n", rval);
3488 } else {
3489 /*EMPTY*/
3490 QL_PRINT_3(ha, "done\n");
3491 }
3492
3493 return (rval);
3494 }
3495
3496 /*
3497 * ql_ub_alloc
3498 * Allocate buffers for unsolicited exchanges.
3499 *
3500 * Input:
3501 * fca_handle = handle setup by ql_bind_port().
3502 * tokens = token array for each buffer.
3503 * size = size of each buffer.
3504 * count = pointer to number of buffers.
3505 * type = the FC-4 type the buffers are reserved for.
3506 * 1 = Extended Link Services, 5 = LLC/SNAP
3507 *
3508 * Returns:
3509 * FC_FAILURE - buffers could not be allocated.
3510 * FC_TOOMANY - the FCA could not allocate the requested
3511 * number of buffers.
3512 * FC_SUCCESS - unsolicited buffers were allocated.
3513 * FC_UNBOUND - the fca_handle specified is not bound.
3514 *
3515 * Context:
3516 * Kernel context.
3517 */
3518 static int
3519 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3520 uint32_t *count, uint32_t type)
3521 {
3522 ql_adapter_state_t *ha;
3523 caddr_t bufp = NULL;
3524 fc_unsol_buf_t *ubp;
3525 ql_srb_t *sp;
3526 uint32_t index;
3527 uint32_t cnt;
3528 uint32_t ub_array_index = 0;
3529 int rval = FC_SUCCESS;
3530 int ub_updated = FALSE;
3531
3532 /* Check handle. */
3533 ha = ql_fca_handle_to_state(fca_handle);
3534 if (ha == NULL) {
3535 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3536 (void *)fca_handle);
3537 return (FC_UNBOUND);
3538 }
3539 QL_PRINT_3(ha, "started, count = %xh\n", *count);
3540
3541 QL_PM_LOCK(ha);
3542 if (ha->power_level != PM_LEVEL_D0) {
3543 QL_PM_UNLOCK(ha);
3544 QL_PRINT_3(ha, "down done\n");
3545 return (FC_FAILURE);
3546 }
3547 QL_PM_UNLOCK(ha);
3548
3549 /* Check the count. */
3550 if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3551 *count = 0;
3552 EL(ha, "failed, FC_TOOMANY\n");
3553 rval = FC_TOOMANY;
3554 }
3555
3556 /*
3557 * reset ub_array_index
3558 */
3559 ub_array_index = 0;
3560
3561 /*
3562 * Now proceed to allocate any buffers required
3563 */
3564 for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3565 /* Allocate all memory needed. */
3566 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3567 KM_SLEEP);
3568 if (ubp == NULL) {
3569 EL(ha, "failed, FC_FAILURE\n");
3570 rval = FC_FAILURE;
3571 } else {
3572 sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
3573 if (sp == NULL) {
3574 kmem_free(ubp, sizeof (fc_unsol_buf_t));
3575 rval = FC_FAILURE;
3576 } else {
3577 if (type == FC_TYPE_IS8802_SNAP) {
3578 #ifdef __sparc
3579 if (ql_get_dma_mem(ha,
3580 &sp->ub_buffer, size,
3581 BIG_ENDIAN_DMA,
3582 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3583 rval = FC_FAILURE;
3584 kmem_free(ubp,
3585 sizeof (fc_unsol_buf_t));
3586 kmem_free(sp,
3587 sizeof (ql_srb_t));
3588 } else {
3589 bufp = sp->ub_buffer.bp;
3590 sp->ub_size = size;
3591 }
3592 #else
3593 if (ql_get_dma_mem(ha,
3594 &sp->ub_buffer, size,
3595 LITTLE_ENDIAN_DMA,
3596 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3597 rval = FC_FAILURE;
3598 kmem_free(ubp,
3599 sizeof (fc_unsol_buf_t));
3600 kmem_free(sp,
3601 sizeof (ql_srb_t));
3602 } else {
3603 bufp = sp->ub_buffer.bp;
3604 sp->ub_size = size;
3605 }
3606 #endif
3607 } else {
3608 bufp = kmem_zalloc(size, KM_SLEEP);
3609 if (bufp == NULL) {
3610 rval = FC_FAILURE;
3611 kmem_free(ubp,
3612 sizeof (fc_unsol_buf_t));
3613 kmem_free(sp,
3614 sizeof (ql_srb_t));
3615 } else {
3616 sp->ub_size = size;
3617 }
3618 }
3619 }
3620 }
3621
3622 if (rval == FC_SUCCESS) {
3623 /* Find next available slot. */
3624 QL_UB_LOCK(ha);
3625 while (ha->ub_array[ub_array_index] != NULL) {
3626 ub_array_index++;
3627 }
3628
3629 ubp->ub_fca_private = (void *)sp;
3630
3631 /* init cmd links */
3632 sp->cmd.base_address = sp;
3633 sp->cmd.prev = NULL;
3634 sp->cmd.next = NULL;
3635 sp->cmd.head = NULL;
3636
3637 /* init wdg links */
3638 sp->wdg.base_address = sp;
3639 sp->wdg.prev = NULL;
3640 sp->wdg.next = NULL;
3641 sp->wdg.head = NULL;
3642 sp->ha = ha;
3643
3644 ubp->ub_buffer = bufp;
3645 ubp->ub_bufsize = size;
3646 ubp->ub_port_handle = fca_handle;
3647 ubp->ub_token = ub_array_index;
3648
3649 /* Save the token. */
3650 tokens[index] = ub_array_index;
3651
3652 /* Setup FCA private information. */
3653 sp->ub_type = type;
3654 sp->handle = ub_array_index;
3655 sp->flags |= SRB_UB_IN_FCA;
3656
3657 ha->ub_array[ub_array_index] = ubp;
3658 ha->ub_allocated++;
3659 ub_updated = TRUE;
3660 QL_UB_UNLOCK(ha);
3661 }
3662 }
3663
3664 /* IP buffer. */
3665 if (ub_updated) {
3666 if (type == FC_TYPE_IS8802_SNAP &&
3667 CFG_IST(ha, CFG_FCIP_SUPPORT) &&
3668 ha->vp_index == 0) {
3669
3670 ADAPTER_STATE_LOCK(ha);
3671 ha->flags |= IP_ENABLED;
3672 ADAPTER_STATE_UNLOCK(ha);
3673
3674 if (!(ha->flags & IP_INITIALIZED)) {
3675 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3676 ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3677 LSB(ql_ip_mtu);
3678 ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3679 MSB(ql_ip_mtu);
3680 ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3681 LSB(size);
3682 ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3683 MSB(size);
3684
3685 cnt = CHAR_TO_SHORT(
3686 ha->ip_init_ctrl_blk.cb24.cc[0],
3687 ha->ip_init_ctrl_blk.cb24.cc[1]);
3688
3689 if (cnt < *count) {
3690 ha->ip_init_ctrl_blk.cb24.cc[0]
3691 = LSB(*count);
3692 ha->ip_init_ctrl_blk.cb24.cc[1]
3693 = MSB(*count);
3694 }
3695 } else {
3696 ha->ip_init_ctrl_blk.cb.mtu_size[0] =
3697 LSB(ql_ip_mtu);
3698 ha->ip_init_ctrl_blk.cb.mtu_size[1] =
3699 MSB(ql_ip_mtu);
3700 ha->ip_init_ctrl_blk.cb.buf_size[0] =
3701 LSB(size);
3702 ha->ip_init_ctrl_blk.cb.buf_size[1] =
3703 MSB(size);
3704
3705 cnt = CHAR_TO_SHORT(
3706 ha->ip_init_ctrl_blk.cb.cc[0],
3707 ha->ip_init_ctrl_blk.cb.cc[1]);
3708
3709 if (cnt < *count) {
3710 ha->ip_init_ctrl_blk.cb.cc[0] =
3711 LSB(*count);
3712 ha->ip_init_ctrl_blk.cb.cc[1] =
3713 MSB(*count);
3714 }
3715 }
3716
3717 (void) ql_initialize_ip(ha);
3718 }
3719 ql_isp_rcvbuf(ha);
3720 }
3721 }
3722
3723 if (rval != FC_SUCCESS) {
3724 EL(ha, "failed=%xh\n", rval);
3725 } else {
3726 /*EMPTY*/
3727 QL_PRINT_3(ha, "done\n");
3728 }
3729 return (rval);
3730 }
3731
3732 /*
3733 * ql_ub_free
3734 * Free unsolicited buffers.
3735 *
3736 * Input:
3737 * fca_handle = handle setup by ql_bind_port().
3738 * count = number of buffers.
3739 * tokens = token array for each buffer.
3740 *
3741 * Returns:
3742 * FC_SUCCESS - the requested buffers have been freed.
3743 * FC_UNBOUND - the fca_handle specified is not bound.
3744 * FC_UB_BADTOKEN - an invalid token was encountered.
3745 * No buffers have been released.
3746 *
3747 * Context:
3748 * Kernel context.
3749 */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started\n");

	/* Check all returned tokens. */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t *ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/*
		 * Mark the buffer so no new work is started on it, then
		 * poll (dropping the lock between polls) until it is
		 * back in the FCA and not held by a callback.
		 */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			delay(drv_usectohz(100000));	/* 100ms poll */
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 *
		 * NOTE(review): ub_allocated is read here without
		 * QL_UB_LOCK held; confirm that is intended.
		 */
		if (ha->ub_allocated == 0) {
			QL_UB_LOCK(ha);
			cv_broadcast(&ha->pha->cv_ub);
			QL_UB_UNLOCK(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
3825
3826 /*
3827 * ql_ub_release
3828 * Release unsolicited buffers from FC Transport
3829 * to FCA for future use.
3830 *
3831 * Input:
3832 * fca_handle = handle setup by ql_bind_port().
3833 * count = number of buffers.
3834 * tokens = token array for each buffer.
3835 *
3836 * Returns:
3837 * FC_SUCCESS - the requested buffers have been released.
3838 * FC_UNBOUND - the fca_handle specified is not bound.
3839 * FC_UB_BADTOKEN - an invalid token was encountered.
3840 * No buffers have been released.
3841 *
3842 * Context:
3843 * Kernel context.
3844 */
3845 static int
3846 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3847 {
3848 ql_adapter_state_t *ha;
3849 ql_srb_t *sp;
3850 uint32_t index;
3851 uint64_t ub_array_index;
3852 int rval = FC_SUCCESS;
3853 int ub_ip_updated = FALSE;
3854
3855 /* Check handle. */
3856 ha = ql_fca_handle_to_state(fca_handle);
3857 if (ha == NULL) {
3858 QL_PRINT_2(NULL, ": failed, no adapter=%ph\n",
3859 (void *)fca_handle);
3860 return (FC_UNBOUND);
3861 }
3862 QL_PRINT_3(ha, "started\n");
3863
3864 /* Acquire adapter state lock. */
3865 QL_UB_LOCK(ha);
3866
3867 /* Check all returned tokens. */
3868 for (index = 0; index < count; index++) {
3869 /* Check the token range. */
3870 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3871 EL(ha, "failed, FC_UB_BADTOKEN\n");
3872 rval = FC_UB_BADTOKEN;
3873 break;
3874 }
3875
3876 /* Check the unsolicited buffer array. */
3877 if (ha->ub_array[ub_array_index] == NULL) {
3878 EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3879 rval = FC_UB_BADTOKEN;
3880 break;
3881 }
3882
3883 /* Check the state of the unsolicited buffer. */
3884 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3885 if (sp->flags & SRB_UB_IN_FCA) {
3886 EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3887 rval = FC_UB_BADTOKEN;
3888 break;
3889 }
3890 }
3891
3892 /* If all tokens checkout, release the buffers. */
3893 if (rval == FC_SUCCESS) {
3894 /* Check all returned tokens. */
3895 for (index = 0; index < count; index++) {
3896 fc_unsol_buf_t *ubp;
3897
3898 ub_array_index = tokens[index];
3899 ubp = ha->ub_array[ub_array_index];
3900 sp = ubp->ub_fca_private;
3901
3902 ubp->ub_resp_flags = 0;
3903 sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3904 sp->flags |= SRB_UB_IN_FCA;
3905
3906 /* IP buffer. */
3907 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3908 ub_ip_updated = TRUE;
3909 }
3910 }
3911 }
3912
3913 QL_UB_UNLOCK(ha);
3914
3915 /*
3916 * XXX: We should call ql_isp_rcvbuf() to return a
3917 * buffer to ISP only if the number of buffers fall below
3918 * the low water mark.
3919 */
3920 if (ub_ip_updated) {
3921 ql_isp_rcvbuf(ha);
3922 }
3923
3924 if (rval != FC_SUCCESS) {
3925 EL(ha, "failed, rval = %xh\n", rval);
3926 } else {
3927 /*EMPTY*/
3928 QL_PRINT_3(ha, "done\n");
3929 }
3930 return (rval);
3931 }
3932
3933 /*
3934 * ql_abort
3935 * Abort a packet.
3936 *
3937 * Input:
3938 * fca_handle = handle setup by ql_bind_port().
3939 * pkt = pointer to fc_packet.
3940 * flags = KM_SLEEP flag.
3941 *
3942 * Returns:
3943 * FC_SUCCESS - the packet has successfully aborted.
3944 * FC_ABORTED - the packet has successfully aborted.
3945 * FC_ABORTING - the packet is being aborted.
3946 * FC_ABORT_FAILED - the packet could not be aborted.
3947 * FC_TRANSPORT_ERROR - a transport error occurred while attempting
3948 * to abort the packet.
3949 * FC_BADEXCHANGE - no packet found.
3950 * FC_UNBOUND - the fca_handle specified is not bound.
3951 *
3952 * Context:
3953 * Kernel context.
3954 */
/*ARGSUSED*/
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(ha, "started\n");

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* Bail out when there is no target/LUN queue or the loop is down. */
	if ((tq == NULL) || (lq = sp->lun_queue) == NULL ||
	    (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL || lq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Check pending queue for command.  Note sp is reused
		 * below as the scan cursor; it is NULL afterwards if
		 * the packet's SRB was not found on the queue.
		 */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			/* Complete the packet immediately; it never ran. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd, B_TRUE);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Already completed by the ISP; nothing to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/* Command is with the ISP; abort it there. */
		ql_request_q_t	*req_q;
		request_t	*pio;
		uint32_t	index;

		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		sp->flags |= SRB_ABORTING;
		if (sp->handle != 0) {
			/*
			 * Replace the outstanding entry with the
			 * aborted-SRB sentinel so the completion path
			 * ignores any late response.
			 */
			index = sp->handle & OSC_INDEX_MASK;
			if (ha->outstanding_cmds[index] == sp) {
				ha->outstanding_cmds[index] =
				    QL_ABORTED_SRB(ha);
			}
			if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
				req_q = ha->req_q[1];
			} else {
				req_q = ha->req_q[0];
			}
			/*
			 * If the IOCB is still sitting in the request
			 * ring (not yet consumed by the ISP), overwrite
			 * its entries with the aborted entry type,
			 * wrapping at the end of the ring.
			 */
			pio = sp->request_ring_ptr;
			if (sp->handle ==
			    ddi_get32(req_q->req_ring.acc_handle,
			    &pio->handle)) {
				EL(ha, "inflight sp=%ph, handle=%xh, "
				    "invalidated\n", (void *)sp, sp->handle);
				for (index = 0; index < sp->req_cnt; index++) {
					ddi_put8(req_q->req_ring.acc_handle,
					    &pio->entry_type,
					    ABORTED_ENTRY_TYPE);
					pio++;
					if (pio == (request_t *)
					    ((uintptr_t)req_q->req_ring.bp +
					    req_q->req_ring.size)) {
						pio = req_q->req_ring.bp;
					}
				}
			}
			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}
			if (sp->flags & SRB_FCP_CMD_PKT &&
			    lq->lun_outcnt != 0) {
				lq->lun_outcnt--;
			}
			/* Remove command from watchdog queue. */
			if (sp->flags & SRB_WATCHDOG_ENABLED) {
				ql_remove_link(&tq->wdg, &sp->wdg);
				sp->flags &= ~SRB_WATCHDOG_ENABLED;
			}
			/* Release device queue lock. */
			INTR_UNLOCK(ha);

			/* Ask the firmware to abort the command. */
			(void) ql_abort_command(ha, sp);
			sp->handle = 0;
		} else {
			/* Release device queue lock. */
			INTR_UNLOCK(ha);
		}

		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		sp->flags |= SRB_ISP_COMPLETED;
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
4137
4138 /*
4139 * ql_reset
4140 * Reset link or hardware.
4141 *
4142 * Input:
4143 * fca_handle = handle setup by ql_bind_port().
4144 * cmd = reset type command.
4145 *
4146 * Returns:
4147 * FC_SUCCESS - reset has successfully finished.
4148 * FC_UNBOUND - the fca_handle specified is not bound.
4149 * FC_FAILURE - reset failed.
4150 *
4151 * Context:
4152 * Kernel context.
4153 */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(ha, "started, cmd=%d\n", cmd);

	/* Refuse while another reset/resync is already in progress. */
	if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
	    DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
		EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
		    ha->task_daemon_flags);
		return (FC_TRAN_BUSY);
	}

	switch (cmd) {
	case FC_FCA_CORE:
		/* dump firmware core if specified. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Only meaningful while the loop is up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* if dump firmware core if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			/*
			 * Virtual ports cannot dump firmware; fall back
			 * to a loop reset (skipped when loop is down).
			 */
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				/* Notify the transport via its callback. */
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the port-speed bits of the state. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon deliver the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
4271
4272 /*
4273 * ql_port_manage
4274 * Perform port management or diagnostics.
4275 *
4276 * Input:
4277 * fca_handle = handle setup by ql_bind_port().
4278 * cmd = pointer to command structure.
4279 *
4280 * Returns:
4281 * FC_SUCCESS - the request completed successfully.
4282 * FC_FAILURE - the request did not complete successfully.
4283 * FC_UNBOUND - the fca_handle specified is not bound.
4284 *
4285 * Context:
4286 * Kernel context.
4287 */
4288 static int
4289 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
4290 {
4291 clock_t timer;
4292 uint16_t index;
4293 uint32_t *bp;
4294 port_id_t d_id;
4295 ql_link_t *link;
4296 ql_adapter_state_t *ha, *pha;
4297 ql_tgt_t *tq;
4298 dma_mem_t buffer_xmt, buffer_rcv;
4299 size_t length;
4300 uint32_t cnt;
4301 char buf[80];
4302 lbp_t *lb;
4303 ql_mbx_data_t mr;
4304 app_mbx_cmd_t *mcp;
4305 int i0;
4306 uint8_t *bptr;
4307 int rval2, rval = FC_SUCCESS;
4308 uint32_t opcode;
4309 uint32_t set_flags = 0;
4310 fc_fca_p2p_info_t *p2p_info;
4311
4312 ha = ql_fca_handle_to_state(fca_handle);
4313 if (ha == NULL) {
4314 QL_PRINT_2(NULL, ": failed, no adapter=%ph\n",
4315 (void *)fca_handle);
4316 return (FC_UNBOUND);
4317 }
4318 pha = ha->pha;
4319
4320 #ifdef QL_DEBUG_LEVEL_10
4321 if (cmd->pm_cmd_code != FC_PORT_GET_FW_REV) {
4322 QL_PRINT_10(ha, "started=%xh\n", cmd->pm_cmd_code);
4323 }
4324 #endif
4325
4326 if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
4327 DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
4328 EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
4329 ha->task_daemon_flags);
4330 return (FC_TRAN_BUSY);
4331 }
4332
4333 switch (cmd->pm_cmd_code) {
4334 case FC_PORT_BYPASS:
4335 d_id.b24 = *cmd->pm_cmd_buf;
4336 tq = ql_d_id_to_queue(ha, d_id);
4337 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4338 EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4339 rval = FC_FAILURE;
4340 }
4341 break;
4342 case FC_PORT_UNBYPASS:
4343 d_id.b24 = *cmd->pm_cmd_buf;
4344 tq = ql_d_id_to_queue(ha, d_id);
4345 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4346 EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4347 rval = FC_FAILURE;
4348 }
4349 break;
4350 case FC_PORT_GET_FW_REV:
4351 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4352 pha->fw_minor_version, pha->fw_subminor_version);
4353 length = strlen(buf) + 1;
4354 if (cmd->pm_data_len < length) {
4355 cmd->pm_data_len = length;
4356 EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4357 rval = FC_FAILURE;
4358 } else {
4359 (void) strcpy(cmd->pm_data_buf, buf);
4360 }
4361 break;
4362
4363 case FC_PORT_GET_FCODE_REV: {
4364 caddr_t fcode_ver_buf = NULL;
4365
4366 i0 = 0;
4367 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4368 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4369 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4370 (caddr_t)&fcode_ver_buf, &i0);
4371 length = (uint_t)i0;
4372
4373 if (rval2 != DDI_PROP_SUCCESS) {
4374 EL(ha, "failed, getting version = %xh\n", rval2);
4375 length = 20;
4376 fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4377 if (fcode_ver_buf != NULL) {
4378 (void) sprintf(fcode_ver_buf,
4379 "NO FCODE FOUND");
4380 }
4381 }
4382
4383 if (cmd->pm_data_len < length) {
4384 EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4385 "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4386 cmd->pm_data_len = length;
4387 rval = FC_FAILURE;
4388 } else if (fcode_ver_buf != NULL) {
4389 bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4390 length);
4391 }
4392
4393 if (fcode_ver_buf != NULL) {
4394 kmem_free(fcode_ver_buf, length);
4395 }
4396 break;
4397 }
4398
4399 case FC_PORT_GET_DUMP:
4400 QL_DUMP_LOCK(pha);
4401 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4402 EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4403 "length=%lxh\n", cmd->pm_data_len);
4404 cmd->pm_data_len = pha->risc_dump_size;
4405 rval = FC_FAILURE;
4406 } else if (pha->ql_dump_state & QL_DUMPING) {
4407 EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4408 rval = FC_TRAN_BUSY;
4409 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4410 (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4411 pha->ql_dump_state |= QL_DUMP_UPLOADED;
4412 } else {
4413 EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4414 rval = FC_FAILURE;
4415 }
4416 QL_DUMP_UNLOCK(pha);
4417 break;
4418 case FC_PORT_FORCE_DUMP:
4419 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4420 EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4421 rval = FC_FAILURE;
4422 }
4423 break;
4424 case FC_PORT_GET_DUMP_SIZE:
4425 bp = (uint32_t *)cmd->pm_data_buf;
4426 *bp = pha->risc_dump_size;
4427 break;
4428 case FC_PORT_DIAG:
4429 EL(ha, "diag cmd=%xh\n", cmd->pm_cmd_flags);
4430
4431 /* Wait for suspension to end. */
4432 for (timer = 0; timer < 3000 &&
4433 pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4434 ql_delay(ha, 10000);
4435 }
4436
4437 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4438 EL(ha, "failed, FC_TRAN_BUSY-2\n");
4439 rval = FC_TRAN_BUSY;
4440 break;
4441 }
4442
4443 if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4444 EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4445 rval2);
4446 ql_restart_driver(ha);
4447 rval = FC_TRAN_BUSY;
4448 break;
4449 }
4450
4451 switch (cmd->pm_cmd_flags) {
4452 case QL_DIAG_EXEFMW:
4453 if (ql_start_firmware(ha) != QL_SUCCESS) {
4454 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4455 rval = FC_FAILURE;
4456 }
4457 break;
4458 case QL_DIAG_CHKCMDQUE:
4459 for (i0 = 1, cnt = 0; i0 < pha->osc_max_cnt;
4460 i0++) {
4461 cnt += (pha->outstanding_cmds[i0] != NULL);
4462 }
4463 if (cnt != 0) {
4464 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4465 "FC_FAILURE\n");
4466 rval = FC_FAILURE;
4467 }
4468 break;
4469 case QL_DIAG_FMWCHKSUM:
4470 if (ql_verify_checksum(ha) != QL_SUCCESS) {
4471 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4472 "FC_FAILURE\n");
4473 rval = FC_FAILURE;
4474 }
4475 break;
4476 case QL_DIAG_SLFTST:
4477 if (ql_online_selftest(ha) != QL_SUCCESS) {
4478 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4479 rval = FC_FAILURE;
4480 }
4481 ql_reset_chip(ha);
4482 set_flags |= ISP_ABORT_NEEDED;
4483 break;
4484 case QL_DIAG_REVLVL:
4485 if (cmd->pm_stat_len <
4486 sizeof (ql_adapter_revlvl_t)) {
4487 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4488 "slen=%lxh, rlvllen=%lxh\n",
4489 cmd->pm_stat_len,
4490 sizeof (ql_adapter_revlvl_t));
4491 rval = FC_NOMEM;
4492 } else {
4493 bcopy((void *)&(pha->adapter_stats->revlvl),
4494 cmd->pm_stat_buf,
4495 (size_t)cmd->pm_stat_len);
4496 cmd->pm_stat_len =
4497 sizeof (ql_adapter_revlvl_t);
4498 }
4499 break;
4500 case QL_DIAG_LPBMBX:
4501
4502 if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4503 EL(ha, "failed, QL_DIAG_LPBMBX "
4504 "FC_INVALID_REQUEST, pmlen=%lxh, "
4505 "reqd=%lxh\n", cmd->pm_data_len,
4506 sizeof (struct app_mbx_cmd));
4507 rval = FC_INVALID_REQUEST;
4508 break;
4509 }
4510 /*
4511 * Don't do the wrap test on a 2200 when the
4512 * firmware is running.
4513 */
4514 if (!CFG_IST(ha, CFG_CTRL_22XX)) {
4515 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4516 mr.mb[1] = mcp->mb[1];
4517 mr.mb[2] = mcp->mb[2];
4518 mr.mb[3] = mcp->mb[3];
4519 mr.mb[4] = mcp->mb[4];
4520 mr.mb[5] = mcp->mb[5];
4521 mr.mb[6] = mcp->mb[6];
4522 mr.mb[7] = mcp->mb[7];
4523
4524 bcopy(&mr.mb[0], &mr.mb[10],
4525 sizeof (uint16_t) * 8);
4526
4527 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4528 EL(ha, "failed, QL_DIAG_LPBMBX "
4529 "FC_FAILURE\n");
4530 rval = FC_FAILURE;
4531 break;
4532 } else {
4533 for (i0 = 1; i0 < 8; i0++) {
4534 if (mr.mb[i0] !=
4535 mr.mb[i0 + 10]) {
4536 EL(ha, "failed, "
4537 "QL_DIAG_LPBMBX "
4538 "FC_FAILURE-2\n");
4539 rval = FC_FAILURE;
4540 break;
4541 }
4542 }
4543 }
4544
4545 if (rval == FC_FAILURE) {
4546 (void) ql_flash_errlog(ha,
4547 FLASH_ERRLOG_ISP_ERR, 0,
4548 RD16_IO_REG(ha, hccr),
4549 RD16_IO_REG(ha, istatus));
4550 set_flags |= ISP_ABORT_NEEDED;
4551 }
4552 }
4553 break;
4554 case QL_DIAG_LPBDTA:
4555 /*
4556 * For loopback data, we receive the
4557 * data back in pm_stat_buf. This provides
4558 * the user an opportunity to compare the
4559 * transmitted and received data.
4560 *
4561 * NB: lb->options are:
4562 * 0 --> Ten bit loopback
4563 * 1 --> One bit loopback
4564 * 2 --> External loopback
4565 */
4566 if (cmd->pm_data_len > 65536) {
4567 rval = FC_TOOMANY;
4568 EL(ha, "failed, QL_DIAG_LPBDTA "
4569 "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4570 break;
4571 }
4572 if (ql_get_dma_mem(ha, &buffer_xmt,
4573 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4574 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4575 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4576 rval = FC_NOMEM;
4577 break;
4578 }
4579 if (ql_get_dma_mem(ha, &buffer_rcv,
4580 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4581 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4582 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4583 rval = FC_NOMEM;
4584 break;
4585 }
4586 ddi_rep_put8(buffer_xmt.acc_handle,
4587 (uint8_t *)cmd->pm_data_buf,
4588 (uint8_t *)buffer_xmt.bp,
4589 cmd->pm_data_len, DDI_DEV_AUTOINCR);
4590
4591 /* 22xx's adapter must be in loop mode for test. */
4592 if (CFG_IST(ha, CFG_CTRL_22XX)) {
4593 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4594 if (ha->flags & POINT_TO_POINT ||
4595 (ha->task_daemon_flags & LOOP_DOWN &&
4596 *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4597 cnt = *bptr;
4598 *bptr = (uint8_t)
4599 (*bptr & ~(BIT_6|BIT_5|BIT_4));
4600 (void) ql_abort_isp(ha);
4601 *bptr = (uint8_t)cnt;
4602 }
4603 }
4604
4605 /* Shutdown IP. */
4606 if (pha->flags & IP_INITIALIZED) {
4607 (void) ql_shutdown_ip(pha);
4608 }
4609
4610 lb = (lbp_t *)cmd->pm_cmd_buf;
4611 lb->transfer_count =
4612 (uint32_t)cmd->pm_data_len;
4613 lb->transfer_segment_count = 0;
4614 lb->receive_segment_count = 0;
4615 lb->transfer_data_address =
4616 buffer_xmt.cookie.dmac_address;
4617 lb->receive_data_address =
4618 buffer_rcv.cookie.dmac_address;
4619
4620 if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4621 (void) ql_set_loop_point(ha, lb->options);
4622 }
4623
4624 if (ql_loop_back(ha, 0, lb,
4625 buffer_xmt.cookie.dmac_notused,
4626 buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4627 bzero((void *)cmd->pm_stat_buf,
4628 cmd->pm_stat_len);
4629 ddi_rep_get8(buffer_rcv.acc_handle,
4630 (uint8_t *)cmd->pm_stat_buf,
4631 (uint8_t *)buffer_rcv.bp,
4632 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4633 rval = FC_SUCCESS;
4634 } else {
4635 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4636 rval = FC_FAILURE;
4637 }
4638
4639 if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4640 (void) ql_set_loop_point(ha, 0);
4641 }
4642
4643 ql_free_phys(ha, &buffer_xmt);
4644 ql_free_phys(ha, &buffer_rcv);
4645
4646 /* Needed to recover the f/w */
4647 set_flags |= ISP_ABORT_NEEDED;
4648
4649 /* Restart IP if it was shutdown. */
4650 if (pha->flags & IP_ENABLED &&
4651 !(pha->flags & IP_INITIALIZED)) {
4652 (void) ql_initialize_ip(pha);
4653 ql_isp_rcvbuf(pha);
4654 }
4655
4656 break;
4657 case QL_DIAG_ECHO: {
4658 /*
4659 * issue an echo command with a user supplied
4660 * data pattern and destination address
4661 */
4662 echo_t echo; /* temp echo struct */
4663
4664 /* Setup echo cmd & adjust for platform */
4665 opcode = QL_ECHO_CMD;
4666 BIG_ENDIAN_32(&opcode);
4667
4668 /*
4669 * due to limitations in the ql
4670 * firmaware the echo data field is
4671 * limited to 220
4672 */
4673 if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4674 (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4675 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4676 "cmdl1=%lxh, statl2=%lxh\n",
4677 cmd->pm_cmd_len, cmd->pm_stat_len);
4678 rval = FC_TOOMANY;
4679 break;
4680 }
4681
4682 /*
4683 * the input data buffer has the user
4684 * supplied data pattern. The "echoed"
4685 * data will be DMAed into the output
4686 * data buffer. Therefore the length
4687 * of the output buffer must be equal
4688 * to or greater then the input buffer
4689 * length
4690 */
4691 if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4692 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4693 " cmdl1=%lxh, statl2=%lxh\n",
4694 cmd->pm_cmd_len, cmd->pm_stat_len);
4695 rval = FC_TOOMANY;
4696 break;
4697 }
4698 /* add four bytes for the opcode */
4699 echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4700
4701 /*
4702 * are we 32 or 64 bit addressed???
4703 * We need to get the appropriate
4704 * DMA and set the command options;
4705 * 64 bit (bit 6) or 32 bit
4706 * (no bit 6) addressing.
4707 * while we are at it lets ask for
4708 * real echo (bit 15)
4709 */
4710 echo.options = BIT_15;
4711 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4712 !(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
4713 echo.options = (uint16_t)
4714 (echo.options | BIT_6);
4715 }
4716
4717 /*
4718 * Set up the DMA mappings for the
4719 * output and input data buffers.
4720 * First the output buffer
4721 */
4722 if (ql_get_dma_mem(ha, &buffer_xmt,
4723 (uint32_t)(cmd->pm_data_len + 4),
4724 LITTLE_ENDIAN_DMA,
4725 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4726 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4727 rval = FC_NOMEM;
4728 break;
4729 }
4730 echo.transfer_data_address = buffer_xmt.cookie;
4731
4732 /* Next the input buffer */
4733 if (ql_get_dma_mem(ha, &buffer_rcv,
4734 (uint32_t)(cmd->pm_data_len + 4),
4735 LITTLE_ENDIAN_DMA,
4736 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4737 /*
4738 * since we could not allocate
4739 * DMA space for the input
4740 * buffer we need to clean up
4741 * by freeing the DMA space
4742 * we allocated for the output
4743 * buffer
4744 */
4745 ql_free_phys(ha, &buffer_xmt);
4746 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4747 rval = FC_NOMEM;
4748 break;
4749 }
4750 echo.receive_data_address = buffer_rcv.cookie;
4751
4752 /*
4753 * copy the 4 byte ECHO op code to the
4754 * allocated DMA space
4755 */
4756 ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4757 (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4758
4759 /*
4760 * copy the user supplied data to the
4761 * allocated DMA space
4762 */
4763 ddi_rep_put8(buffer_xmt.acc_handle,
4764 (uint8_t *)cmd->pm_cmd_buf,
4765 (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4766 DDI_DEV_AUTOINCR);
4767
4768 /* Shutdown IP. */
4769 if (pha->flags & IP_INITIALIZED) {
4770 (void) ql_shutdown_ip(pha);
4771 }
4772
4773 /* send the echo */
4774 if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4775 ddi_rep_put8(buffer_rcv.acc_handle,
4776 (uint8_t *)buffer_rcv.bp + 4,
4777 (uint8_t *)cmd->pm_stat_buf,
4778 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4779 } else {
4780 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4781 rval = FC_FAILURE;
4782 }
4783
4784 /* Restart IP if it was shutdown. */
4785 if (pha->flags & IP_ENABLED &&
4786 !(pha->flags & IP_INITIALIZED)) {
4787 (void) ql_initialize_ip(pha);
4788 ql_isp_rcvbuf(pha);
4789 }
4790 /* free up our DMA buffers */
4791 ql_free_phys(ha, &buffer_xmt);
4792 ql_free_phys(ha, &buffer_rcv);
4793 break;
4794 }
4795 default:
4796 EL(ha, "unknown=%xh, FC_PORT_DIAG "
4797 "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4798 rval = FC_INVALID_REQUEST;
4799 break;
4800 }
4801 ql_restart_driver(ha);
4802 break;
4803 case FC_PORT_LINK_STATE:
4804 /* Check for name equal to null. */
4805 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4806 index++) {
4807 if (cmd->pm_cmd_buf[index] != 0) {
4808 break;
4809 }
4810 }
4811
4812 /* If name not null. */
4813 if (index < 8 && cmd->pm_cmd_len >= 8) {
4814 /* Locate device queue. */
4815 tq = NULL;
4816 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4817 tq == NULL; index++) {
4818 for (link = ha->dev[index].first; link != NULL;
4819 link = link->next) {
4820 tq = link->base_address;
4821
4822 if (bcmp((void *)&tq->port_name[0],
4823 (void *)cmd->pm_cmd_buf, 8) == 0) {
4824 break;
4825 } else {
4826 tq = NULL;
4827 }
4828 }
4829 }
4830
4831 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4832 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4833 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4834 } else {
4835 cnt = FC_PORT_SPEED_MASK(ha->state) |
4836 FC_STATE_OFFLINE;
4837 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4838 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4839 }
4840 } else {
4841 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4842 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4843 }
4844 break;
4845 case FC_PORT_INITIALIZE:
4846 if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4847 EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4848 rval2);
4849 ql_restart_driver(ha);
4850 rval = FC_TRAN_BUSY;
4851 break;
4852 }
4853 if (cmd->pm_cmd_len >= 8) {
4854 tq = NULL;
4855 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4856 tq == NULL; index++) {
4857 for (link = ha->dev[index].first; link != NULL;
4858 link = link->next) {
4859 tq = link->base_address;
4860
4861 if (bcmp((void *)&tq->port_name[0],
4862 (void *)cmd->pm_cmd_buf, 8) == 0) {
4863 if (!VALID_DEVICE_ID(ha,
4864 tq->loop_id)) {
4865 tq = NULL;
4866 }
4867 break;
4868 } else {
4869 tq = NULL;
4870 }
4871 }
4872 }
4873
4874 if (tq == NULL || ql_target_reset(ha, tq,
4875 ha->loop_reset_delay) != QL_SUCCESS) {
4876 EL(ha, "failed, FC_PORT_INITIALIZE "
4877 "FC_FAILURE\n");
4878 rval = FC_FAILURE;
4879 }
4880 } else {
4881 EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4882 "clen=%lxh\n", cmd->pm_cmd_len);
4883
4884 rval = FC_FAILURE;
4885 }
4886 ql_restart_driver(ha);
4887 break;
4888 case FC_PORT_RLS:
4889 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4890 EL(ha, "failed, buffer size passed: %lxh, "
4891 "req: %lxh\n", cmd->pm_data_len,
4892 (sizeof (fc_rls_acc_t)));
4893 rval = FC_FAILURE;
4894 } else if (LOOP_NOT_READY(pha)) {
4895 EL(ha, "loop NOT ready\n");
4896 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4897 } else if (ql_get_link_status(ha, ha->loop_id,
4898 cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4899 EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4900 rval = FC_FAILURE;
4901 #ifdef _BIG_ENDIAN
4902 } else {
4903 fc_rls_acc_t *rls;
4904
4905 rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4906 LITTLE_ENDIAN_32(&rls->rls_link_fail);
4907 LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4908 LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4909 LITTLE_ENDIAN_32(&rls->rls_prim_seq_err);
4910 LITTLE_ENDIAN_32(&rls->rls_invalid_word);
4911 LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4912 #endif /* _BIG_ENDIAN */
4913 }
4914 break;
4915 case FC_PORT_GET_NODE_ID:
4916 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4917 cmd->pm_data_buf) != QL_SUCCESS) {
4918 EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4919 rval = FC_FAILURE;
4920 }
4921 break;
4922 case FC_PORT_SET_NODE_ID:
4923 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4924 cmd->pm_data_buf) != QL_SUCCESS) {
4925 EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4926 rval = FC_FAILURE;
4927 }
4928 break;
4929 case FC_PORT_DOWNLOAD_FCODE:
4930 if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4931 EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4932 rval2);
4933 ql_restart_driver(ha);
4934 rval = FC_TRAN_BUSY;
4935 break;
4936 }
4937 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
4938 rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4939 (uint32_t)cmd->pm_data_len);
4940 } else {
4941 if (cmd->pm_data_buf[0] == 4 &&
4942 cmd->pm_data_buf[8] == 0 &&
4943 cmd->pm_data_buf[9] == 0x10 &&
4944 cmd->pm_data_buf[10] == 0 &&
4945 cmd->pm_data_buf[11] == 0) {
4946 rval = ql_24xx_load_flash(ha,
4947 (uint8_t *)cmd->pm_data_buf,
4948 (uint32_t)cmd->pm_data_len,
4949 ha->flash_fw_addr << 2);
4950 } else {
4951 rval = ql_24xx_load_flash(ha,
4952 (uint8_t *)cmd->pm_data_buf,
4953 (uint32_t)cmd->pm_data_len, 0);
4954 }
4955 }
4956
4957 if (rval != QL_SUCCESS) {
4958 EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4959 rval = FC_FAILURE;
4960 } else {
4961 rval = FC_SUCCESS;
4962 }
4963 ql_reset_chip(ha);
4964 set_flags |= ISP_ABORT_NEEDED;
4965 ql_restart_driver(ha);
4966 break;
4967
4968 case FC_PORT_GET_P2P_INFO:
4969
4970 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4971 if (cmd->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
4972 EL(ha, "inadequate data length")
4973 rval = FC_NOMEM;
4974 break;
4975 }
4976
4977 p2p_info = (fc_fca_p2p_info_t *)cmd->pm_data_buf;
4978
4979 if ((ha->topology & QL_N_PORT) &&
4980 (ha->flags & POINT_TO_POINT)) {
4981 p2p_info->fca_d_id = ha->d_id.b24;
4982 p2p_info->d_id = ha->n_port->d_id.b24;
4983
4984 bcopy((void *) &ha->n_port->port_name[0],
4985 (caddr_t)&p2p_info->pwwn, 8);
4986 bcopy((void *) &ha->n_port->node_name[0],
4987 (caddr_t)&p2p_info->nwwn, 8);
4988 rval = FC_SUCCESS;
4989
4990 EL(ha, "P2P HID=%xh, d_id=%xh, WWPN=%02x%02x%02x%02x"
4991 "%02x%02x%02x%02x : "
4992 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
4993 p2p_info->fca_d_id, p2p_info->d_id,
4994 ha->n_port->port_name[0],
4995 ha->n_port->port_name[1], ha->n_port->port_name[2],
4996 ha->n_port->port_name[3], ha->n_port->port_name[4],
4997 ha->n_port->port_name[5], ha->n_port->port_name[6],
4998 ha->n_port->port_name[7], ha->n_port->node_name[0],
4999 ha->n_port->node_name[1], ha->n_port->node_name[2],
5000 ha->n_port->node_name[3], ha->n_port->node_name[4],
5001 ha->n_port->node_name[5], ha->n_port->node_name[6],
5002 ha->n_port->node_name[7]);
5003 break;
5004 } else {
5005 EL(ha, "No p2p info reported in non n2n topology\n");
5006 rval = FC_BADCMD;
5007 }
5008 break;
5009
5010 case FC_PORT_DOWNLOAD_FW:
5011 EL(ha, "unsupported=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
5012 rval = FC_BADCMD;
5013 break;
5014 default:
5015 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
5016 rval = FC_BADCMD;
5017 break;
5018 }
5019
5020 /* Wait for suspension to end. */
5021 ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
5022 timer = 0;
5023
5024 while (timer++ < 3000 &&
5025 ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
5026 ql_delay(ha, 10000);
5027 }
5028
5029 if (rval != FC_SUCCESS) {
5030 EL(ha, "failed, rval = %xh\n", rval);
5031 } else {
5032 /*EMPTY*/
5033 QL_PRINT_3(ha, "done\n");
5034 }
5035
5036 return (rval);
5037 }
5038
5039 static opaque_t
5040 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
5041 {
5042 port_id_t id;
5043 ql_adapter_state_t *ha;
5044 ql_tgt_t *tq;
5045
5046 id.r.rsvd_1 = 0;
5047 id.b24 = d_id.port_id;
5048
5049 ha = ql_fca_handle_to_state(fca_handle);
5050 if (ha == NULL) {
5051 QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
5052 (void *)fca_handle);
5053 return (NULL);
5054 }
5055 QL_PRINT_3(ha, "started, d_id=%xh\n", id.b24);
5056
5057 tq = ql_d_id_to_queue(ha, id);
5058
5059 if (tq == NULL && id.b24 != 0 && id.b24 != FS_BROADCAST) {
5060 EL(ha, "failed, no tq available for d_id: %xh\n", id.b24);
5061 } else {
5062 /*EMPTY*/
5063 QL_PRINT_3(ha, "done\n");
5064 }
5065 return (tq);
5066 }
5067
5068 /* ************************************************************************ */
5069 /* FCA Driver Local Support Functions. */
5070 /* ************************************************************************ */
5071
5072 /*
5073 * ql_cmd_setup
5074 * Verifies proper command.
5075 *
5076 * Input:
5077 * fca_handle = handle setup by ql_bind_port().
5078 * pkt = pointer to fc_packet.
5079 * rval = pointer for return value.
5080 *
5081 * Returns:
5082 * Adapter state pointer, NULL = failure.
5083 *
5084 * Context:
5085 * Kernel context.
5086 */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t	*tq;
	port_id_t	d_id;

	/* Start with clean residual counts for this transport attempt. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(ha, "started\n");

	/*
	 * During a system panic or crash dump, skip all state/DMA
	 * validation and let the command through.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Adapter must be online to accept commands. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP SCSI commands, resolve (and cache in the packet) the
	 * target device queue, and reject while the target needs
	 * re-authentication after an RSCN.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/* Cache the lookup for subsequent packets. */
			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/* Check for packet already running. */
	if (sp->handle != 0) {
		*rval = FC_DEVICE_BUSY;
		cmn_err(CE_WARN, "%s(%d) already running pkt=%p, sp=%p, "
		    "sp->pkt=%p, sp->hdl=%x, spf=%x, cq=%p\n", QL_NAME,
		    ha->instance, (void *)pkt, (void *)sp, (void *)sp->pkt,
		    sp->handle, sp->flags, (void *)sp->cmd.head);
		return (NULL);
	}
	/*
	 * Spread completions across response queues round-robin when
	 * multiple queues exist.
	 */
	if (ha->rsp_queues_cnt > 1) {
		ADAPTER_STATE_LOCK(ha);
		sp->rsp_q_number = ha->rsp_q_number++;
		if (ha->rsp_q_number == ha->rsp_queues_cnt) {
			ha->rsp_q_number = 0;
		}
		ADAPTER_STATE_UNLOCK(ha);
	} else {
		sp->rsp_q_number = 0;
	}

	/*
	 * Check DMA pointers.
	 *
	 * NOTE(review): *rval carries DDI_SUCCESS/DDI_FM_OK codes through
	 * these checks before being mapped to an FC_* code below; the
	 * comparisons rely on those codes agreeing — confirm before
	 * changing any of them.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);

		*rval = qlc_fm_check_dma_handle(ha, pkt->pkt_cmd_dma);
		if (*rval == DDI_FM_OK) {
			*rval = qlc_fm_check_acc_handle(ha,
			    pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);

		*rval = qlc_fm_check_dma_handle(ha, pkt->pkt_resp_dma);
		if (*rval == DDI_FM_OK) {
			*rval = qlc_fm_check_acc_handle(ha,
			    pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * (The bitwise & on the three conditions is deliberate, to
	 * evaluate them without short-circuit branches.)
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);

		*rval = qlc_fm_check_dma_handle(ha, pkt->pkt_data_dma);
		if (*rval == DDI_FM_OK) {
			*rval = qlc_fm_check_acc_handle(ha,
			    pkt->pkt_data_acc);
		}
	}

	/* Any FM check failure: fail the packet as a retryable DMA error. */
	if (*rval != DDI_FM_OK) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;
		pkt->pkt_expln = FC_EXPLN_NONE;
		pkt->pkt_action = FC_ACTION_RETRYABLE;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_io_comp(sp);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* Packet must have been allocated through this FCA. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(ha, "done\n");

	return (ha);
}
5243
5244 /*
5245 * ql_els_plogi
5246 * Issue a extended link service port login request.
5247 *
5248 * Input:
5249 * ha = adapter state pointer.
5250 * pkt = pointer to fc_packet.
5251 *
5252 * Returns:
5253 * FC_SUCCESS - the packet was accepted for transport.
5254 * FC_TRANSPORT_ERROR - a transport error occurred.
5255 *
5256 * Context:
5257 * Kernel context.
5258 */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);

	/* Reject immediately if the adapter is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(ha, "offline done\n");
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology it sends a PLOGI after determining
		 * it has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/*
	 * NOTE(review): QL_CONSUMED is returned directly to the caller
	 * here (iocb queued; completion happens asynchronously) — the
	 * caller is expected to recognize this non-FC_* value.
	 */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	/* Log in to the destination port; retry once on loop ID reuse. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh port database state after a successful login. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		acc.common_service.rx_bufsize =
		    ha->loginparams.common_service.rx_bufsize;
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Report the target's WWPN/WWNN in the ACC payload. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Exchange busy: convert the ACC to a reject. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			/* Ask the task daemon to negotiate iiDMA speed. */
			if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the login failure to packet state/reason codes. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
5427
5428 /*
5429 * ql_p2p_plogi
5430 * Start an extended link service port login request using
5431 * an ELS Passthru iocb.
5432 *
5433 * Input:
5434 * ha = adapter state pointer.
5435 * pkt = pointer to fc_packet.
5436 *
5437 * Returns:
 * QL_CONSUMED - the iocb was queued for transport.
5439 *
5440 * Context:
5441 * Kernel context.
5442 */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;
	ql_tgt_t	*tq = &tmp;	/* scratch entry for the database scan */
	int		rval;
	port_id_t	d_id;
	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;
	uint16_t	loop_id;

	/* Start the scan from a clean scratch entry with a zero d_id. */
	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary. This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 *
	 * Walk every local loop id and ask the firmware for its port
	 * database entry; ql_get_port_database() fills in tq->master_state,
	 * tq->d_id and tq->flags for the probed loop_id.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh, id=%x\n", rval, id);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			/* A PLOGI is already pending on this handle; adopt it. */
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "loop_id=%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle loop_id=%xh, "
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/* avoid a lint error */
					uint16_t *hndl;
					uint16_t val;

					/*
					 * The stale handle collides with an
					 * unavailable entry whose d_id does
					 * not match; bump it by two to move
					 * off of it (done via a temporary to
					 * keep lint quiet).
					 */
					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
				EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
				    "master state=%x\n", rval, id, tq->loop_id,
				    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/*
			 * A logged-in target-mode entry is the one to use;
			 * skip entries that represent initiator devices.
			 */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the command buffer to the device before queuing the IOCB. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * In case fw does not have the loop id ready, driver assume 0 is
	 * used since this is p2p and there is only one remote port.
	 */
	if (id == LAST_LOCAL_LOOP_ID + 1) {
		/* Loop terminated without a break: no usable entry found. */
		EL(ha, "out of range loop id; rval=%xh, id=%xh, d_id=%xh\n",
		    rval, id, d_id.b24);
	} else {
		EL(ha, "remote port loop_id '%x' has been logged in, d_id=%x\n",
		    id, d_id.b24);
	}

	tq = ql_d_id_to_queue(ha, d_id);

	/*
	 * LV could use any d_id it likes.
	 * tq may not be available yet.
	 */
	if (tq == NULL) {
		/* Fall back to loop_id 0 when the scan found nothing. */
		if (id != LAST_LOCAL_LOOP_ID + 1) {
			loop_id = id;
		} else {
			loop_id = 0;
		}
		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, loop_id);

		ADAPTER_STATE_UNLOCK(ha);
	}

	/*
	 * Lun 0 should always be allocated since tq is
	 * derived from the lun queue in ql_els_passthru_entry
	 * in the interrupt handler.
	 */
	sp->lun_queue = ql_lun_queue(ha, tq, 0);

	DEVICE_QUEUE_LOCK(tq);
	ql_timeout_insert(ha, tq, sp);
	DEVICE_QUEUE_UNLOCK(tq);

	/* Queue the ELS passthru IOCB; completion arrives via interrupt. */
	ql_start_iocb(ha, sp);

	return (QL_CONSUMED);
}
5567
5568
5569 /*
5570 * ql_els_flogi
5571 * Issue a extended link service fabric login request.
5572 *
5573 * Input:
5574 * ha = adapter state pointer.
5575 * pkt = pointer to fc_packet.
5576 *
5577 * Returns:
5578 * FC_SUCCESS - the packet was accepted for transport.
5579 * FC_TRANSPORT_ERROR - a transport error occurred.
5580 *
5581 * Context:
5582 * Kernel context.
5583 */
5584 static int
5585 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5586 {
5587 ql_tgt_t *tq = NULL;
5588 port_id_t d_id;
5589 la_els_logi_t acc;
5590 class_svc_param_t *class3_param;
5591 int rval = FC_SUCCESS;
5592 int accept = 0;
5593
5594 QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5595
5596 bzero(&acc, sizeof (acc));
5597 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5598
5599 if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
5600 /*
5601 * d_id of zero in a FLOGI accept response in a point to point
5602 * topology triggers evaluation of N Port login initiative.
5603 */
5604 pkt->pkt_resp_fhdr.d_id = 0;
5605 /*
5606 * An N_Port already logged in with the firmware
5607 * will have the only database entry.
5608 */
5609 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5610 tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5611 }
5612
5613 if (tq != NULL) {
5614 /*
5615 * If the target port has initiative send
5616 * up a PLOGI about the new device.
5617 */
5618 if (ql_wwn_cmp(ha, (la_wwn_t *)tq->port_name,
5619 (la_wwn_t *)ha->loginparams.nport_ww_name.raw_wwn)
5620 == 1) {
5621 ha->send_plogi_timer = 3;
5622 } else {
5623 ha->send_plogi_timer = 0;
5624 }
5625 pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5626 } else {
5627 /*
5628 * An N_Port not logged in with the firmware will not
5629 * have a database entry. We accept anyway and rely
5630 * on a PLOGI from the upper layers to set the d_id
5631 * and s_id.
5632 */
5633 accept = 1;
5634 }
5635 } else {
5636 tq = ql_d_id_to_queue(ha, d_id);
5637 }
5638 if ((tq != NULL) || (accept != NULL)) {
5639 /* Build ACC. */
5640 pkt->pkt_state = FC_PKT_SUCCESS;
5641 class3_param = (class_svc_param_t *)&acc.class_3;
5642
5643 acc.ls_code.ls_code = LA_ELS_ACC;
5644 acc.common_service.fcph_version = 0x2006;
5645 if (ha->topology & QL_N_PORT) {
5646 /* clear F_Port indicator */
5647 acc.common_service.cmn_features = 0x0800;
5648 } else {
5649 acc.common_service.cmn_features = 0x1b00;
5650 }
5651 acc.common_service.rx_bufsize =
5652 ha->loginparams.common_service.rx_bufsize;
5653 acc.common_service.conc_sequences = 0xff;
5654 acc.common_service.relative_offset = 0x03;
5655 acc.common_service.e_d_tov = 0x7d0;
5656 if (accept) {
5657 /* Use the saved N_Port WWNN and WWPN */
5658 if (ha->n_port != NULL) {
5659 bcopy((void *)&ha->n_port->port_name[0],
5660 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5661 bcopy((void *)&ha->n_port->node_name[0],
5662 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5663 /* mark service options invalid */
5664 class3_param->class_valid_svc_opt = 0x0800;
5665 } else {
5666 EL(ha, "ha->n_port is NULL\n");
5667 /* Build RJT. */
5668 acc.ls_code.ls_code = LA_ELS_RJT;
5669
5670 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5671 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5672 }
5673 } else {
5674 bcopy((void *)&tq->port_name[0],
5675 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5676 bcopy((void *)&tq->node_name[0],
5677 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5678
5679 class3_param = (class_svc_param_t *)&acc.class_3;
5680 class3_param->class_valid_svc_opt = 0x8800;
5681 class3_param->recipient_ctl = tq->class3_recipient_ctl;
5682 class3_param->rcv_data_size = tq->class3_rcv_data_size;
5683 class3_param->conc_sequences =
5684 tq->class3_conc_sequences;
5685 class3_param->open_sequences_per_exch =
5686 tq->class3_open_sequences_per_exch;
5687 }
5688 } else {
5689 /* Build RJT. */
5690 acc.ls_code.ls_code = LA_ELS_RJT;
5691
5692 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5693 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5694 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5695 }
5696
5697 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5698 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5699
5700 if (rval != FC_SUCCESS) {
5701 EL(ha, "failed, rval = %xh\n", rval);
5702 } else {
5703 /*EMPTY*/
5704 QL_PRINT_3(ha, "done\n");
5705 }
5706 return (rval);
5707 }
5708
5709 /*
5710 * ql_els_logo
5711 * Issue a extended link service logout request.
5712 *
5713 * Input:
5714 * ha = adapter state pointer.
5715 * pkt = pointer to fc_packet.
5716 *
5717 * Returns:
5718 * FC_SUCCESS - the packet was accepted for transport.
5719 * FC_TRANSPORT_ERROR - a transport error occurred.
5720 *
5721 * Context:
5722 * Kernel context.
5723 */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;

	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never log out the broadcast address; nothing to do. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh PLOGI before new commands are accepted. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		/*
		 * Abort outstanding commands and poll until the firmware
		 * has drained them all (tq->outcnt reaches zero).  The
		 * device queue lock is dropped around the abort/delay and
		 * re-taken to sample outcnt.
		 */
		do {
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
5783
5784 /*
5785 * ql_els_prli
5786 * Issue a extended link service process login request.
5787 *
5788 * Input:
5789 * ha = adapter state pointer.
5790 * pkt = pointer to fc_packet.
5791 *
5792 * Returns:
5793 * FC_SUCCESS - the packet was accepted for transport.
5794 * FC_TRANSPORT_ERROR - a transport error occurred.
5795 *
5796 * Context:
5797 * Kernel context.
5798 */
static int
ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t	*tq;
	port_id_t	d_id;
	la_els_prli_t	acc;
	prli_svc_param_t *param;
	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		/* Refresh the cached PRLI service parameters. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		if ((ha->topology & QL_N_PORT) &&
		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {

			/*
			 * Point-to-point with PLOGI complete: send the PRLI
			 * on the wire as a passthru IOCB; completion comes
			 * back through the interrupt handler.
			 */
			/* always set lun_queue */
			sp->lun_queue = ql_lun_queue(ha, tq, 0);

			DEVICE_QUEUE_LOCK(tq);
			ql_timeout_insert(ha, tq, sp);
			DEVICE_QUEUE_UNLOCK(tq);
			ql_start_iocb(ha, sp);
			rval = QL_CONSUMED;
		} else {
			/* Build ACC locally from the cached parameters. */
			bzero(&acc, sizeof (acc));
			acc.ls_code = LA_ELS_ACC;
			acc.page_length = 0x10;
			acc.payload_length = tq->prli_payload_length;

			param = (prli_svc_param_t *)&acc.service_params[0];
			param->type = 0x08;	/* FCP type code */
			param->rsvd = 0x00;
			param->process_assoc_flags = tq->prli_svc_param_word_0;
			param->process_flags = tq->prli_svc_param_word_3;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* in case of P2P, tq might not have been created yet */
		if (ha->topology & QL_N_PORT) {

			/* Acquire adapter state lock. */
			ADAPTER_STATE_LOCK(ha);
			/*
			 * NOTE(review): ql_dev_init() result is used without
			 * a NULL check here — confirm it cannot fail in the
			 * N_PORT case.
			 */
			tq = ql_dev_init(ha, d_id, ha->n_port->n_port_handle);
			ADAPTER_STATE_UNLOCK(ha);

			/* always alloc lun #0 */
			sp->lun_queue = ql_lun_queue(ha, tq, 0);
			/* Seed the new tq with the saved N_Port names. */
			bcopy((void *)&ha->n_port->port_name[0],
			    (void *) &tq->port_name[0], 8);
			bcopy((void *)&ha->n_port->node_name[0],
			    (void *) &tq->node_name[0], 8);

			DEVICE_QUEUE_LOCK(tq);
			ql_timeout_insert(ha, tq, sp);
			DEVICE_QUEUE_UNLOCK(tq);

			ql_start_iocb(ha, sp);
			rval = QL_CONSUMED;

		} else {

			la_els_rjt_t rjt;

			/* Build RJT. */
			bzero(&rjt, sizeof (rjt));
			rjt.ls_code.ls_code = LA_ELS_RJT;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
			    (uint8_t *)pkt->pkt_resp, sizeof (rjt),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
		}
	}

	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
5896
5897 /*
5898 * ql_els_prlo
5899 * Issue a extended link service process logout request.
5900 *
5901 * Input:
5902 * ha = adapter state pointer.
5903 * pkt = pointer to fc_packet.
5904 *
5905 * Returns:
5906 * FC_SUCCESS - the packet was accepted for transport.
5907 * FC_TRANSPORT_ERROR - a transport error occurred.
5908 *
5909 * Context:
5910 * Kernel context.
5911 */
5912 /* ARGSUSED */
5913 static int
5914 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5915 {
5916 la_els_prli_t acc;
5917
5918 QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5919
5920 /* Build ACC. */
5921 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5922 (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5923
5924 acc.ls_code = LA_ELS_ACC;
5925 acc.service_params[2] = 1;
5926
5927 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5928 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5929
5930 pkt->pkt_state = FC_PKT_SUCCESS;
5931
5932 QL_PRINT_3(ha, "done\n");
5933
5934 return (FC_SUCCESS);
5935 }
5936
5937 /*
5938 * ql_els_adisc
5939 * Issue a extended link service address discovery request.
5940 *
5941 * Input:
5942 * ha = adapter state pointer.
5943 * pkt = pointer to fc_packet.
5944 *
5945 * Returns:
5946 * FC_SUCCESS - the packet was accepted for transport.
5947 * FC_TRANSPORT_ERROR - a transport error occurred.
5948 *
5949 * Context:
5950 * Kernel context.
5951 */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;

	QL_PRINT_3(ha, "started\n");

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Look up the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * If the tq has no valid loop id, try to recover one by asking
	 * the firmware for its device id list and matching on d_id.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the entry count returned by F/W. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Recovery failed: give up on this target. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Restart any LUN queues with pending commands. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * NOTE(review): the lock is re-taken
					 * here, so ql_next() apparently drops
					 * the device queue lock — confirm.
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6064
6065 /*
6066 * ql_els_linit
6067 * Issue a extended link service loop initialize request.
6068 *
6069 * Input:
6070 * ha = adapter state pointer.
6071 * pkt = pointer to fc_packet.
6072 *
6073 * Returns:
6074 * FC_SUCCESS - the packet was accepted for transport.
6075 * FC_TRANSPORT_ERROR - a transport error occurred.
6076 *
6077 * Context:
6078 * Kernel context.
6079 */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;	/* scratch for endian conversions */
	port_id_t		d_id;

	QL_PRINT_3(ha, "started\n");

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_FABRIC_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * The response lands directly in the packet's response DMA
		 * buffer; store its address little-endian in the command.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* LINIT subcommand (0x70) addressed to the target d_id. */
		lfa.subcommand_length[0] = 4;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x70;
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Not fabric-attached: LINIT cannot be forwarded; reject. */
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6154
6155 /*
6156 * ql_els_lpc
6157 * Issue a extended link service loop control request.
6158 *
6159 * Input:
6160 * ha = adapter state pointer.
6161 * pkt = pointer to fc_packet.
6162 *
6163 * Returns:
6164 * FC_SUCCESS - the packet was accepted for transport.
6165 * FC_TRANSPORT_ERROR - a transport error occurred.
6166 *
6167 * Context:
6168 * Kernel context.
6169 */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;	/* scratch for endian conversions */
	port_id_t		d_id;

	QL_PRINT_3(ha, "started\n");

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_FABRIC_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			/*
			 * NOTE(review): this 32-bit path uses dmac_address
			 * and zeroes the high word, while ql_els_linit uses
			 * LSD/MSD of dmac_laddress — confirm which form is
			 * intended for consistency.
			 */
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* LPC subcommand (0x71) addressed to the target d_id. */
		lfa.subcommand_length[0] = 20;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x71;
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 16);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Not fabric-attached: LPC cannot be forwarded; reject. */
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6242
6243 /*
6244 * ql_els_lsts
6245 * Issue a extended link service loop status request.
6246 *
6247 * Input:
6248 * ha = adapter state pointer.
6249 * pkt = pointer to fc_packet.
6250 *
6251 * Returns:
6252 * FC_SUCCESS - the packet was accepted for transport.
6253 * FC_TRANSPORT_ERROR - a transport error occurred.
6254 *
6255 * Context:
6256 * Kernel context.
6257 */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;	/* scratch for endian conversions */
	port_id_t		d_id;

	QL_PRINT_3(ha, "started\n");

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_FABRIC_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		/*
		 * The response lands directly in the packet's response DMA
		 * buffer; store its address little-endian in the command.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* LSTS subcommand (0x72) addressed to the target d_id. */
		lfa.subcommand_length[0] = 2;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x72;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Not fabric-attached: LSTS cannot be forwarded; reject. */
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6328
6329 /*
6330 * ql_els_scr
6331 * Issue a extended link service state change registration request.
6332 *
6333 * Input:
6334 * ha = adapter state pointer.
6335 * pkt = pointer to fc_packet.
6336 *
6337 * Returns:
6338 * FC_SUCCESS - the packet was accepted for transport.
6339 * FC_TRANSPORT_ERROR - a transport error occurred.
6340 *
6341 * Context:
6342 * Kernel context.
6343 */
6344 static int
6345 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
6346 {
6347 fc_scr_resp_t acc;
6348
6349 QL_PRINT_3(ha, "started\n");
6350
6351 bzero(&acc, sizeof (acc));
6352 if (ha->topology & QL_FABRIC_CONNECTION) {
6353 fc_scr_req_t els;
6354
6355 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6356 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6357
6358 if (ql_send_change_request(ha, els.scr_func) ==
6359 QL_SUCCESS) {
6360 /* Build ACC. */
6361 acc.scr_acc = LA_ELS_ACC;
6362
6363 pkt->pkt_state = FC_PKT_SUCCESS;
6364 } else {
6365 /* Build RJT. */
6366 acc.scr_acc = LA_ELS_RJT;
6367
6368 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6369 pkt->pkt_reason = FC_REASON_HW_ERROR;
6370 EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
6371 }
6372 } else {
6373 /* Build RJT. */
6374 acc.scr_acc = LA_ELS_RJT;
6375
6376 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6377 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6378 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6379 }
6380
6381 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6382 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6383
6384 QL_PRINT_3(ha, "done\n");
6385
6386 return (FC_SUCCESS);
6387 }
6388
6389 /*
6390 * ql_els_rscn
6391 * Issue a extended link service register state
6392 * change notification request.
6393 *
6394 * Input:
6395 * ha = adapter state pointer.
6396 * pkt = pointer to fc_packet.
6397 *
6398 * Returns:
6399 * FC_SUCCESS - the packet was accepted for transport.
6400 * FC_TRANSPORT_ERROR - a transport error occurred.
6401 *
6402 * Context:
6403 * Kernel context.
6404 */
6405 static int
6406 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6407 {
6408 ql_rscn_resp_t acc;
6409
6410 QL_PRINT_3(ha, "started\n");
6411
6412 bzero(&acc, sizeof (acc));
6413 if (ha->topology & QL_FABRIC_CONNECTION) {
6414 /* Build ACC. */
6415 acc.scr_acc = LA_ELS_ACC;
6416
6417 pkt->pkt_state = FC_PKT_SUCCESS;
6418 } else {
6419 /* Build RJT. */
6420 acc.scr_acc = LA_ELS_RJT;
6421
6422 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6423 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6424 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6425 }
6426
6427 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6428 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6429
6430 QL_PRINT_3(ha, "done\n");
6431
6432 return (FC_SUCCESS);
6433 }
6434
6435 /*
6436 * ql_els_farp_req
6437 * Issue FC Address Resolution Protocol (FARP)
6438 * extended link service request.
6439 *
6440 * Note: not supported.
6441 *
6442 * Input:
6443 * ha = adapter state pointer.
6444 * pkt = pointer to fc_packet.
6445 *
6446 * Returns:
6447 * FC_SUCCESS - the packet was accepted for transport.
6448 * FC_TRANSPORT_ERROR - a transport error occurred.
6449 *
6450 * Context:
6451 * Kernel context.
6452 */
6453 /* ARGSUSED */
6454 static int
6455 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6456 {
6457 ql_acc_rjt_t acc;
6458
6459 QL_PRINT_3(ha, "started\n");
6460
6461 bzero(&acc, sizeof (acc));
6462
6463 /* Build ACC. */
6464 acc.ls_code.ls_code = LA_ELS_ACC;
6465
6466 pkt->pkt_state = FC_PKT_SUCCESS;
6467
6468 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6469 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6470
6471 QL_PRINT_3(ha, "done\n");
6472
6473 return (FC_SUCCESS);
6474 }
6475
6476 /*
6477 * ql_els_farp_reply
6478 * Issue FC Address Resolution Protocol (FARP)
6479 * extended link service reply.
6480 *
6481 * Note: not supported.
6482 *
6483 * Input:
6484 * ha = adapter state pointer.
6485 * pkt = pointer to fc_packet.
6486 *
6487 * Returns:
6488 * FC_SUCCESS - the packet was accepted for transport.
6489 * FC_TRANSPORT_ERROR - a transport error occurred.
6490 *
6491 * Context:
6492 * Kernel context.
6493 */
6494 /* ARGSUSED */
6495 static int
6496 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6497 {
6498 ql_acc_rjt_t acc;
6499
6500 QL_PRINT_3(ha, "started\n");
6501
6502 bzero(&acc, sizeof (acc));
6503
6504 /* Build ACC. */
6505 acc.ls_code.ls_code = LA_ELS_ACC;
6506
6507 pkt->pkt_state = FC_PKT_SUCCESS;
6508
6509 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6510 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6511
6512 QL_PRINT_3(ha, "done\n");
6513
6514 return (FC_SUCCESS);
6515 }
6516
/*
 * ql_els_rnid
 *	Handle a Request Node Identification Data (RNID) ELS by sending
 *	an RNID to the addressed port via the firmware and copying the
 *	returned header into the packet's response as an accept.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - accept built and copied to pkt_resp.
 *	FC_FAILURE - reject built (no tq, bad loop id, or send failed).
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(ha, "started\n");

	req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* Look up the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	/*
	 * NOTE(review): the command payload is read directly from pkt_cmd
	 * rather than through ddi_rep_get8() as the other handlers do —
	 * confirm the access handle is not required here.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		/*
		 * NOTE(review): unlike most ELS handlers, which return
		 * FC_SUCCESS after building an RJT, this returns FC_FAILURE.
		 */
		return (FC_FAILURE);
	}

	/* Build ACC: only the fixed RNID header is copied back. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	bcopy(rnid_acc, &acc.hdr, sizeof (fc_rnid_hdr_t));
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6580
/*
 * ql_els_rls
 *	Handle a Read Link Error Status (RLS) ELS by fetching the link
 *	error status block from the firmware for the addressed port and
 *	returning it in the accept payload.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - accept built and copied to pkt_resp.
 *	FC_FAILURE - reject built (no tq, bad loop id, or fetch failed).
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(ha, "started\n");

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* Look up the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		/*
		 * NOTE(review): unlike most ELS handlers, which return
		 * FC_SUCCESS after building an RJT, this returns FC_FAILURE.
		 */
		return (FC_FAILURE);
	}

	/* Byte-swap the firmware counters in place before copying out. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build ACC from the converted link error counters. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6650
/*
 * ql_busy_plogi
 *	Checks whether a PLOGI for a target can proceed, i.e. the target
 *	queue has drained its outstanding commands and no callbacks for
 *	this D_ID are still pending on the unsolicited callback queue.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - the PLOGI may proceed.
 *	FC_TRAN_BUSY - commands/callbacks pending; transport should retry.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t d_id;
	ql_srb_t *sp;
	fc_unsol_buf_t *ubp;
	ql_link_t *link, *next_link;
	int rval = FC_SUCCESS;
	int cnt = 5;

	QL_PRINT_3(ha, "started\n");

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying so I/O can complete. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(ha, "done, busy or async\n");
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	/* Scan pending callbacks for any addressed to this D_ID. */
	for (link = ha->unsol_callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer: D_ID comes from the frame. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
6733
6734 /*
6735 * ql_login_port
6736 * Logs in a device if not already logged in.
6737 *
6738 * Input:
6739 * ha = adapter state pointer.
6740 * d_id = 24 bit port ID.
6741 * DEVICE_QUEUE_LOCK must be released.
6742 *
6743 * Returns:
6744 * QL local function return status code.
6745 *
6746 * Context:
6747 * Kernel context.
6748 */
6749 static int
6750 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6751 {
6752 ql_adapter_state_t *vha;
6753 ql_link_t *link;
6754 uint16_t index;
6755 ql_tgt_t *tq, *tq2;
6756 uint16_t loop_id, first_loop_id, last_loop_id;
6757 int rval = QL_SUCCESS;
6758
6759 QL_PRINT_3(ha, "started, d_id=%xh\n", d_id.b24);
6760
6761 /* Do not login vports */
6762 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6763 if (vha->d_id.b24 == d_id.b24) {
6764 EL(ha, "failed=%xh, d_id=%xh vp_index=%xh\n",
6765 QL_FUNCTION_FAILED, d_id.b24, vha->vp_index);
6766 return (QL_FUNCTION_FAILED);
6767 }
6768 }
6769
6770 /* Get head queue index. */
6771 index = ql_alpa_to_index[d_id.b.al_pa];
6772
6773 /* Check for device already has a queue. */
6774 tq = NULL;
6775 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6776 tq = link->base_address;
6777 if (tq->d_id.b24 == d_id.b24) {
6778 loop_id = tq->loop_id;
6779 break;
6780 } else {
6781 tq = NULL;
6782 }
6783 }
6784
6785 /* Let's stop issuing any IO and unsolicited logo */
6786 if ((tq != NULL) && (!(ddi_in_panic()))) {
6787 DEVICE_QUEUE_LOCK(tq);
6788 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6789 tq->flags &= ~TQF_RSCN_RCVD;
6790 DEVICE_QUEUE_UNLOCK(tq);
6791 }
6792 if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6793 !(tq->flags & TQF_FABRIC_DEVICE)) {
6794 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6795 }
6796
6797 /* Special case for Nameserver */
6798 if (d_id.b24 == FS_NAME_SERVER) {
6799 if (!(ha->topology & QL_FABRIC_CONNECTION)) {
6800 EL(ha, "failed=%xh, d_id=%xh no fabric\n",
6801 QL_FUNCTION_FAILED, d_id.b24);
6802 return (QL_FUNCTION_FAILED);
6803 }
6804
6805 loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
6806 SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6807 if (tq == NULL) {
6808 ADAPTER_STATE_LOCK(ha);
6809 tq = ql_dev_init(ha, d_id, loop_id);
6810 ADAPTER_STATE_UNLOCK(ha);
6811 if (tq == NULL) {
6812 EL(ha, "failed=%xh, d_id=%xh\n",
6813 QL_FUNCTION_FAILED, d_id.b24);
6814 return (QL_FUNCTION_FAILED);
6815 }
6816 }
6817 if (!(CFG_IST(ha, CFG_CTRL_82XX))) {
6818 rval = ql_login_fabric_port(ha, tq, loop_id);
6819 if (rval == QL_SUCCESS) {
6820 tq->loop_id = loop_id;
6821 tq->flags |= TQF_FABRIC_DEVICE;
6822 (void) ql_get_port_database(ha, tq, PDF_NONE);
6823 }
6824 }
6825 /* Check for device already logged in. */
6826 } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6827 if (tq->flags & TQF_FABRIC_DEVICE) {
6828 rval = ql_login_fabric_port(ha, tq, loop_id);
6829 if (rval == QL_PORT_ID_USED) {
6830 rval = QL_SUCCESS;
6831 }
6832 } else if (LOCAL_LOOP_ID(loop_id)) {
6833 rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6834 (tq->flags & TQF_INITIATOR_DEVICE ?
6835 LLF_NONE : LLF_PLOGI));
6836 if (rval == QL_SUCCESS) {
6837 DEVICE_QUEUE_LOCK(tq);
6838 tq->loop_id = loop_id;
6839 DEVICE_QUEUE_UNLOCK(tq);
6840 }
6841 }
6842 } else if (ha->topology & QL_FABRIC_CONNECTION) {
6843 /* Locate unused loop ID. */
6844 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6845 first_loop_id = 0;
6846 last_loop_id = LAST_N_PORT_HDL;
6847 } else if (ha->topology & QL_F_PORT) {
6848 first_loop_id = 0;
6849 last_loop_id = SNS_LAST_LOOP_ID;
6850 } else {
6851 first_loop_id = SNS_FIRST_LOOP_ID;
6852 last_loop_id = SNS_LAST_LOOP_ID;
6853 }
6854
6855 /* Acquire adapter state lock. */
6856 ADAPTER_STATE_LOCK(ha);
6857
6858 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6859 if (tq == NULL) {
6860 EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6861 d_id.b24);
6862
6863 ADAPTER_STATE_UNLOCK(ha);
6864
6865 return (QL_FUNCTION_FAILED);
6866 }
6867
6868 rval = QL_FUNCTION_FAILED;
6869 loop_id = ha->pha->free_loop_id++;
6870 for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6871 index--) {
6872 if (loop_id < first_loop_id ||
6873 loop_id > last_loop_id) {
6874 loop_id = first_loop_id;
6875 ha->pha->free_loop_id = (uint16_t)
6876 (loop_id + 1);
6877 }
6878
6879 /* Bypass if loop ID used. */
6880 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6881 tq2 = ql_loop_id_to_queue(vha, loop_id);
6882 if (tq2 != NULL && tq2 != tq) {
6883 break;
6884 }
6885 }
6886 if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6887 loop_id == ha->loop_id) {
6888 loop_id = ha->pha->free_loop_id++;
6889 continue;
6890 }
6891
6892 ADAPTER_STATE_UNLOCK(ha);
6893 rval = ql_login_fabric_port(ha, tq, loop_id);
6894
6895 /*
6896 * If PORT_ID_USED is returned
6897 * the login_fabric_port() updates
6898 * with the correct loop ID
6899 */
6900 switch (rval) {
6901 case QL_PORT_ID_USED:
6902 /*
6903 * use f/w handle and try to
6904 * login again.
6905 */
6906 ADAPTER_STATE_LOCK(ha);
6907 ha->pha->free_loop_id--;
6908 ADAPTER_STATE_UNLOCK(ha);
6909 loop_id = tq->loop_id;
6910 break;
6911
6912 case QL_SUCCESS:
6913 tq->flags |= TQF_FABRIC_DEVICE;
6914 (void) ql_get_port_database(ha,
6915 tq, PDF_NONE);
6916 index = 1;
6917 break;
6918
6919 case QL_LOOP_ID_USED:
6920 tq->loop_id = PORT_NO_LOOP_ID;
6921 ADAPTER_STATE_LOCK(ha);
6922 loop_id = ha->pha->free_loop_id++;
6923 ADAPTER_STATE_UNLOCK(ha);
6924 break;
6925
6926 case QL_ALL_IDS_IN_USE:
6927 tq->loop_id = PORT_NO_LOOP_ID;
6928 index = 1;
6929 break;
6930
6931 default:
6932 tq->loop_id = PORT_NO_LOOP_ID;
6933 index = 1;
6934 break;
6935 }
6936
6937 ADAPTER_STATE_LOCK(ha);
6938 }
6939
6940 ADAPTER_STATE_UNLOCK(ha);
6941 } else {
6942 rval = QL_FUNCTION_FAILED;
6943 }
6944
6945 if (rval != QL_SUCCESS) {
6946 EL(ha, "failed, rval=%xh, d_id=%xh\n",
6947 rval, d_id.b24);
6948 } else {
6949 EL(ha, "d_id=%xh, loop_id=%xh, "
6950 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6951 tq->loop_id, tq->port_name[0], tq->port_name[1],
6952 tq->port_name[2], tq->port_name[3], tq->port_name[4],
6953 tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6954 }
6955 return (rval);
6956 }
6957
6958 /*
6959 * ql_login_fabric_port
6960 * Issue login fabric port mailbox command.
6961 *
6962 * Input:
6963 * ha: adapter state pointer.
6964 * tq: target queue pointer.
6965 * loop_id: FC Loop ID.
6966 *
6967 * Returns:
6968 * ql local function return status code.
6969 *
6970 * Context:
6971 * Kernel context.
6972 */
6973 static int
6974 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6975 {
6976 int rval;
6977 int index;
6978 int retry = 0;
6979 port_id_t d_id;
6980 ql_tgt_t *newq;
6981 ql_mbx_data_t mr;
6982
6983 QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
6984
6985 /*
6986 * QL_PARAMETER_ERROR also means the firmware is not able to allocate
6987 * PCB entry due to resource issues, or collision.
6988 */
6989 do {
6990 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6991 if ((rval == QL_PARAMETER_ERROR) ||
6992 ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6993 mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6994 retry++;
6995 drv_usecwait(ha->plogi_params->retry_dly_usec);
6996 } else {
6997 break;
6998 }
6999 } while (retry < ha->plogi_params->retry_cnt);
7000
7001 switch (rval) {
7002 case QL_SUCCESS:
7003 tq->loop_id = loop_id;
7004 break;
7005
7006 case QL_PORT_ID_USED:
7007 /*
7008 * This Loop ID should NOT be in use in drivers
7009 */
7010 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
7011
7012 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
7013 cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
7014 "dup loop_id=%xh, d_id=%xh", ha->instance,
7015 newq->loop_id, newq->d_id.b24);
7016 ql_send_logo(ha, newq, NULL);
7017 }
7018
7019 tq->loop_id = mr.mb[1];
7020 break;
7021
7022 case QL_LOOP_ID_USED:
7023 d_id.b.al_pa = LSB(mr.mb[2]);
7024 d_id.b.area = MSB(mr.mb[2]);
7025 d_id.b.domain = LSB(mr.mb[1]);
7026
7027 newq = ql_d_id_to_queue(ha, d_id);
7028 if (newq && (newq->loop_id != loop_id)) {
7029 /*
7030 * This should NEVER ever happen; but this
7031 * code is needed to bail out when the worst
7032 * case happens - or as used to happen before
7033 */
7034 QL_PRINT_2(ha, "Loop ID is now "
7035 "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
7036 "new pairs: [%xh, unknown] and [%xh, %xh]\n",
7037 tq->d_id.b24, loop_id,
7038 newq->d_id.b24, newq->loop_id, tq->d_id.b24,
7039 newq->d_id.b24, loop_id);
7040
7041 if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
7042 ADAPTER_STATE_LOCK(ha);
7043
7044 index = ql_alpa_to_index[newq->d_id.b.al_pa];
7045 ql_add_link_b(&ha->dev[index], &newq->device);
7046
7047 newq->d_id.b24 = d_id.b24;
7048
7049 index = ql_alpa_to_index[d_id.b.al_pa];
7050 ql_add_link_b(&ha->dev[index], &newq->device);
7051
7052 ADAPTER_STATE_UNLOCK(ha);
7053 }
7054
7055 (void) ql_get_port_database(ha, newq, PDF_NONE);
7056
7057 }
7058
7059 /*
7060 * Invalidate the loop ID for the
7061 * us to obtain a new one.
7062 */
7063 tq->loop_id = PORT_NO_LOOP_ID;
7064 break;
7065
7066 case QL_ALL_IDS_IN_USE:
7067 rval = QL_FUNCTION_FAILED;
7068 EL(ha, "no loop id's available\n");
7069 break;
7070
7071 default:
7072 if (rval == QL_COMMAND_ERROR) {
7073 switch (mr.mb[1]) {
7074 case 2:
7075 case 3:
7076 rval = QL_MEMORY_ALLOC_FAILED;
7077 break;
7078
7079 case 0xd:
7080 case 4:
7081 rval = QL_FUNCTION_TIMEOUT;
7082 break;
7083 case 1:
7084 case 5:
7085 case 7:
7086 rval = QL_FABRIC_NOT_INITIALIZED;
7087 break;
7088 default:
7089 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
7090 break;
7091 }
7092 } else {
7093 cmn_err(CE_WARN, "%s(%d): login fabric port failed"
7094 " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
7095 ha->instance, tq->d_id.b24, rval, mr.mb[1]);
7096 }
7097 break;
7098 }
7099
7100 if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
7101 rval != QL_LOOP_ID_USED) {
7102 EL(ha, "failed=%xh\n", rval);
7103 } else {
7104 /*EMPTY*/
7105 QL_PRINT_3(ha, "done\n");
7106 }
7107 return (rval);
7108 }
7109
7110 /*
7111 * ql_logout_port
7112 * Logs out a device if possible.
7113 *
7114 * Input:
7115 * ha: adapter state pointer.
7116 * d_id: 24 bit port ID.
7117 *
7118 * Returns:
7119 * QL local function return status code.
7120 *
7121 * Context:
7122 * Kernel context.
7123 */
7124 static int
7125 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
7126 {
7127 ql_link_t *link;
7128 ql_tgt_t *tq;
7129 uint16_t index;
7130
7131 QL_PRINT_3(ha, "started\n");
7132
7133 /* Get head queue index. */
7134 index = ql_alpa_to_index[d_id.b.al_pa];
7135
7136 /* Get device queue. */
7137 tq = NULL;
7138 for (link = ha->dev[index].first; link != NULL; link = link->next) {
7139 tq = link->base_address;
7140 if (tq->d_id.b24 == d_id.b24) {
7141 break;
7142 } else {
7143 tq = NULL;
7144 }
7145 }
7146
7147 if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
7148 (void) ql_logout_fabric_port(ha, tq);
7149 tq->loop_id = PORT_NO_LOOP_ID;
7150 }
7151
7152 QL_PRINT_3(ha, "done\n");
7153
7154 return (QL_SUCCESS);
7155 }
7156
7157 /*
7158 * ql_dev_init
7159 * Initialize/allocate device queue.
7160 *
7161 * Input:
7162 * ha: adapter state pointer.
7163 * d_id: device destination ID
7164 * loop_id: device loop ID
7165 * ADAPTER_STATE_LOCK must be already obtained.
7166 *
7167 * Returns:
7168 * NULL = failure
7169 *
7170 * Context:
7171 * Kernel context.
7172 */
7173 ql_tgt_t *
7174 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
7175 {
7176 ql_link_t *link;
7177 uint16_t index;
7178 ql_tgt_t *tq;
7179
7180 QL_PRINT_3(ha, "started, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
7181
7182 index = ql_alpa_to_index[d_id.b.al_pa];
7183
7184 /* If device queue exists, set proper loop ID. */
7185 for (link = ha->dev[index].first; link != NULL; link = link->next) {
7186 tq = link->base_address;
7187 if (tq->d_id.b24 == d_id.b24) {
7188 tq->loop_id = loop_id;
7189
7190 /* Reset port down retry count. */
7191 tq->port_down_retry_count = ha->port_down_retry_count;
7192 tq->qfull_retry_count = ha->qfull_retry_count;
7193
7194 break;
7195 }
7196 }
7197
7198 /* If device does not have queue. */
7199 if (link == NULL) {
7200 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
7201 if (tq != NULL) {
7202 /*
7203 * mutex to protect the device queue,
7204 * does not block interrupts.
7205 */
7206 mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
7207 ha->intr_pri);
7208
7209 tq->d_id.b24 = d_id.b24;
7210 tq->loop_id = loop_id;
7211 tq->device.base_address = tq;
7212 tq->iidma_rate = IIDMA_RATE_INIT;
7213
7214 /* Reset port down retry count. */
7215 tq->port_down_retry_count = ha->port_down_retry_count;
7216 tq->qfull_retry_count = ha->qfull_retry_count;
7217
7218 /* Add device to device queue. */
7219 ql_add_link_b(&ha->dev[index], &tq->device);
7220 }
7221 }
7222
7223 if (tq == NULL) {
7224 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
7225 } else {
7226 /*EMPTY*/
7227 QL_PRINT_3(ha, "done\n");
7228 }
7229 return (tq);
7230 }
7231
7232 /*
7233 * ql_dev_free
7234 * Remove queue from device list and frees resources used by queue.
7235 *
7236 * Input:
7237 * ha: adapter state pointer.
7238 * tq: target queue pointer.
7239 * ADAPTER_STATE_LOCK must be already obtained.
7240 *
7241 * Context:
7242 * Kernel context.
7243 */
7244 void
7245 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
7246 {
7247 ql_link_t *link;
7248 uint16_t index;
7249 ql_lun_t *lq;
7250
7251 QL_PRINT_3(ha, "started\n");
7252
7253 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
7254 lq = link->base_address;
7255 if (lq->cmd.first != NULL) {
7256 EL(ha, "cmd %ph pending in lq=%ph, lun=%xh\n",
7257 lq->cmd.first, lq, lq->lun_no);
7258 return;
7259 }
7260 }
7261
7262 if (tq->outcnt == 0) {
7263 /* Get head queue index. */
7264 index = ql_alpa_to_index[tq->d_id.b.al_pa];
7265 for (link = ha->dev[index].first; link != NULL;
7266 link = link->next) {
7267 if (link->base_address == tq) {
7268 ql_remove_link(&ha->dev[index], link);
7269
7270 link = tq->lun_queues.first;
7271 while (link != NULL) {
7272 lq = link->base_address;
7273 link = link->next;
7274
7275 ql_remove_link(&tq->lun_queues,
7276 &lq->link);
7277 kmem_free(lq, sizeof (ql_lun_t));
7278 }
7279
7280 mutex_destroy(&tq->mutex);
7281 kmem_free(tq, sizeof (ql_tgt_t));
7282 break;
7283 }
7284 }
7285 }
7286
7287 QL_PRINT_3(ha, "done\n");
7288 }
7289
7290 /*
7291 * ql_lun_queue
7292 * Allocate LUN queue if does not exists.
7293 *
7294 * Input:
7295 * ha: adapter state pointer.
7296 * tq: target queue.
7297 * lun_addr: LUN number.
7298 *
7299 * Returns:
7300 * NULL = failure
7301 *
7302 * Context:
7303 * Kernel context.
7304 */
7305 static ql_lun_t *
7306 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint64_t lun_addr)
7307 {
7308 ql_lun_t *lq;
7309 ql_link_t *link;
7310 uint16_t lun_no, lun_no_tmp;
7311 fcp_ent_addr_t *fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
7312
7313 QL_PRINT_3(ha, "started\n");
7314
7315 /* Fast path. */
7316 if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_addr ==
7317 lun_addr) {
7318 QL_PRINT_3(ha, "fast done\n");
7319 return (tq->last_lun_queue);
7320 }
7321
7322 /* If device queue exists, set proper loop ID. */
7323 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
7324 lq = link->base_address;
7325 if (lq->lun_addr == lun_addr) {
7326 QL_PRINT_3(ha, "found done\n");
7327 tq->last_lun_queue = lq;
7328 return (lq);
7329 }
7330 }
7331
7332 /* Check the LUN addressing levels. */
7333 if (fcp_ent_addr->ent_addr_1 != 0 || fcp_ent_addr->ent_addr_2 != 0 ||
7334 fcp_ent_addr->ent_addr_3 != 0) {
7335 EL(ha, "Unsupported LUN Addressing level=0x%llxh", lun_addr);
7336 }
7337
7338 lun_no_tmp = CHAR_TO_SHORT(lobyte(fcp_ent_addr->ent_addr_0),
7339 hibyte(fcp_ent_addr->ent_addr_0));
7340
7341 lun_no = lun_no_tmp & ~(QL_LUN_AM_MASK << 8);
7342
7343 if (lun_no_tmp & (QL_LUN_AM_LUN << 8)) {
7344 EL(ha, "Unsupported first level LUN Addressing method=%xh, "
7345 "lun=%d(%xh)\n", lun_no_tmp & (QL_LUN_AM_MASK << 8),
7346 lun_no, lun_no_tmp);
7347 }
7348
7349 /* Create and initialize LUN queue. */
7350 lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
7351 if (lq != NULL) {
7352 lq->link.base_address = lq;
7353 lq->target_queue = tq;
7354 lq->lun_addr = lun_addr;
7355 lq->lun_no = lun_no;
7356
7357 DEVICE_QUEUE_LOCK(tq);
7358 ql_add_link_b(&tq->lun_queues, &lq->link);
7359 DEVICE_QUEUE_UNLOCK(tq);
7360 tq->last_lun_queue = lq;
7361 }
7362
7363 QL_PRINT_3(ha, "done\n");
7364
7365 return (lq);
7366 }
7367
7368 /*
7369 * ql_fcp_scsi_cmd
7370 * Process fibre channel (FCP) SCSI protocol commands.
7371 *
7372 * Input:
7373 * ha = adapter state pointer.
7374 * pkt = pointer to fc_packet.
7375 * sp = srb pointer.
7376 *
7377 * Returns:
7378 * FC_SUCCESS - the packet was accepted for transport.
7379 * FC_TRANSPORT_ERROR - a transport error occurred.
7380 *
7381 * Context:
7382 * Kernel context.
7383 */
7384 static int
7385 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7386 {
7387 port_id_t d_id;
7388 ql_tgt_t *tq;
7389 uint64_t *ptr;
7390 uint64_t fcp_ent_addr = 0;
7391
7392 QL_PRINT_3(ha, "started\n");
7393
7394 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7395 if (tq == NULL) {
7396 d_id.r.rsvd_1 = 0;
7397 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7398 tq = ql_d_id_to_queue(ha, d_id);
7399 }
7400
7401 sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
7402 fcp_ent_addr = *(uint64_t *)(&sp->fcp->fcp_ent_addr);
7403 if (tq != NULL &&
7404 (sp->lun_queue = ql_lun_queue(ha, tq, fcp_ent_addr)) != NULL) {
7405
7406 /*
7407 * zero out FCP response; 24 Bytes
7408 */
7409 ptr = (uint64_t *)pkt->pkt_resp;
7410 *ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
7411
7412 /* Handle task management function. */
7413 if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
7414 sp->fcp->fcp_cntl.cntl_clr_aca |
7415 sp->fcp->fcp_cntl.cntl_reset_tgt |
7416 sp->fcp->fcp_cntl.cntl_reset_lun |
7417 sp->fcp->fcp_cntl.cntl_clr_tsk |
7418 sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
7419 ql_task_mgmt(ha, tq, pkt, sp);
7420 } else {
7421 ha->pha->xioctl->IosRequested++;
7422 ha->pha->xioctl->BytesRequested += (uint32_t)
7423 sp->fcp->fcp_data_len;
7424
7425 /*
7426 * Setup for commands with data transfer
7427 */
7428 sp->iocb = ha->fcp_cmd;
7429 sp->req_cnt = 1;
7430 if (sp->fcp->fcp_data_len != 0) {
7431 /*
7432 * FCP data is bound to pkt_data_dma
7433 */
7434 if (sp->fcp->fcp_cntl.cntl_write_data) {
7435 (void) ddi_dma_sync(pkt->pkt_data_dma,
7436 0, 0, DDI_DMA_SYNC_FORDEV);
7437 }
7438
7439 /* Setup IOCB count. */
7440 if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
7441 (!CFG_IST(ha, CFG_CTRL_82XX) ||
7442 sp->sg_dma.dma_handle == NULL)) {
7443 uint32_t cnt;
7444
7445 cnt = pkt->pkt_data_cookie_cnt -
7446 ha->cmd_segs;
7447 sp->req_cnt = (uint16_t)
7448 (cnt / ha->cmd_cont_segs);
7449 if (cnt % ha->cmd_cont_segs) {
7450 sp->req_cnt = (uint16_t)
7451 (sp->req_cnt + 2);
7452 } else {
7453 sp->req_cnt++;
7454 }
7455 }
7456 }
7457 QL_PRINT_3(ha, "done\n");
7458
7459 return (ql_start_cmd(ha, tq, pkt, sp));
7460 }
7461 } else {
7462 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7463 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7464
7465 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7466 ql_io_comp(sp);
7467 }
7468 }
7469
7470 QL_PRINT_3(ha, "done\n");
7471
7472 return (FC_SUCCESS);
7473 }
7474
7475 /*
7476 * ql_task_mgmt
7477 * Task management function processor.
7478 *
7479 * Input:
7480 * ha: adapter state pointer.
7481 * tq: target queue pointer.
7482 * pkt: pointer to fc_packet.
7483 * sp: SRB pointer.
7484 *
7485 * Context:
7486 * Kernel context.
7487 */
7488 static void
7489 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7490 ql_srb_t *sp)
7491 {
7492 fcp_rsp_t *fcpr;
7493 struct fcp_rsp_info *rsp;
7494 ql_lun_t *lq = sp->lun_queue;
7495
7496 QL_PRINT_3(ha, "started\n");
7497
7498 fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7499 rsp = (struct fcp_rsp_info *)(pkt->pkt_resp + sizeof (fcp_rsp_t));
7500
7501 bzero(fcpr, pkt->pkt_rsplen);
7502
7503 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7504 fcpr->fcp_response_len = 8;
7505
7506 if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7507 if (ql_clear_aca(ha, tq, lq) != QL_SUCCESS) {
7508 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7509 }
7510 } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7511 if (ql_lun_reset(ha, tq, lq) != QL_SUCCESS) {
7512 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7513 }
7514 } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7515 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7516 QL_SUCCESS) {
7517 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7518 }
7519 } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7520 if (ql_clear_task_set(ha, tq, lq) != QL_SUCCESS) {
7521 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7522 }
7523 } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7524 if (ql_abort_task_set(ha, tq, lq) != QL_SUCCESS) {
7525 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7526 }
7527 } else {
7528 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7529 }
7530
7531 pkt->pkt_state = FC_PKT_SUCCESS;
7532
7533 /* Do command callback. */
7534 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7535 ql_io_comp(sp);
7536 }
7537
7538 QL_PRINT_3(ha, "done\n");
7539 }
7540
7541 /*
7542 * ql_fcp_ip_cmd
7543 * Process fibre channel (FCP) Internet (IP) protocols commands.
7544 *
7545 * Input:
7546 * ha: adapter state pointer.
7547 * pkt: pointer to fc_packet.
7548 * sp: SRB pointer.
7549 *
7550 * Returns:
7551 * FC_SUCCESS - the packet was accepted for transport.
7552 * FC_TRANSPORT_ERROR - a transport error occurred.
7553 *
7554 * Context:
7555 * Kernel context.
7556 */
7557 static int
7558 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7559 {
7560 port_id_t d_id;
7561 ql_tgt_t *tq;
7562
7563 QL_PRINT_3(ha, "started\n");
7564
7565 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7566 if (tq == NULL) {
7567 d_id.r.rsvd_1 = 0;
7568 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7569 tq = ql_d_id_to_queue(ha, d_id);
7570 }
7571
7572 if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7573 /*
7574 * IP data is bound to pkt_cmd_dma
7575 */
7576 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7577 0, 0, DDI_DMA_SYNC_FORDEV);
7578
7579 /* Setup IOCB count. */
7580 sp->iocb = ha->ip_cmd;
7581 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7582 uint32_t cnt;
7583
7584 cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7585 sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7586 if (cnt % ha->cmd_cont_segs) {
7587 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7588 } else {
7589 sp->req_cnt++;
7590 }
7591 } else {
7592 sp->req_cnt = 1;
7593 }
7594 QL_PRINT_3(ha, "done\n");
7595
7596 return (ql_start_cmd(ha, tq, pkt, sp));
7597 } else {
7598 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7599 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7600
7601 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7602 ql_io_comp(sp);
7603 }
7604
7605 QL_PRINT_3(ha, "done\n");
7606
7607 return (FC_SUCCESS);
7608 }
7609
7610 /*
7611 * ql_fc_services
7612 * Process fibre channel services (name server).
7613 *
7614 * Input:
7615 * ha: adapter state pointer.
7616 * pkt: pointer to fc_packet.
7617 *
7618 * Returns:
7619 * FC_SUCCESS - the packet was accepted for transport.
7620 * FC_TRANSPORT_ERROR - a transport error occurred.
7621 *
7622 * Context:
7623 * Kernel context.
7624 */
7625 static int
7626 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7627 {
7628 uint32_t cnt;
7629 fc_ct_header_t hdr;
7630 la_els_rjt_t rjt;
7631 port_id_t d_id;
7632 ql_tgt_t *tq;
7633 ql_srb_t *sp;
7634 int rval;
7635
7636 QL_PRINT_3(ha, "started\n");
7637
7638 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7639 (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7640
7641 bzero(&rjt, sizeof (rjt));
7642
7643 /* Do some sanity checks */
7644 cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7645 sizeof (fc_ct_header_t));
7646 if (cnt > (uint32_t)pkt->pkt_rsplen) {
7647 EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7648 pkt->pkt_rsplen);
7649 return (FC_ELS_MALFORMED);
7650 }
7651
7652 switch (hdr.ct_fcstype) {
7653 case FCSTYPE_DIRECTORY:
7654 case FCSTYPE_MGMTSERVICE:
7655
7656 /* An FCA must make sure that the header is in big endian */
7657 ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7658
7659 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7660 tq = ql_d_id_to_queue(ha, d_id);
7661 sp = (ql_srb_t *)pkt->pkt_fca_private;
7662
7663 if (tq == NULL ||
7664 (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7665 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7666 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7667 rval = QL_SUCCESS;
7668 break;
7669 }
7670
7671 if (tq->flags & TQF_LOGIN_NEEDED) {
7672 DEVICE_QUEUE_LOCK(tq);
7673 tq->flags &= ~TQF_LOGIN_NEEDED;
7674 DEVICE_QUEUE_UNLOCK(tq);
7675 (void) ql_login_fport(ha, tq, tq->loop_id, LFF_NONE,
7676 NULL);
7677 }
7678 /*
7679 * Services data is bound to pkt_cmd_dma
7680 */
7681 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7682 DDI_DMA_SYNC_FORDEV);
7683
7684 sp->flags |= SRB_MS_PKT;
7685 sp->retry_count = 32;
7686
7687 /* Setup IOCB count. */
7688 sp->iocb = ha->ms_cmd;
7689 if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7690 cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7691 sp->req_cnt =
7692 (uint16_t)(cnt / ha->cmd_cont_segs);
7693 if (cnt % ha->cmd_cont_segs) {
7694 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7695 } else {
7696 sp->req_cnt++;
7697 }
7698 } else {
7699 sp->req_cnt = 1;
7700 }
7701 rval = ql_start_cmd(ha, tq, pkt, sp);
7702
7703 QL_PRINT_3(ha, "done, ql_start_cmd=%xh\n", rval);
7704
7705 return (rval);
7706
7707 default:
7708 EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7709 rval = QL_FUNCTION_PARAMETER_ERROR;
7710 break;
7711 }
7712
7713 if (rval != QL_SUCCESS) {
7714 /* Build RJT. */
7715 rjt.ls_code.ls_code = LA_ELS_RJT;
7716 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7717
7718 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7719 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7720
7721 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7722 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7723 EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7724 }
7725
7726 /* Do command callback. */
7727 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7728 ql_io_comp((ql_srb_t *)pkt->pkt_fca_private);
7729 }
7730
7731 QL_PRINT_3(ha, "done\n");
7732
7733 return (FC_SUCCESS);
7734 }
7735
7736 /*
7737 * ql_cthdr_endian
7738 * Change endianess of ct passthrough header and payload.
7739 *
7740 * Input:
7741 * acc_handle: DMA buffer access handle.
7742 * ct_hdr: Pointer to header.
7743 * restore: Restore first flag.
7744 *
7745 * Context:
7746 * Interrupt or Kernel context, no mailbox commands allowed.
7747 */
7748 void
7749 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7750 boolean_t restore)
7751 {
7752 uint8_t i, *bp;
7753 fc_ct_header_t hdr;
7754 uint32_t *hdrp = (uint32_t *)&hdr;
7755
7756 ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7757 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7758
7759 if (restore) {
7760 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7761 *hdrp = BE_32(*hdrp);
7762 hdrp++;
7763 }
7764 }
7765
7766 if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7767 bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7768
7769 switch (hdr.ct_cmdrsp) {
7770 case NS_GA_NXT:
7771 case NS_GPN_ID:
7772 case NS_GNN_ID:
7773 case NS_GCS_ID:
7774 case NS_GFT_ID:
7775 case NS_GSPN_ID:
7776 case NS_GPT_ID:
7777 case NS_GID_FT:
7778 case NS_GID_PT:
7779 case NS_RPN_ID:
7780 case NS_RNN_ID:
7781 case NS_RSPN_ID:
7782 case NS_DA_ID:
7783 BIG_ENDIAN_32(bp);
7784 break;
7785 case NS_RFT_ID:
7786 case NS_RCS_ID:
7787 case NS_RPT_ID:
7788 BIG_ENDIAN_32(bp);
7789 bp += 4;
7790 BIG_ENDIAN_32(bp);
7791 break;
7792 case NS_GNN_IP:
7793 case NS_GIPA_IP:
7794 BIG_ENDIAN(bp, 16);
7795 break;
7796 case NS_RIP_NN:
7797 bp += 8;
7798 BIG_ENDIAN(bp, 16);
7799 break;
7800 case NS_RIPA_NN:
7801 bp += 8;
7802 BIG_ENDIAN_64(bp);
7803 break;
7804 default:
7805 break;
7806 }
7807 }
7808
7809 if (restore == B_FALSE) {
7810 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7811 *hdrp = BE_32(*hdrp);
7812 hdrp++;
7813 }
7814 }
7815
7816 ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7817 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7818 }
7819
7820 /*
7821 * ql_start_cmd
7822 * Finishes starting fibre channel protocol (FCP) command.
7823 *
7824 * Input:
7825 * ha: adapter state pointer.
7826 * tq: target queue pointer.
7827 * pkt: pointer to fc_packet.
7828 * sp: SRB pointer.
7829 *
7830 * Context:
7831 * Kernel context.
7832 */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int rval = FC_SUCCESS;
	time_t poll_wait = 0;
	ql_lun_t *lq = sp->lun_queue;

	QL_PRINT_3(ha, "started\n");

	/* No outstanding-command handle has been assigned yet. */
	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		/* Supply a default timeout when the caller gave none. */
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		/*
		 * When the watchdog is armed, poll a little longer than
		 * the watchdog so the watchdog expires the command first;
		 * otherwise poll for the packet's own timeout.
		 */
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd, B_FALSE);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/* Only the first panic-time command forces a LIP. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass the device queue; issue the IOCB directly. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS &&
		    pkt->pkt_state == FC_PKT_SUCCESS) {
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		(void) qlc_fm_check_pkt_dma_handle(ha, sp);
		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
7986
7987 /*
7988 * ql_poll_cmd
7989 * Polls commands for completion.
7990 *
7991 * Input:
 * vha = adapter state pointer.
7993 * sp = SRB command pointer.
7994 * poll_wait = poll wait time in seconds.
7995 *
7996 * Returns:
7997 * QL local function return status code.
7998 *
7999 * Context:
8000 * Kernel context.
8001 */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	uint32_t index;
	int rval = QL_SUCCESS;
	time_t msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t *ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* Spin until the SRB completes (SRB_POLL cleared) or we time out. */
	while (sp->flags & SRB_POLL) {

		/*
		 * Service interrupts by hand when delivery cannot be
		 * relied on: interrupts disabled, adapter apparently
		 * idle for a while, system panic, or running on an
		 * interrupt thread.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic() ||
		    curthread->t_flag & T_INTR_THREAD) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= POLL_INTR;
			ADAPTER_STATE_UNLOCK(ha);

			if (INTERRUPT_PENDING(ha)) {
				(void) ql_isr_aif((caddr_t)ha, 0);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}
			/* No handshake: poll every response queue directly. */
			if (ha->flags & NO_INTR_HANDSHAKE) {
				for (index = 0; index < ha->rsp_queues_cnt;
				    index++) {
					(void) ql_isr_aif((caddr_t)ha,
					    (caddr_t)((uintptr_t)(index + 1)));
				}
			}

			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~POLL_INTR;
			ADAPTER_STATE_UNLOCK(ha);

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ql_task_thread(ha);
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		if (msecs_left == 0) {
			if (rval == QL_SUCCESS) {
				/*
				 * First expiry: try to abort the I/O; if
				 * that fails, request an ISP abort and give
				 * it 30 more seconds' worth of ticks.
				 */
				EL(ha, "timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
				if (ql_abort_io(ha, sp) == QL_SUCCESS) {
					sp->pkt->pkt_reason = CS_ABORTED;
					sp->cmd.next = NULL;
					ql_done(&sp->cmd, B_FALSE);
					break;
				}
				sp->flags |= SRB_COMMAND_TIMEOUT;
				EL(ha, "abort failed, isp_abort_needed\n");
				ql_awaken_task_daemon(ha, NULL,
				    ISP_ABORT_NEEDED, 0);
				msecs_left = 30 * 100;
			} else {
				break;
			}
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		/*
		 * NOTE(review): despite its name, msecs_left is decremented
		 * 10 per 10 ms delay from an initial poll_wait * 100, which
		 * drains in ~poll_wait * 100 ms rather than poll_wait
		 * seconds — confirm intended units against ql_delay().
		 */
		msecs_left -= 10;
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
8102
8103 /*
8104 * ql_next
8105 * Retrieve and process next job in the device queue.
8106 *
8107 * Input:
8108 * ha: adapter state pointer.
8109 * lq: LUN queue pointer.
8110 * DEVICE_QUEUE_LOCK must be already obtained.
8111 *
8112 * Output:
8113 * Releases DEVICE_QUEUE_LOCK upon exit.
8114 *
8115 * Context:
8116 * Interrupt or Kernel context, no mailbox commands allowed.
8117 */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t *sp;
	ql_link_t *link;
	ql_tgt_t *tq = lq->target_queue;
	ql_adapter_state_t *ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* During panic, queued commands are issued synchronously elsewhere. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(ha, "panic/active exit\n");
		return;
	}

	/* Drain the LUN queue until empty or a throttle/state check fails. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh sp=%ph\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id, sp);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(ha, "break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n",
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(ha, "done\n");
}
8192
8193 /*
8194 * ql_done
8195 * Process completed commands.
8196 *
8197 * Input:
8198 * link: first command link in chain.
8199 * cmplt: do command complete call back.
8200 *
8201 * Context:
8202 * Interrupt or Kernel context, no mailbox commands allowed.
8203 */
8204 void
8205 ql_done(ql_link_t *link, boolean_t cmplt)
8206 {
8207 ql_adapter_state_t *ha;
8208 ql_link_t *next_link;
8209 ql_srb_t *sp;
8210 ql_tgt_t *tq;
8211 ql_lun_t *lq;
8212 uint64_t set_flags;
8213
8214 QL_PRINT_3(NULL, "started\n");
8215
8216 for (; link != NULL; link = next_link) {
8217 next_link = link->next;
8218 sp = link->base_address;
8219 link->prev = link->next = NULL;
8220 link->head = NULL;
8221 ha = sp->ha;
8222 set_flags = 0;
8223
8224 if (sp->flags & SRB_UB_CALLBACK) {
8225 QL_UB_LOCK(ha);
8226 if (sp->flags & SRB_UB_IN_ISP) {
8227 if (ha->ub_outcnt != 0) {
8228 ha->ub_outcnt--;
8229 }
8230 if (ha->flags & IP_ENABLED) {
8231 set_flags |= NEED_UNSOLICITED_BUFFERS;
8232 }
8233 }
8234 QL_UB_UNLOCK(ha);
8235 ql_awaken_task_daemon(ha, sp, set_flags, 0);
8236 } else {
8237 /* Free outstanding command slot. */
8238 INTR_LOCK(ha);
8239 if (sp->handle != 0) {
8240 EL(ha, "free sp=%ph, sp->hdl=%xh\n",
8241 (void *)sp, sp->handle);
8242 ha->pha->outstanding_cmds[
8243 sp->handle & OSC_INDEX_MASK] = NULL;
8244 sp->handle = 0;
8245 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
8246 }
8247 INTR_UNLOCK(ha);
8248
8249 /* Acquire device queue lock. */
8250 lq = sp->lun_queue;
8251 tq = lq->target_queue;
8252 DEVICE_QUEUE_LOCK(tq);
8253
8254 /* Decrement outstanding commands on device. */
8255 if (tq->outcnt != 0) {
8256 tq->outcnt--;
8257 }
8258
8259 if (sp->flags & SRB_FCP_CMD_PKT) {
8260 if (sp->fcp->fcp_cntl.cntl_qtype ==
8261 FCP_QTYPE_UNTAGGED) {
8262 /*
8263 * Clear the flag for this LUN so that
8264 * untagged commands can be submitted
8265 * for it.
8266 */
8267 lq->flags &= ~LQF_UNTAGGED_PENDING;
8268 }
8269
8270 if (lq->lun_outcnt != 0) {
8271 lq->lun_outcnt--;
8272 }
8273 }
8274
8275 /* Reset port down retry count on good completion. */
8276 if (sp->pkt->pkt_reason == CS_COMPLETE) {
8277 tq->port_down_retry_count =
8278 ha->port_down_retry_count;
8279 tq->qfull_retry_count = ha->qfull_retry_count;
8280 }
8281
8282
8283 /* Alter aborted status for fast timeout feature */
8284 if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
8285 (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
8286 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
8287 sp->flags & SRB_RETRY &&
8288 (sp->flags & SRB_WATCHDOG_ENABLED &&
8289 sp->wdg_q_time > 1)) {
8290 EL(ha, "fast abort modify change\n");
8291 sp->flags &= ~(SRB_RETRY);
8292 sp->pkt->pkt_reason = CS_TIMEOUT;
8293 }
8294
8295 /* Place request back on top of target command queue */
8296 if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
8297 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
8298 sp->flags & SRB_RETRY &&
8299 (sp->flags & SRB_WATCHDOG_ENABLED &&
8300 sp->wdg_q_time > 1)) {
8301 sp->flags &= ~(SRB_ISP_STARTED |
8302 SRB_ISP_COMPLETED | SRB_RETRY);
8303
8304 /* Reset watchdog timer */
8305 sp->wdg_q_time = sp->init_wdg_q_time;
8306
8307 /* Issue marker command on reset status. */
8308 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
8309 (sp->pkt->pkt_reason == CS_RESET ||
8310 (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
8311 sp->pkt->pkt_reason == CS_ABORTED))) {
8312 (void) ql_marker(ha, tq->loop_id, 0,
8313 MK_SYNC_ID);
8314 }
8315
8316 ql_add_link_t(&lq->cmd, &sp->cmd);
8317 sp->flags |= SRB_IN_DEVICE_QUEUE;
8318 ql_next(ha, lq);
8319 } else {
8320 /* Remove command from watchdog queue. */
8321 if (sp->flags & SRB_WATCHDOG_ENABLED) {
8322 ql_remove_link(&tq->wdg, &sp->wdg);
8323 sp->flags &= ~SRB_WATCHDOG_ENABLED;
8324 }
8325
8326 if (lq->cmd.first != NULL) {
8327 ql_next(ha, lq);
8328 } else {
8329 /* Release LU queue specific lock. */
8330 DEVICE_QUEUE_UNLOCK(tq);
8331 if (ha->pha->pending_cmds.first !=
8332 NULL) {
8333 ql_start_iocb(ha, NULL);
8334 }
8335 }
8336
8337 /* Sync buffers if required. */
8338 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
8339 (void) ddi_dma_sync(
8340 sp->pkt->pkt_resp_dma,
8341 0, 0, DDI_DMA_SYNC_FORCPU);
8342 }
8343
8344 /* Map ISP completion codes. */
8345 sp->pkt->pkt_expln = FC_EXPLN_NONE;
8346 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
8347 switch (sp->pkt->pkt_reason) {
8348 case CS_COMPLETE:
8349 sp->pkt->pkt_state = FC_PKT_SUCCESS;
8350 break;
8351 case CS_RESET:
8352 sp->pkt->pkt_state =
8353 FC_PKT_PORT_OFFLINE;
8354 sp->pkt->pkt_reason =
8355 FC_REASON_ABORTED;
8356 break;
8357 case CS_RESOUCE_UNAVAILABLE:
8358 sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
8359 sp->pkt->pkt_reason =
8360 FC_REASON_PKT_BUSY;
8361 break;
8362
8363 case CS_TIMEOUT:
8364 sp->pkt->pkt_state = FC_PKT_TIMEOUT;
8365 sp->pkt->pkt_reason =
8366 FC_REASON_HW_ERROR;
8367 break;
8368 case CS_DATA_OVERRUN:
8369 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8370 sp->pkt->pkt_reason =
8371 FC_REASON_OVERRUN;
8372 break;
8373 case CS_PORT_UNAVAILABLE:
8374 case CS_PORT_LOGGED_OUT:
8375 sp->pkt->pkt_state =
8376 FC_PKT_PORT_OFFLINE;
8377 sp->pkt->pkt_reason =
8378 FC_REASON_LOGIN_REQUIRED;
8379 ql_send_logo(ha, tq, NULL);
8380 break;
8381 case CS_PORT_CONFIG_CHG:
8382 sp->pkt->pkt_state =
8383 FC_PKT_PORT_OFFLINE;
8384 sp->pkt->pkt_reason =
8385 FC_REASON_OFFLINE;
8386 break;
8387 case CS_QUEUE_FULL:
8388 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8389 sp->pkt->pkt_reason = FC_REASON_QFULL;
8390 break;
8391
8392 case CS_ABORTED:
8393 DEVICE_QUEUE_LOCK(tq);
8394 if (tq->flags & (TQF_RSCN_RCVD |
8395 TQF_NEED_AUTHENTICATION)) {
8396 sp->pkt->pkt_state =
8397 FC_PKT_PORT_OFFLINE;
8398 sp->pkt->pkt_reason =
8399 FC_REASON_LOGIN_REQUIRED;
8400 } else {
8401 sp->pkt->pkt_state =
8402 FC_PKT_LOCAL_RJT;
8403 sp->pkt->pkt_reason =
8404 FC_REASON_ABORTED;
8405 }
8406 DEVICE_QUEUE_UNLOCK(tq);
8407 break;
8408
8409 case CS_TRANSPORT:
8410 case CS_DEV_NOT_READY:
8411 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8412 sp->pkt->pkt_reason =
8413 FC_PKT_TRAN_ERROR;
8414 break;
8415
8416 case CS_DATA_UNDERRUN:
8417 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8418 sp->pkt->pkt_reason =
8419 FC_REASON_UNDERRUN;
8420 break;
8421 case CS_DMA_ERROR:
8422 case CS_BAD_PAYLOAD:
8423 case CS_UNKNOWN:
8424 case CS_CMD_FAILED:
8425 default:
8426 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8427 sp->pkt->pkt_reason =
8428 FC_REASON_HW_ERROR;
8429 break;
8430 }
8431
8432 (void) qlc_fm_check_pkt_dma_handle(ha, sp);
8433
8434 /* Now call the pkt completion callback */
8435 if (sp->flags & SRB_POLL) {
8436 sp->flags &= ~SRB_POLL;
8437 } else if (cmplt == B_TRUE &&
8438 sp->pkt->pkt_comp) {
8439 (sp->pkt->pkt_comp)(sp->pkt);
8440 } else {
8441 ql_io_comp(sp);
8442 }
8443 }
8444 }
8445 }
8446
8447 QL_PRINT_3(ha, "done\n");
8448 }
8449
8450 /*
8451 * ql_awaken_task_daemon
8452 * Adds command completion callback to callback queue and/or
8453 * awakens task daemon thread.
8454 *
8455 * Input:
8456 * ha: adapter state pointer.
8457 * sp: srb pointer.
8458 * set_flags: task daemon flags to set.
8459 * reset_flags: task daemon flags to reset.
8460 *
8461 * Context:
8462 * Interrupt or Kernel context, no mailbox commands allowed.
8463 */
8464 void
8465 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8466 uint64_t set_flags, uint64_t reset_flags)
8467 {
8468 ql_adapter_state_t *ha = vha->pha;
8469
8470 QL_PRINT_3(ha, "started, sp=%p set_flags=%llx reset_flags=%llx\n",
8471 sp, set_flags, reset_flags);
8472
8473 /* Acquire task daemon lock. */
8474 TASK_DAEMON_LOCK(ha);
8475
8476 if (set_flags) {
8477 ha->task_daemon_flags |= set_flags;
8478 }
8479 if (reset_flags) {
8480 ha->task_daemon_flags &= ~reset_flags;
8481 }
8482
8483 if (!(ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG)) {
8484 EL(ha, "done, not alive dtf=%xh\n", ha->task_daemon_flags);
8485 TASK_DAEMON_UNLOCK(ha);
8486 return;
8487 }
8488
8489 if (sp != NULL) {
8490 if (sp->flags & SRB_UB_CALLBACK) {
8491 ql_add_link_b(&ha->unsol_callback_queue, &sp->cmd);
8492 } else {
8493 EL(ha, "sp=%p, spf=%xh is not SRB_UB_CALLBACK",
8494 sp->flags);
8495 }
8496 }
8497
8498 if (!ha->driver_thread_awake) {
8499 QL_PRINT_3(ha, "driver_thread_awake\n");
8500 cv_broadcast(&ha->cv_task_daemon);
8501 }
8502
8503 TASK_DAEMON_UNLOCK(ha);
8504
8505 QL_PRINT_3(ha, "done\n");
8506 }
8507
8508 /*
8509 * ql_task_daemon
 * Thread that is awakened by the driver when background
 * work needs to be done.
8512 *
8513 * Input:
8514 * arg = adapter state pointer.
8515 *
8516 * Context:
8517 * Kernel context.
8518 */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t *ha = (void *)arg;

	QL_PRINT_3(ha, "started\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/*
	 * TASK_DAEMON_LOCK is held for the life of the loop except while
	 * blocked in cv_wait_sig (which atomically drops and reacquires
	 * the task daemon mutex).
	 */
	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		QL_PRINT_3(ha, "Going to sleep\n");
		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			QL_PRINT_10(ha, "killed\n");
			break;
		}

		QL_PRINT_3(ha, "Awakened\n");
		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
	}

	/* Mark the daemon as gone before dropping the lock. */
	ha->task_daemon_flags &= ~(TASK_DAEMON_SLEEPING_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
8561
8562 /*
8563 * ql_task_thread
8564 * Thread run by daemon.
8565 *
8566 * Input:
8567 * ha = adapter state pointer.
8568 * TASK_DAEMON_LOCK must be acquired prior to call.
8569 *
8570 * Context:
8571 * Kernel context.
8572 */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	boolean_t loop_again;
	ql_srb_t *sp;
	ql_link_t *link;
	caddr_t msg;
	ql_adapter_state_t *vha;

	/*
	 * Each pending-work handler below follows the same pattern:
	 * clear its flag, drop TASK_DAEMON_LOCK around the actual work
	 * (which may block or take other locks), reacquire the lock,
	 * and set loop_again to rescan for newly posted work.
	 */
	ha->driver_thread_awake++;
	do {
		loop_again = B_FALSE;

		/* Log any transition of state/daemon/config flags. */
		if (ha->sf != ha->flags ||
		    (ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS) != ha->df ||
		    ha->cf != ha->cfg_flags) {
			ha->sf = ha->flags;
			ha->df = ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS;
			ha->cf = ha->cfg_flags;
			EL(ha, "df=%xh, sf=%xh, cf=%xh\n",
			    ha->df, ha->sf, ha->cf);
		}

		/* Stall while the adapter is not at full power. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= DRIVER_STALL |
			    TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		if (ha->flags & ADAPTER_SUSPENDED) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}

		/* Handle FW IDC events. */
		while (ha->flags & (IDC_STALL_NEEDED | IDC_RESTART_NEEDED |
		    IDC_ACK_NEEDED)) {
			TASK_DAEMON_UNLOCK(ha);
			ql_idc(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags &
		    (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
		    !(ha->flags & ONLINE)) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		/* Store error log. */
		if (ha->errlog[0] != 0 &&
		    !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_flash_errlog(ha, ha->errlog[0],
			    ha->errlog[1], ha->errlog[2], ha->errlog[3]);
			ha->errlog[0] = 0;
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!DRIVER_SUSPENDED(ha)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = B_TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			if (ha->flags & ABORT_CMDS_LOOP_DOWN_TMO) {
				TASK_DAEMON_UNLOCK(ha);
				ql_abort_queues(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = B_TRUE;
			}
		}

		/* Not suspended, awaken waiting routines. */
		if (!DRIVER_SUSPENDED(ha) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = B_TRUE;
		}

		/* Handle RSCN changes (for every virtual port). */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = B_TRUE;
			}
		}

		/* Handle state changes (for every virtual port). */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				/* Drain commands before reporting, if asked. */
				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
						loop_again = B_TRUE;
					}
				}

				/* Pick a console message for the transition. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the FC transport if it is bound. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(vha, "statec_"
					    "cb state=%xh\n",
					    vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
					loop_again = B_TRUE;
				}
			}
		}

		if (ha->task_daemon_flags & NEED_UNSOLICITED_BUFFERS &&
		    ha->task_daemon_flags & FIRMWARE_UP) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & WATCHDOG_NEEDED) {
			ha->task_daemon_flags &= ~WATCHDOG_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_watchdog(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* Markers/resync only when firmware is up and not recovering. */
		if (!(ha->task_daemon_flags & (COMMAND_WAIT_NEEDED |
		    ABORT_QUEUES_NEEDED | ISP_ABORT_NEEDED | LOOP_DOWN)) &&
		    ha->task_daemon_flags & FIRMWARE_UP) {
			if (ha->task_daemon_flags & MARKER_NEEDED) {
				if (!(ha->task_daemon_flags & MARKER_ACTIVE)) {
					ha->task_daemon_flags |= MARKER_ACTIVE;
					ha->task_daemon_flags &= ~MARKER_NEEDED;
					TASK_DAEMON_UNLOCK(ha);
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						(void) ql_marker(vha, 0, 0,
						    MK_SYNC_ALL);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~MARKER_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					ql_restart_queues(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = B_TRUE;
				} else {
					ha->task_daemon_flags &= ~MARKER_NEEDED;
				}
			}

			if (ha->task_daemon_flags & LOOP_RESYNC_NEEDED) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = B_TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* Deliver one queued unsolicited-buffer callback per pass. */
		if (ha->unsol_callback_queue.first != NULL) {
			sp = (ha->unsol_callback_queue.first)->base_address;
			link = &sp->cmd;
			ql_remove_link(&ha->unsol_callback_queue, link);
			TASK_DAEMON_UNLOCK(ha);
			ql_unsol_callback(sp);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & IDC_POLL_NEEDED) {
			ha->task_daemon_flags &= ~IDC_POLL_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_8021_idc_poll(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & LED_BLINK) {
			ha->task_daemon_flags &= ~LED_BLINK;
			TASK_DAEMON_UNLOCK(ha);
			ql_blink_led(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

	} while (loop_again == B_TRUE);

	if (ha->driver_thread_awake) {
		ha->driver_thread_awake--;
	}
	QL_PRINT_3(ha, "done\n");
}
8898
8899 /*
8900 * ql_idle_check
 * Test whether the adapter is alive and well.
8902 *
8903 * Input:
8904 * ha: adapter state pointer.
8905 *
8906 * Context:
8907 * Kernel context.
8908 */
8909 static void
8910 ql_idle_check(ql_adapter_state_t *ha)
8911 {
8912 int rval;
8913 ql_mbx_data_t mr;
8914
8915 QL_PRINT_3(ha, "started\n");
8916
8917 /* Firmware Ready Test. */
8918 rval = ql_get_firmware_state(ha, &mr);
8919 if (!DRIVER_SUSPENDED(ha) &&
8920 (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8921 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8922 TASK_DAEMON_LOCK(ha);
8923 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8924 EL(ha, "fstate_ready, isp_abort_needed\n");
8925 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8926 }
8927 TASK_DAEMON_UNLOCK(ha);
8928 }
8929
8930 QL_PRINT_3(ha, "done\n");
8931 }
8932
8933 /*
8934 * ql_unsol_callback
8935 * Handle unsolicited buffer callbacks.
8936 *
8937 * Input:
 * sp = srb pointer; the adapter state is taken from sp->ha.
8940 *
8941 * Context:
8942 * Kernel context.
8943 */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t *af;
	fc_unsol_buf_t *ubp;
	uchar_t r_ctl;
	uchar_t ls_code;
	ql_tgt_t *tq;
	ql_adapter_state_t *ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(ha, "started\n");

	/* Locate the unsolicited buffer this SRB refers to. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/* Return the buffer to the FCA if it is being freed/powered down. */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Requeue this callback until commands drain. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(ha, "done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n",
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Requeue if the device-side logout is not yet complete. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(ha, "logo_sendup=0, d_id=%xh"
			    "\n", tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_PLOGI)) {
		EL(ha, "sending unsol plogi for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* Make IP/SNAP payload visible to the CPU before the upcall. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the unsolicited buffer up to the FC transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(ha, "done\n");
}
9050
9051 /*
9052 * ql_send_logo
9053 *
9054 * Input:
9055 * ha: adapter state pointer.
9056 * tq: target queue pointer.
9057 * done_q: done queue pointer.
9058 *
9059 * Context:
9060 * Interrupt or Kernel context, no mailbox commands allowed.
9061 */
9062 void
9063 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
9064 {
9065 fc_unsol_buf_t *ubp;
9066 ql_srb_t *sp;
9067 la_els_logo_t *payload;
9068 ql_adapter_state_t *ha = vha->pha;
9069
9070 QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
9071
9072 if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == FS_BROADCAST)) {
9073 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
9074 return;
9075 }
9076
9077 if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
9078 tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
9079
9080 /* Locate a buffer to use. */
9081 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
9082 if (ubp == NULL) {
9083 EL(vha, "Failed, get_unsolicited_buffer\n");
9084 return;
9085 }
9086
9087 DEVICE_QUEUE_LOCK(tq);
9088 tq->flags |= TQF_NEED_AUTHENTICATION;
9089 tq->logout_sent++;
9090 DEVICE_QUEUE_UNLOCK(tq);
9091
9092 sp = ubp->ub_fca_private;
9093
9094 /* Set header. */
9095 ubp->ub_frame.d_id = vha->d_id.b24;
9096 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9097 ubp->ub_frame.s_id = tq->d_id.b24;
9098 ubp->ub_frame.rsvd = 0;
9099 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9100 F_CTL_SEQ_INITIATIVE;
9101 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9102 ubp->ub_frame.seq_cnt = 0;
9103 ubp->ub_frame.df_ctl = 0;
9104 ubp->ub_frame.seq_id = 0;
9105 ubp->ub_frame.rx_id = 0xffff;
9106 ubp->ub_frame.ox_id = 0xffff;
9107
9108 /* set payload. */
9109 payload = (la_els_logo_t *)ubp->ub_buffer;
9110 bzero(payload, sizeof (la_els_logo_t));
9111 /* Make sure ls_code in payload is always big endian */
9112 ubp->ub_buffer[0] = LA_ELS_LOGO;
9113 ubp->ub_buffer[1] = 0;
9114 ubp->ub_buffer[2] = 0;
9115 ubp->ub_buffer[3] = 0;
9116 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
9117 &payload->nport_ww_name.raw_wwn[0], 8);
9118 payload->nport_id.port_id = tq->d_id.b24;
9119
9120 QL_UB_LOCK(ha);
9121 sp->flags |= SRB_UB_CALLBACK;
9122 QL_UB_UNLOCK(ha);
9123 if (tq->lun_queues.first != NULL) {
9124 sp->lun_queue = (tq->lun_queues.first)->base_address;
9125 } else {
9126 sp->lun_queue = ql_lun_queue(vha, tq, 0);
9127 }
9128 if (done_q) {
9129 ql_add_link_b(done_q, &sp->cmd);
9130 } else {
9131 ql_awaken_task_daemon(ha, sp, 0, 0);
9132 }
9133 }
9134
9135 QL_PRINT_3(ha, "done\n");
9136 }
9137
9138 static int
9139 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9140 {
9141 port_id_t d_id;
9142 ql_srb_t *sp;
9143 ql_link_t *link;
9144 int sendup = 1;
9145
9146 QL_PRINT_3(ha, "started\n");
9147
9148 DEVICE_QUEUE_LOCK(tq);
9149 if (tq->outcnt) {
9150 DEVICE_QUEUE_UNLOCK(tq);
9151 sendup = 0;
9152 (void) ql_abort_device(ha, tq, 1);
9153 ql_delay(ha, 10000);
9154 } else {
9155 DEVICE_QUEUE_UNLOCK(tq);
9156 TASK_DAEMON_LOCK(ha);
9157
9158 for (link = ha->pha->unsol_callback_queue.first; link != NULL;
9159 link = link->next) {
9160 sp = link->base_address;
9161 if (sp->flags & SRB_UB_CALLBACK) {
9162 continue;
9163 }
9164 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
9165
9166 if (tq->d_id.b24 == d_id.b24) {
9167 sendup = 0;
9168 break;
9169 }
9170 }
9171
9172 TASK_DAEMON_UNLOCK(ha);
9173 }
9174
9175 QL_PRINT_3(ha, "done\n");
9176
9177 return (sendup);
9178 }
9179
9180 static int
9181 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
9182 {
9183 fc_unsol_buf_t *ubp;
9184 ql_srb_t *sp;
9185 la_els_logi_t *payload;
9186 class_svc_param_t *class3_param;
9187
9188 QL_PRINT_3(ha, "started\n");
9189
9190 if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
9191 LOOP_DOWN)) {
9192 EL(ha, "Failed, tqf=%xh\n", tq->flags);
9193 return (QL_FUNCTION_FAILED);
9194 }
9195
9196 /* Locate a buffer to use. */
9197 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
9198 if (ubp == NULL) {
9199 EL(ha, "Failed\n");
9200 return (QL_FUNCTION_FAILED);
9201 }
9202
9203 QL_PRINT_3(ha, "Received LOGO from = %xh\n", tq->d_id.b24);
9204
9205 EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
9206
9207 sp = ubp->ub_fca_private;
9208
9209 /* Set header. */
9210 ubp->ub_frame.d_id = ha->d_id.b24;
9211 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9212 ubp->ub_frame.s_id = tq->d_id.b24;
9213 ubp->ub_frame.rsvd = 0;
9214 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9215 F_CTL_SEQ_INITIATIVE;
9216 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9217 ubp->ub_frame.seq_cnt = 0;
9218 ubp->ub_frame.df_ctl = 0;
9219 ubp->ub_frame.seq_id = 0;
9220 ubp->ub_frame.rx_id = 0xffff;
9221 ubp->ub_frame.ox_id = 0xffff;
9222
9223 /* set payload. */
9224 payload = (la_els_logi_t *)ubp->ub_buffer;
9225 bzero(payload, sizeof (la_els_logi_t));
9226
9227 payload->ls_code.ls_code = LA_ELS_PLOGI;
9228 payload->common_service.fcph_version = 0x2006;
9229 payload->common_service.cmn_features =
9230 ha->topology & QL_N_PORT ? 0x8000 : 0x8800;
9231 payload->common_service.rx_bufsize =
9232 ha->loginparams.common_service.rx_bufsize;
9233 payload->common_service.conc_sequences = 0xff;
9234 payload->common_service.relative_offset = 0x03;
9235 payload->common_service.e_d_tov = 0x7d0;
9236
9237 bcopy((void *)&tq->port_name[0],
9238 (void *)&payload->nport_ww_name.raw_wwn[0], 8);
9239
9240 bcopy((void *)&tq->node_name[0],
9241 (void *)&payload->node_ww_name.raw_wwn[0], 8);
9242
9243 class3_param = (class_svc_param_t *)&payload->class_3;
9244 class3_param->class_valid_svc_opt = 0x8000;
9245 class3_param->recipient_ctl = tq->class3_recipient_ctl;
9246 class3_param->rcv_data_size = tq->class3_rcv_data_size;
9247 class3_param->conc_sequences = tq->class3_conc_sequences;
9248 class3_param->open_sequences_per_exch =
9249 tq->class3_open_sequences_per_exch;
9250
9251 QL_UB_LOCK(ha);
9252 sp->flags |= SRB_UB_CALLBACK;
9253 QL_UB_UNLOCK(ha);
9254
9255 if (done_q) {
9256 ql_add_link_b(done_q, &sp->cmd);
9257 } else {
9258 ql_awaken_task_daemon(ha, sp, 0, 0);
9259 }
9260
9261 QL_PRINT_3(ha, "done\n");
9262
9263 return (QL_SUCCESS);
9264 }
9265
9266 /*
9267 * Abort outstanding commands in the Firmware, clear internally
9268 * queued commands in the driver, Synchronize the target with
9269 * the Firmware
9270 */
9271 int
9272 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
9273 {
9274 ql_link_t *link, *link2;
9275 ql_lun_t *lq;
9276 int rval = QL_SUCCESS;
9277 ql_srb_t *sp;
9278 ql_head_t done_q = { NULL, NULL };
9279
9280 QL_PRINT_10(ha, "started\n");
9281
9282 /*
9283 * First clear, internally queued commands
9284 */
9285 DEVICE_QUEUE_LOCK(tq);
9286 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
9287 lq = link->base_address;
9288
9289 link2 = lq->cmd.first;
9290 while (link2 != NULL) {
9291 sp = link2->base_address;
9292 link2 = link2->next;
9293
9294 /* Remove srb from device command queue. */
9295 ql_remove_link(&lq->cmd, &sp->cmd);
9296 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9297
9298 /* Set ending status. */
9299 sp->pkt->pkt_reason = CS_ABORTED;
9300
9301 /* Call done routine to handle completions. */
9302 ql_add_link_b(&done_q, &sp->cmd);
9303 }
9304 }
9305 DEVICE_QUEUE_UNLOCK(tq);
9306
9307 if (done_q.first != NULL) {
9308 ql_done(done_q.first, B_FALSE);
9309 }
9310
9311 if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
9312 rval = ql_abort_target(ha, tq, 0);
9313 }
9314
9315 if (rval != QL_SUCCESS) {
9316 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
9317 } else {
9318 /*EMPTY*/
9319 QL_PRINT_10(ha, "done\n");
9320 }
9321
9322 return (rval);
9323 }
9324
9325 /*
9326 * ql_rcv_rscn_els
9327 * Processes received RSCN extended link service.
9328 *
9329 * Input:
9330 * ha: adapter state pointer.
9331 * mb: array containing input mailbox registers.
9332 * done_q: done queue pointer.
9333 *
9334 * Context:
9335 * Interrupt or Kernel context, no mailbox commands allowed.
9336 */
9337 void
9338 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
9339 {
9340 fc_unsol_buf_t *ubp;
9341 ql_srb_t *sp;
9342 fc_rscn_t *rn;
9343 fc_affected_id_t *af;
9344 port_id_t d_id;
9345
9346 QL_PRINT_3(ha, "started\n");
9347
9348 /* Locate a buffer to use. */
9349 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
9350 if (ubp != NULL) {
9351 sp = ubp->ub_fca_private;
9352
9353 /* Set header. */
9354 ubp->ub_frame.d_id = ha->d_id.b24;
9355 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9356 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
9357 ubp->ub_frame.rsvd = 0;
9358 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9359 F_CTL_SEQ_INITIATIVE;
9360 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9361 ubp->ub_frame.seq_cnt = 0;
9362 ubp->ub_frame.df_ctl = 0;
9363 ubp->ub_frame.seq_id = 0;
9364 ubp->ub_frame.rx_id = 0xffff;
9365 ubp->ub_frame.ox_id = 0xffff;
9366
9367 /* set payload. */
9368 rn = (fc_rscn_t *)ubp->ub_buffer;
9369 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
9370
9371 rn->rscn_code = LA_ELS_RSCN;
9372 rn->rscn_len = 4;
9373 rn->rscn_payload_len = 8;
9374 d_id.b.al_pa = LSB(mb[2]);
9375 d_id.b.area = MSB(mb[2]);
9376 d_id.b.domain = LSB(mb[1]);
9377 af->aff_d_id = d_id.b24;
9378 af->aff_format = MSB(mb[1]);
9379
9380 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
9381 af->aff_d_id);
9382
9383 ql_update_rscn(ha, af);
9384
9385 QL_UB_LOCK(ha);
9386 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
9387 QL_UB_UNLOCK(ha);
9388 ql_add_link_b(done_q, &sp->cmd);
9389 }
9390
9391 if (ubp == NULL) {
9392 EL(ha, "Failed, get_unsolicited_buffer\n");
9393 } else {
9394 /*EMPTY*/
9395 QL_PRINT_3(ha, "done\n");
9396 }
9397 }
9398
9399 /*
9400 * ql_update_rscn
9401 * Update devices from received RSCN.
9402 *
9403 * Input:
9404 * ha: adapter state pointer.
9405 * af: pointer to RSCN data.
9406 *
9407 * Context:
9408 * Interrupt or Kernel context, no mailbox commands allowed.
9409 */
9410 static void
9411 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9412 {
9413 ql_link_t *link;
9414 uint16_t index;
9415 ql_tgt_t *tq;
9416
9417 QL_PRINT_3(ha, "started\n");
9418
9419 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9420 port_id_t d_id;
9421
9422 d_id.r.rsvd_1 = 0;
9423 d_id.b24 = af->aff_d_id;
9424
9425 tq = ql_d_id_to_queue(ha, d_id);
9426 if (tq) {
9427 EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9428 DEVICE_QUEUE_LOCK(tq);
9429 tq->flags |= TQF_RSCN_RCVD;
9430 ql_requeue_pending_cmds(ha, tq);
9431 DEVICE_QUEUE_UNLOCK(tq);
9432 }
9433 QL_PRINT_3(ha, "FC_RSCN_PORT_ADDRESS done\n");
9434
9435 return;
9436 }
9437
9438 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9439 for (link = ha->dev[index].first; link != NULL;
9440 link = link->next) {
9441 tq = link->base_address;
9442
9443 switch (af->aff_format) {
9444 case FC_RSCN_FABRIC_ADDRESS:
9445 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9446 EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9447 tq->d_id.b24);
9448 DEVICE_QUEUE_LOCK(tq);
9449 tq->flags |= TQF_RSCN_RCVD;
9450 ql_requeue_pending_cmds(ha, tq);
9451 DEVICE_QUEUE_UNLOCK(tq);
9452 }
9453 break;
9454
9455 case FC_RSCN_AREA_ADDRESS:
9456 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9457 EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9458 tq->d_id.b24);
9459 DEVICE_QUEUE_LOCK(tq);
9460 tq->flags |= TQF_RSCN_RCVD;
9461 ql_requeue_pending_cmds(ha, tq);
9462 DEVICE_QUEUE_UNLOCK(tq);
9463 }
9464 break;
9465
9466 case FC_RSCN_DOMAIN_ADDRESS:
9467 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9468 EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9469 tq->d_id.b24);
9470 DEVICE_QUEUE_LOCK(tq);
9471 tq->flags |= TQF_RSCN_RCVD;
9472 ql_requeue_pending_cmds(ha, tq);
9473 DEVICE_QUEUE_UNLOCK(tq);
9474 }
9475 break;
9476
9477 default:
9478 break;
9479 }
9480 }
9481 }
9482 QL_PRINT_3(ha, "done\n");
9483 }
9484
9485 /*
9486 * ql_requeue_pending_cmds
9487 * Requeue target commands from pending queue to LUN queue
9488 *
9489 * Input:
9490 * ha: adapter state pointer.
9491 * tq: target queue pointer.
9492 * DEVICE_QUEUE_LOCK must be already obtained.
9493 *
9494 * Context:
9495 * Interrupt or Kernel context, no mailbox commands allowed.
9496 */
9497 void
9498 ql_requeue_pending_cmds(ql_adapter_state_t *vha, ql_tgt_t *tq)
9499 {
9500 ql_link_t *link;
9501 ql_srb_t *sp;
9502 ql_lun_t *lq;
9503 ql_adapter_state_t *ha = vha->pha;
9504
9505 QL_PRINT_3(ha, "started\n");
9506
9507 REQUEST_RING_LOCK(ha);
9508 for (link = ha->pending_cmds.first; link != NULL; link = link->next) {
9509 sp = link->base_address;
9510 if ((lq = sp->lun_queue) == NULL || lq->target_queue != tq) {
9511 continue;
9512 }
9513 ql_remove_link(&ha->pending_cmds, &sp->cmd);
9514
9515 if (tq->outcnt) {
9516 tq->outcnt--;
9517 }
9518 if (sp->flags & SRB_FCP_CMD_PKT) {
9519 if (sp->fcp->fcp_cntl.cntl_qtype ==
9520 FCP_QTYPE_UNTAGGED) {
9521 lq->flags &= ~LQF_UNTAGGED_PENDING;
9522 }
9523 if (lq->lun_outcnt != 0) {
9524 lq->lun_outcnt--;
9525 }
9526 }
9527 ql_add_link_t(&lq->cmd, &sp->cmd);
9528 sp->flags |= SRB_IN_DEVICE_QUEUE;
9529 }
9530 REQUEST_RING_UNLOCK(ha);
9531
9532 QL_PRINT_3(ha, "done\n");
9533 }
9534
9535 /*
9536 * ql_process_rscn
9537 *
9538 * Input:
9539 * ha: adapter state pointer.
9540 * af: RSCN payload pointer.
9541 *
9542 * Context:
9543 * Kernel context.
9544 */
9545 static int
9546 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9547 {
9548 int sendit;
9549 int sendup = 1;
9550 ql_link_t *link;
9551 uint16_t index;
9552 ql_tgt_t *tq;
9553
9554 QL_PRINT_3(ha, "started\n");
9555
9556 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9557 port_id_t d_id;
9558
9559 d_id.r.rsvd_1 = 0;
9560 d_id.b24 = af->aff_d_id;
9561
9562 tq = ql_d_id_to_queue(ha, d_id);
9563 if (tq) {
9564 sendup = ql_process_rscn_for_device(ha, tq);
9565 }
9566
9567 QL_PRINT_3(ha, "done\n");
9568
9569 return (sendup);
9570 }
9571
9572 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9573 for (link = ha->dev[index].first; link != NULL;
9574 link = link->next) {
9575
9576 tq = link->base_address;
9577 if (tq == NULL) {
9578 continue;
9579 }
9580
9581 switch (af->aff_format) {
9582 case FC_RSCN_FABRIC_ADDRESS:
9583 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9584 sendit = ql_process_rscn_for_device(
9585 ha, tq);
9586 if (sendup) {
9587 sendup = sendit;
9588 }
9589 }
9590 break;
9591
9592 case FC_RSCN_AREA_ADDRESS:
9593 if ((tq->d_id.b24 & 0xffff00) ==
9594 af->aff_d_id) {
9595 sendit = ql_process_rscn_for_device(
9596 ha, tq);
9597
9598 if (sendup) {
9599 sendup = sendit;
9600 }
9601 }
9602 break;
9603
9604 case FC_RSCN_DOMAIN_ADDRESS:
9605 if ((tq->d_id.b24 & 0xff0000) ==
9606 af->aff_d_id) {
9607 sendit = ql_process_rscn_for_device(
9608 ha, tq);
9609
9610 if (sendup) {
9611 sendup = sendit;
9612 }
9613 }
9614 break;
9615
9616 default:
9617 break;
9618 }
9619 }
9620 }
9621
9622 QL_PRINT_3(ha, "done\n");
9623
9624 return (sendup);
9625 }
9626
9627 /*
9628 * ql_process_rscn_for_device
9629 *
9630 * Input:
9631 * ha: adapter state pointer.
9632 * tq: target queue pointer.
9633 *
9634 * Context:
9635 * Kernel context.
9636 */
9637 static int
9638 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9639 {
9640 int sendup = 1;
9641
9642 QL_PRINT_3(ha, "started\n");
9643
9644 DEVICE_QUEUE_LOCK(tq);
9645
9646 /*
9647 * Let FCP-2 compliant devices continue I/Os
9648 * with their low level recoveries.
9649 */
9650 if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9651 (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9652 /*
9653 * Cause ADISC to go out
9654 */
9655 DEVICE_QUEUE_UNLOCK(tq);
9656
9657 (void) ql_get_port_database(ha, tq, PDF_NONE);
9658
9659 DEVICE_QUEUE_LOCK(tq);
9660 tq->flags &= ~TQF_RSCN_RCVD;
9661
9662 } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9663 if (tq->d_id.b24 != BROADCAST_ADDR) {
9664 tq->flags |= TQF_NEED_AUTHENTICATION;
9665 }
9666
9667 DEVICE_QUEUE_UNLOCK(tq);
9668
9669 (void) ql_abort_device(ha, tq, 1);
9670
9671 DEVICE_QUEUE_LOCK(tq);
9672
9673 if (tq->outcnt) {
9674 EL(ha, "busy tq->outcnt=%d\n", tq->outcnt);
9675 sendup = 0;
9676 } else {
9677 tq->flags &= ~TQF_RSCN_RCVD;
9678 }
9679 } else {
9680 tq->flags &= ~TQF_RSCN_RCVD;
9681 }
9682
9683 if (sendup) {
9684 if (tq->d_id.b24 != BROADCAST_ADDR) {
9685 tq->flags |= TQF_NEED_AUTHENTICATION;
9686 }
9687 }
9688
9689 DEVICE_QUEUE_UNLOCK(tq);
9690
9691 QL_PRINT_3(ha, "done\n");
9692
9693 return (sendup);
9694 }
9695
/*
 * ql_handle_rscn_update
 *	Re-reads the firmware d_id list, creates device queues for any
 *	newly visible devices and emulates a PLOGI up to the transport
 *	for each one.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int		rval;
	ql_tgt_t	*tq;
	uint16_t	index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t	list_size;
	port_id_t	d_id;
	ql_mbx_data_t	mr;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_3(ha, "started\n");

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): defensive; kmem_zalloc(KM_SLEEP) should not fail. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known? Then nothing to do. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (ha->topology & QL_F_PORT ||
			    d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Lock dropped around the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* Deliver the queued PLOGI callbacks outside the lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first, B_FALSE);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
9786
9787 /*
9788 * ql_free_unsolicited_buffer
9789 * Frees allocated buffer.
9790 *
9791 * Input:
9792 * ha = adapter state pointer.
9793 * index = buffer array index.
9794 * ADAPTER_STATE_LOCK must be already obtained.
9795 *
9796 * Context:
9797 * Kernel context.
9798 */
9799 static void
9800 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9801 {
9802 ql_srb_t *sp;
9803 int status;
9804
9805 QL_PRINT_3(ha, "started\n");
9806
9807 sp = ubp->ub_fca_private;
9808 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9809 /* Disconnect IP from system buffers. */
9810 if (ha->flags & IP_INITIALIZED) {
9811 status = ql_shutdown_ip(ha);
9812 if (status != QL_SUCCESS) {
9813 cmn_err(CE_WARN,
9814 "!Qlogic %s(%d): Failed to shutdown IP",
9815 QL_NAME, ha->instance);
9816 return;
9817 }
9818
9819 ha->flags &= ~IP_ENABLED;
9820 }
9821
9822 ql_free_phys(ha, &sp->ub_buffer);
9823 } else {
9824 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9825 }
9826
9827 kmem_free(sp, sizeof (ql_srb_t));
9828 kmem_free(ubp, sizeof (fc_unsol_buf_t));
9829
9830 QL_UB_LOCK(ha);
9831 if (ha->ub_allocated != 0) {
9832 ha->ub_allocated--;
9833 }
9834 QL_UB_UNLOCK(ha);
9835
9836 QL_PRINT_3(ha, "done\n");
9837 }
9838
9839 /*
9840 * ql_get_unsolicited_buffer
9841 * Locates a free unsolicited buffer.
9842 *
9843 * Input:
9844 * ha = adapter state pointer.
9845 * type = buffer type.
9846 *
9847 * Returns:
9848 * Unsolicited buffer pointer.
9849 *
9850 * Context:
9851 * Interrupt or Kernel context, no mailbox commands allowed.
9852 */
9853 fc_unsol_buf_t *
9854 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9855 {
9856 fc_unsol_buf_t *ubp;
9857 ql_srb_t *sp;
9858 uint16_t index;
9859
9860 QL_PRINT_3(ha, "started\n");
9861
9862 /* Locate a buffer to use. */
9863 ubp = NULL;
9864
9865 QL_UB_LOCK(ha);
9866 for (index = 0; index < QL_UB_LIMIT; index++) {
9867 ubp = ha->ub_array[index];
9868 if (ubp != NULL) {
9869 sp = ubp->ub_fca_private;
9870 if ((sp->ub_type == type) &&
9871 (sp->flags & SRB_UB_IN_FCA) &&
9872 (!(sp->flags & (SRB_UB_CALLBACK |
9873 SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9874 sp->flags |= SRB_UB_ACQUIRED;
9875 ubp->ub_resp_flags = 0;
9876 break;
9877 }
9878 ubp = NULL;
9879 }
9880 }
9881 QL_UB_UNLOCK(ha);
9882
9883 if (ubp) {
9884 ubp->ub_resp_token = NULL;
9885 ubp->ub_class = FC_TRAN_CLASS3;
9886 }
9887
9888 QL_PRINT_3(ha, "done\n");
9889
9890 return (ubp);
9891 }
9892
9893 /*
9894 * ql_ub_frame_hdr
9895 * Processes received unsolicited buffers from ISP.
9896 *
9897 * Input:
9898 * ha: adapter state pointer.
9899 * tq: target queue pointer.
9900 * index: unsolicited buffer array index.
9901 * done_q: done queue pointer.
9902 *
9903 * Returns:
9904 * ql local function return status code.
9905 *
9906 * Context:
9907 * Interrupt or Kernel context, no mailbox commands allowed.
9908 */
9909 int
9910 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9911 ql_head_t *done_q)
9912 {
9913 fc_unsol_buf_t *ubp;
9914 ql_srb_t *sp;
9915 uint16_t loop_id;
9916 int rval = QL_FUNCTION_FAILED;
9917
9918 QL_PRINT_3(ha, "started\n");
9919
9920 QL_UB_LOCK(ha);
9921 if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9922 EL(ha, "Invalid buffer index=%xh\n", index);
9923 QL_UB_UNLOCK(ha);
9924 return (rval);
9925 }
9926
9927 sp = ubp->ub_fca_private;
9928 if (sp->flags & SRB_UB_FREE_REQUESTED) {
9929 EL(ha, "buffer freed index=%xh\n", index);
9930 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9931 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9932
9933 sp->flags |= SRB_UB_IN_FCA;
9934
9935 QL_UB_UNLOCK(ha);
9936 return (rval);
9937 }
9938
9939 if ((sp->handle == index) &&
9940 (sp->flags & SRB_UB_IN_ISP) &&
9941 (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9942 (!(sp->flags & SRB_UB_ACQUIRED))) {
9943 /* set broadcast D_ID */
9944 loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
9945 BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9946 if (tq->ub_loop_id == loop_id) {
9947 if (ha->topology & QL_FL_PORT) {
9948 ubp->ub_frame.d_id = 0x000000;
9949 } else {
9950 ubp->ub_frame.d_id = FS_BROADCAST;
9951 }
9952 } else {
9953 ubp->ub_frame.d_id = ha->d_id.b24;
9954 }
9955 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9956 ubp->ub_frame.rsvd = 0;
9957 ubp->ub_frame.s_id = tq->d_id.b24;
9958 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9959 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9960 ubp->ub_frame.df_ctl = 0;
9961 ubp->ub_frame.seq_id = tq->ub_seq_id;
9962 ubp->ub_frame.rx_id = 0xffff;
9963 ubp->ub_frame.ox_id = 0xffff;
9964 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9965 sp->ub_size : tq->ub_sequence_length;
9966 ubp->ub_frame.ro = tq->ub_frame_ro;
9967
9968 tq->ub_sequence_length = (uint16_t)
9969 (tq->ub_sequence_length - ubp->ub_bufsize);
9970 tq->ub_frame_ro += ubp->ub_bufsize;
9971 tq->ub_seq_cnt++;
9972
9973 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9974 if (tq->ub_seq_cnt == 1) {
9975 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9976 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9977 } else {
9978 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9979 F_CTL_END_SEQ;
9980 }
9981 tq->ub_total_seg_cnt = 0;
9982 } else if (tq->ub_seq_cnt == 1) {
9983 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9984 F_CTL_FIRST_SEQ;
9985 ubp->ub_frame.df_ctl = 0x20;
9986 }
9987
9988 QL_PRINT_3(ha, "ub_frame.d_id=%xh\n", ubp->ub_frame.d_id);
9989 QL_PRINT_3(ha, "ub_frame.s_id=%xh\n", ubp->ub_frame.s_id);
9990 QL_PRINT_3(ha, "ub_frame.seq_cnt=%xh\n", ubp->ub_frame.seq_cnt);
9991 QL_PRINT_3(ha, "ub_frame.seq_id=%xh\n", ubp->ub_frame.seq_id);
9992 QL_PRINT_3(ha, "ub_frame.ro=%xh\n", ubp->ub_frame.ro);
9993 QL_PRINT_3(ha, "ub_frame.f_ctl=%xh\n", ubp->ub_frame.f_ctl);
9994 QL_PRINT_3(ha, "ub_bufsize=%xh\n", ubp->ub_bufsize);
9995 QL_DUMP_3(ubp->ub_buffer, 8,
9996 ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9997
9998 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9999 ql_add_link_b(done_q, &sp->cmd);
10000 rval = QL_SUCCESS;
10001 } else {
10002 if (sp->handle != index) {
10003 EL(ha, "Bad index=%xh, expect=%xh\n", index,
10004 sp->handle);
10005 }
10006 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
10007 EL(ha, "buffer was already in driver, index=%xh\n",
10008 index);
10009 }
10010 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
10011 EL(ha, "buffer was not an IP buffer, index=%xh\n",
10012 index);
10013 }
10014 if (sp->flags & SRB_UB_ACQUIRED) {
10015 EL(ha, "buffer was being used by driver, index=%xh\n",
10016 index);
10017 }
10018 }
10019 QL_UB_UNLOCK(ha);
10020
10021 QL_PRINT_3(ha, "done\n");
10022
10023 return (rval);
10024 }
10025
10026 /*
10027 * ql_timer
10028 * One second timer function.
10029 *
10030 * Input:
10031 * ql_hba.first = first link in adapter list.
10032 *
10033 * Context:
10034 * Interrupt context, no mailbox commands allowed.
10035 */
10036 static void
10037 ql_timer(void *arg)
10038 {
10039 ql_link_t *link;
10040 uint64_t set_flags;
10041 ql_adapter_state_t *ha;
10042 static uint32_t sec_cnt = 0;
10043
10044 QL_PRINT_6(NULL, "started\n");
10045
10046 /* Acquire global state lock. */
10047 GLOBAL_TIMER_LOCK();
10048 if (ql_timer_timeout_id == NULL) {
10049 /* Release global state lock. */
10050 GLOBAL_TIMER_UNLOCK();
10051 return;
10052 }
10053
10054 sec_cnt++;
10055 for (link = ql_hba.first; link != NULL; link = link->next) {
10056 ha = link->base_address;
10057
10058 /* Skip adapter if suspended or stalled. */
10059 if (ha->flags & ADAPTER_SUSPENDED ||
10060 ha->task_daemon_flags & DRIVER_STALL ||
10061 !(ha->task_daemon_flags & FIRMWARE_UP)) {
10062 continue;
10063 }
10064
10065 QL_PM_LOCK(ha);
10066 if (ha->power_level != PM_LEVEL_D0) {
10067 QL_PM_UNLOCK(ha);
10068 continue;
10069 }
10070 ha->pm_busy++;
10071 QL_PM_UNLOCK(ha);
10072
10073 set_flags = 0;
10074
10075 /* All completion treads busy, wake up a helper thread. */
10076 if (ha->comp_thds_awake == ha->comp_thds_active &&
10077 ha->comp_q.first != NULL) {
10078 QL_PRINT_10(ha, "comp queue helper thrd started\n");
10079 (void) timeout(ql_process_comp_queue, (void *)ha, 1);
10080 }
10081
10082 /* Port retry timer handler. */
10083 if (LOOP_READY(ha)) {
10084 ADAPTER_STATE_LOCK(ha);
10085 if (ha->port_retry_timer != 0) {
10086 ha->port_retry_timer--;
10087 if (ha->port_retry_timer == 0) {
10088 set_flags |= PORT_RETRY_NEEDED;
10089 }
10090 }
10091 ADAPTER_STATE_UNLOCK(ha);
10092 }
10093
10094 /* Loop down timer handler. */
10095 if (LOOP_RECONFIGURE(ha) == 0) {
10096 if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
10097 ha->loop_down_timer--;
10098 /*
10099 * give the firmware loop down dump flag
10100 * a chance to work.
10101 */
10102 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
10103 if (CFG_IST(ha,
10104 CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
10105 ADAPTER_STATE_LOCK(ha);
10106 ha->flags |= FW_DUMP_NEEDED;
10107 ADAPTER_STATE_UNLOCK(ha);
10108 }
10109 EL(ha, "loop_down_reset, "
10110 "isp_abort_needed\n");
10111 set_flags |= ISP_ABORT_NEEDED;
10112 }
10113 }
10114 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
10115 /* Command abort time handler. */
10116 if (ha->loop_down_timer ==
10117 ha->loop_down_abort_time) {
10118 ADAPTER_STATE_LOCK(ha);
10119 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
10120 ADAPTER_STATE_UNLOCK(ha);
10121 set_flags |= ABORT_QUEUES_NEEDED;
10122 EL(ha, "loop_down_abort_time, "
10123 "abort_queues_needed\n");
10124 }
10125
10126 /* Watchdog timer handler. */
10127 if (ha->watchdog_timer == 0) {
10128 ha->watchdog_timer = WATCHDOG_TIME;
10129 } else if (LOOP_READY(ha)) {
10130 ha->watchdog_timer--;
10131 if (ha->watchdog_timer == 0) {
10132 set_flags |= WATCHDOG_NEEDED;
10133 }
10134 }
10135 }
10136 }
10137
10138 /* Idle timer handler. */
10139 if (!DRIVER_SUSPENDED(ha)) {
10140 if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
10141 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
10142 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
10143 #endif
10144 ha->idle_timer = 0;
10145 }
10146 if (ha->send_plogi_timer != NULL) {
10147 ha->send_plogi_timer--;
10148 if (ha->send_plogi_timer == NULL) {
10149 set_flags |= SEND_PLOGI;
10150 }
10151 }
10152 }
10153
10154 if (CFG_IST(ha, CFG_CTRL_82XX) && ha->flags & ONLINE &&
10155 !(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
10156 ABORT_ISP_ACTIVE)) &&
10157 !(sec_cnt % 2)) {
10158 set_flags |= IDC_POLL_NEEDED;
10159 }
10160
10161 if (ha->ledstate.BeaconState == BEACON_ON) {
10162 set_flags |= LED_BLINK;
10163 }
10164
10165 if (set_flags != 0) {
10166 ql_awaken_task_daemon(ha, NULL, set_flags, 0);
10167 }
10168
10169 /* Update the IO stats */
10170 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
10171 ha->xioctl->IOInputMByteCnt +=
10172 (ha->xioctl->IOInputByteCnt / 0x100000);
10173 ha->xioctl->IOInputByteCnt %= 0x100000;
10174 }
10175
10176 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
10177 ha->xioctl->IOOutputMByteCnt +=
10178 (ha->xioctl->IOOutputByteCnt / 0x100000);
10179 ha->xioctl->IOOutputByteCnt %= 0x100000;
10180 }
10181
10182 QL_PM_LOCK(ha);
10183 if (ha->pm_busy) {
10184 ha->pm_busy--;
10185 }
10186 QL_PM_UNLOCK(ha);
10187 }
10188
10189 /* Restart timer, if not being stopped. */
10190 if (ql_timer_timeout_id != NULL) {
10191 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
10192 }
10193
10194 /* Release global state lock. */
10195 GLOBAL_TIMER_UNLOCK();
10196
10197 QL_PRINT_6(ha, "done\n");
10198 }
10199
10200 /*
10201 * ql_timeout_insert
10202 * Function used to insert a command block onto the
10203 * watchdog timer queue.
10204 *
10205 * Note: Must insure that pkt_time is not zero
10206 * before calling ql_timeout_insert.
10207 *
10208 * Input:
10209 * ha: adapter state pointer.
10210 * tq: target queue pointer.
10211 * sp: SRB pointer.
10212 * DEVICE_QUEUE_LOCK must be already obtained.
10213 *
10214 * Context:
10215 * Kernel context.
10216 */
10217 /* ARGSUSED */
10218 static void
10219 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
10220 {
10221 QL_PRINT_3(ha, "started\n");
10222
10223 if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
10224 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
10225 /*
10226 * The WATCHDOG_TIME must be rounded up + 1. As an example,
10227 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
10228 * will expire in the next watchdog call, which could be in
10229 * 1 microsecond.
10230 *
10231 */
10232 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
10233 WATCHDOG_TIME;
10234 /*
10235 * Added an additional 10 to account for the
10236 * firmware timer drift which can occur with
10237 * very long timeout values.
10238 */
10239 sp->wdg_q_time += 10;
10240
10241 /*
10242 * Add 6 more to insure watchdog does not timeout at the same
10243 * time as ISP RISC code timeout.
10244 */
10245 sp->wdg_q_time += 6;
10246
10247 /* Save initial time for resetting watchdog time. */
10248 sp->init_wdg_q_time = sp->wdg_q_time;
10249
10250 /* Insert command onto watchdog queue. */
10251 ql_add_link_b(&tq->wdg, &sp->wdg);
10252
10253 sp->flags |= SRB_WATCHDOG_ENABLED;
10254 } else {
10255 sp->isp_timeout = 0;
10256 sp->wdg_q_time = 0;
10257 sp->init_wdg_q_time = 0;
10258 }
10259
10260 QL_PRINT_3(ha, "done\n");
10261 }
10262
10263 /*
10264 * ql_watchdog
10265 * Timeout handler that runs in interrupt context. The
10266 * ql_adapter_state_t * argument is the parameter set up when the
10267 * timeout was initialized (state structure pointer).
10268 * Function used to update timeout values and if timeout
10269 * has occurred command will be aborted.
10270 *
10271 * Input:
10272 * ha: adapter state pointer.
10273 *
10274 * Context:
10275 * Kernel context.
10276 */
10277 static void
10278 ql_watchdog(ql_adapter_state_t *ha)
10279 {
10280 ql_link_t *link;
10281 ql_tgt_t *tq;
10282 uint16_t index;
10283 ql_adapter_state_t *vha;
10284
10285 QL_PRINT_6(ha, "started\n");
10286
10287 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10288 /* Loop through all targets. */
10289 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10290 for (link = vha->dev[index].first; link != NULL;
10291 link = link->next) {
10292 tq = link->base_address;
10293
10294 /* Try to acquire device queue lock. */
10295 if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
10296 break;
10297 }
10298
10299 if (!(CFG_IST(ha,
10300 CFG_ENABLE_LINK_DOWN_REPORTING)) &&
10301 (tq->port_down_retry_count == 0)) {
10302 /* Release device queue lock. */
10303 DEVICE_QUEUE_UNLOCK(tq);
10304 continue;
10305 }
10306 ql_wdg_tq_list(vha, tq);
10307 }
10308 }
10309 }
10310 ha->watchdog_timer = WATCHDOG_TIME;
10311
10312 QL_PRINT_6(ha, "done\n");
10313 }
10314
10315 /*
10316 * ql_wdg_tq_list
 *	Watchdog helper for one target: ages the commands on the
 *	target's watchdog queue and aborts any command whose timer
 *	has expired.
10322 *
10323 * Input:
10324 * ha: adapter state pointer.
10325 * tq: target queue pointer.
10326 * DEVICE_QUEUE_LOCK must be already obtained.
10327 *
10328 * Output:
10329 * Releases DEVICE_QUEUE_LOCK upon exit.
10330 *
10331 * Context:
10332 * Kernel context.
10333 */
static void
ql_wdg_tq_list(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_srb_t	*sp;
	ql_link_t	*link, *next_cmd;
	ql_lun_t	*lq;
	boolean_t	q_sane, timeout = B_FALSE;

	QL_PRINT_6(ha, "started\n");

	/* Find out if this device is in a sane state. */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
	    TQF_QUEUE_SUSPENDED)) {
		q_sane = B_FALSE;
	} else {
		q_sane = B_TRUE;
	}
	/* First pass: age every command on the watchdog queue. */
	for (link = tq->wdg.first; link != NULL; link = next_cmd) {
		next_cmd = link->next;
		sp = link->base_address;
		lq = sp->lun_queue;

		/*
		 * For SCSI commands, if everything seems to be going
		 * fine and this packet is stuck because of throttling
		 * at the LUN or target level, then do not decrement
		 * sp->wdg_q_time.
		 */
		if (ha->task_daemon_flags & STATE_ONLINE &&
		    !(sp->flags & SRB_ISP_STARTED) &&
		    q_sane == B_TRUE &&
		    sp->flags & SRB_FCP_CMD_PKT &&
		    lq->lun_outcnt >= ha->execution_throttle) {
			continue;
		}

		if (sp->wdg_q_time != 0) {
			sp->wdg_q_time--;

			/* Timeout? */
			if (sp->wdg_q_time != 0) {
				continue;
			}

			/* Mark for abort in the second pass below. */
			sp->flags |= SRB_COMMAND_TIMEOUT;
			timeout = B_TRUE;
		}
	}

	/*
	 * Second pass: loop through commands on the watchdog queue
	 * and abort the ones marked timed out above.
	 */
	if (timeout == B_TRUE) {
		for (link = tq->wdg.first; link != NULL; link = next_cmd) {
			sp = link->base_address;
			next_cmd = link->next;

			if (sp->flags & SRB_COMMAND_TIMEOUT) {
				ql_remove_link(&tq->wdg, &sp->wdg);
				sp->flags &= ~(SRB_WATCHDOG_ENABLED |
				    SRB_COMMAND_TIMEOUT);
				/*
				 * ql_cmd_timeout() drops and reacquires
				 * the device queue lock, so the list may
				 * have changed; restart from the head.
				 */
				ql_cmd_timeout(ha, tq, sp);
				next_cmd = tq->wdg.first;
			}
		}
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_6(ha, "done\n");
}
10410
10411 /*
10412 * ql_cmd_timeout
10413 * Command timeout handler.
10414 *
10415 * Input:
10416 * ha: adapter state pointer.
10417 * tq: target queue pointer.
10418 * sp: SRB pointer.
10419 *
10420 * Context:
10421 * Kernel context.
10422 */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
{
	int	rval = 0;

	QL_PRINT_3(ha, "started\n");

	REQUEST_RING_LOCK(ha);
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Command never reached the ISP; fail it in the driver. */
		EL(ha, "command timed out in driver, sp=%ph spf=%xh\n",
		    (void *)sp, sp->flags);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds queue needs to be
			 * protected by the ring lock.
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release ring and device queue locks. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd, B_FALSE);
	} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 82xx: attempt an individual abort before ISP reset. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		/*
		 * Force an ISP abort when too many timeouts have been
		 * seen or when the individual abort itself fails.
		 */
		if (ha->pha->timeout_cnt++ > TIMEOUT_THRESHOLD ||
		    (rval = ql_abort_io(ha, sp)) != QL_SUCCESS) {
			sp->flags |= SRB_COMMAND_TIMEOUT;
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
			    "needed\n", rval, ha->pha->timeout_cnt);
		}
	} else {
		/* Command is in the ISP; schedule an ISP abort/reset. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh, isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally request a firmware dump for diagnosis. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= FW_DUMP_NEEDED;
			ADAPTER_STATE_UNLOCK(ha);
		}

		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= ISP_ABORT_NEEDED;
		TASK_DAEMON_UNLOCK(ha);
	}
	/* Reacquire the device queue lock for the caller. */
	DEVICE_QUEUE_LOCK(tq);

	QL_PRINT_3(ha, "done\n");
}
10505
10506 /*
10507 * ql_cmd_wait
10508 * Stall driver until all outstanding commands are returned.
10509 *
10510 * Input:
10511 * ha = adapter state pointer.
10512 *
10513 * Context:
10514 * Kernel context.
10515 */
void
ql_cmd_wait(ql_adapter_state_t *ha)
{
	uint16_t		index;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(ha, "started\n");

	/* Wait for all outstanding commands to be returned. */
	(void) ql_wait_outstanding(ha);

	/*
	 * clear out internally queued commands
	 */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/*
				 * Skip devices capable of FCP2 retries
				 * unless an ISP abort is in progress.
				 */
				if (tq &&
				    (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY) ||
				    ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
					(void) ql_abort_device(vha, tq, 0);
				}
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
10549
10550 /*
10551 * ql_wait_outstanding
10552 * Wait for all outstanding commands to complete.
10553 *
10554 * Input:
10555 * ha = adapter state pointer.
10556 *
10557 * Returns:
10558 * index - the index for ql_srb into outstanding_cmds.
10559 *
10560 * Context:
10561 * Kernel context.
10562 */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(ha, "started\n");

	/* Total wait budget, in units of the 10ms delay below. */
	count = ql_osc_wait_count;
	for (index = 1; index < ha->pha->osc_max_cnt; index++) {
		/*
		 * Keep pushing driver-pending commands to the ISP and
		 * restart the scan near the beginning of the array.
		 */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/*
		 * Wait only on live commands: skip empty slots,
		 * aborted-SRB placeholders, and commands already
		 * marked timed out.
		 */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    sp != QL_ABORTED_SRB(ha) &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				/* Rescan from slot 1 after the delay. */
				index = 0;
			} else {
				/* Wait budget exhausted; give up. */
				EL(ha, "still in OSC,sp=%ph,oci=%d,sph=%xh,"
				    "spf=%xh\n", (void *) sp, index, sp->handle,
				    sp->flags);
				break;
			}
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (index);
}
10596
10597 /*
10598 * ql_restart_queues
10599 * Restart device queues.
10600 *
10601 * Input:
10602 * ha = adapter state pointer.
10603 * DEVICE_QUEUE_LOCK must be released.
10604 *
10605 * Context:
10606 * Interrupt or Kernel context, no mailbox commands allowed.
10607 */
void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(ha, "started\n");

	/* Start from the physical port so all virtual ports are covered. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				/* Lift the suspension on this target. */
				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				/* Kick every LUN that has queued commands. */
				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						/*
						 * NOTE(review): ql_next()
						 * apparently releases the
						 * device queue lock -- it
						 * is reacquired here.
						 * Confirm against its
						 * definition.
						 */
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
10648
10649 /*
10650 * ql_iidma
10651 * Setup iiDMA parameters to firmware
10652 *
10653 * Input:
10654 * ha = adapter state pointer.
10655 * DEVICE_QUEUE_LOCK must be released.
10656 *
10657 * Context:
10658 * Interrupt or Kernel context, no mailbox commands allowed.
10659 */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(ha, "started\n");

	/* Nothing to do when the adapter has no iiDMA support. */
	if (!CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
		QL_PRINT_3(ha, "done\n");
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Only process targets flagged for iiDMA setup. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				continue;
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);

			/*
			 * Skip fabric service handles, the management
			 * server, initiator-mode devices, and targets
			 * with no defined rate.
			 */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->d_id.b24 == FS_MANAGEMENT_SERVER) ||
			    (tq->flags & TQF_INITIATOR_DEVICE) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				continue;
			}

			/*
			 * Get the iiDMA persistent data; the property
			 * name is keyed by the eight port_name bytes.
			 */
			(void) snprintf(buf, sizeof (buf),
			    "iidma-rate-%02x%02x%02x%02x%02x"
			    "%02x%02x%02x", tq->port_name[0],
			    tq->port_name[1], tq->port_name[2],
			    tq->port_name[3], tq->port_name[4],
			    tq->port_name[5], tq->port_name[6],
			    tq->port_name[7]);

			/* 0xffffffff from ql_get_prop is treated as unset. */
			if ((data = ql_get_prop(ha, buf)) ==
			    0xffffffff) {
				tq->iidma_rate = IIDMA_RATE_NDEF;
			} else {
				/* Accept only known rate values. */
				switch (data) {
				case IIDMA_RATE_4GB:
				case IIDMA_RATE_8GB:
				case IIDMA_RATE_10GB:
				case IIDMA_RATE_16GB:
				case IIDMA_RATE_32GB:
					tq->iidma_rate = data;
					break;
				default:
					EL(ha, "invalid data for "
					    "parameter: %s: %xh\n",
					    buf, data);
					tq->iidma_rate =
					    IIDMA_RATE_NDEF;
					break;
				}
			}

			EL(ha, "d_id = %xh iidma_rate = %xh\n",
			    tq->d_id.b24, tq->iidma_rate);

			/* Set the firmware's iiDMA rate (not on FCoE). */
			if (!CFG_IST(ha, CFG_FCOE_SUPPORT)) {
				if (tq->iidma_rate <= IIDMA_RATE_MAX) {
					data = ql_iidma_rate(ha, tq->loop_id,
					    &tq->iidma_rate,
					    EXT_IIDMA_MODE_SET);
					if (data != QL_SUCCESS) {
						EL(ha, "mbx failed: %xh\n",
						    data);
					}
				}
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
10751
10752 /*
10753 * ql_abort_queues
10754 * Abort all commands on device queues.
10755 *
10756 * Input:
10757 * ha = adapter state pointer.
10758 *
10759 * Context:
10760 * Interrupt or Kernel context, no mailbox commands allowed.
10761 */
void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(ha, "started\n");

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < ha->osc_max_cnt; index++) {
		/*
		 * Flush any driver-pending commands first; the
		 * interrupt lock is dropped across ql_start_iocb(),
		 * so the scan restarts from the beginning.
		 */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* Skip aborted-SRB placeholders and other ports' SRBs. */
		if (sp && (sp == QL_ABORTED_SRB(ha) || sp->ha != ha)) {
			continue;
		}

		/* skip devices capable of FCP2 retrys */
		if (sp != NULL &&
		    (sp->lun_queue == NULL ||
		    (tq = sp->lun_queue->target_queue) == NULL ||
		    !(tq->prli_svc_param_word_3 & PRLI_W3_RETRY) ||
		    ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
			/* Detach from the outstanding command array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* Drop the lock across the completion callback. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd, B_FALSE);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush the device queues of every port and target. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(vha, "abort instance\n");
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY) ||
				    ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(ha, "done\n");
}
10839
10840 /*
10841 * ql_abort_device_queues
10842 * Abort all commands on device queues.
10843 *
 * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
10846 *
10847 * Context:
10848 * Interrupt or Kernel context, no mailbox commands allowed.
10849 */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(ha, "started\n");

	DEVICE_QUEUE_LOCK(tq);
	/*
	 * NOTE(review): presumably moves driver-pending commands back
	 * onto this target's device queues so the loop below flushes
	 * them too -- confirm against ql_requeue_pending_cmds().
	 */
	ql_requeue_pending_cmds(ha, tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Drop the lock across the completion callback. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd, B_FALSE);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * Re-fetch the head: the list may have changed
			 * while the lock was dropped.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(ha, "done\n");
}
10894
10895 /*
10896 * ql_loop_resync
10897 * Resync with fibre channel devices.
10898 *
10899 * Input:
10900 * ha = adapter state pointer.
10901 * DEVICE_QUEUE_LOCK must be released.
10902 *
10903 * Context:
10904 * Kernel context.
10905 */
static void
ql_loop_resync(ql_adapter_state_t *ha)
{
	int rval;

	QL_PRINT_3(ha, "started\n");

	/* Shut down IP traffic across the resync. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Wait for the firmware to report ready. */
	rval = ql_fw_ready(ha, 10);

	TASK_DAEMON_LOCK(ha);
	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
	TASK_DAEMON_UNLOCK(ha);

	/* Set loop online, if it really is. */
	if (rval == QL_SUCCESS) {
		ql_loop_online(ha);
		QL_PRINT_3(ha, "done\n");
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
}
10931
10932 /*
10933 * ql_loop_online
10934 * Set loop online status if it really is online.
10935 *
10936 * Input:
10937 * ha = adapter state pointer.
10938 * DEVICE_QUEUE_LOCK must be released.
10939 *
10940 * Context:
10941 * Kernel context.
10942 */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(ha, "started\n");

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * Update and report the port state only when
			 * it is not already LOOP or ONLINE.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				/* Keep the speed bits, replace the state. */
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(ha, "done\n");
}
10984
10985 /*
10986 * ql_fca_handle_to_state
10987 * Verifies handle to be correct.
10988 *
10989 * Input:
10990 * fca_handle = pointer to state structure.
10991 *
10992 * Returns:
10993 * NULL = failure
10994 *
10995 * Context:
10996 * Kernel context.
10997 */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef QL_DEBUG_ROUTINES
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	/*
	 * Debug builds only: scan the global adapter list (and each
	 * adapter's virtual ports) to verify the handle is a known
	 * adapter/vport state pointer; log if it is not.
	 */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(ha, "failed\n");
	}

#endif /* QL_DEBUG_ROUTINES */

	/* Non-debug builds trust the caller's handle unconditionally. */
	return ((ql_adapter_state_t *)fca_handle);
}
11030
11031 /*
11032 * ql_d_id_to_queue
11033 * Locate device queue that matches destination ID.
11034 *
11035 * Input:
11036 * ha = adapter state pointer.
11037 * d_id = destination ID
11038 *
11039 * Returns:
11040 * NULL = failure
11041 *
11042 * Context:
11043 * Interrupt or Kernel context, no mailbox commands allowed.
11044 */
11045 ql_tgt_t *
11046 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
11047 {
11048 uint16_t index;
11049 ql_tgt_t *tq;
11050 ql_link_t *link;
11051
11052 /* Get head queue index. */
11053 index = ql_alpa_to_index[d_id.b.al_pa];
11054
11055 for (link = ha->dev[index].first; link != NULL; link = link->next) {
11056 tq = link->base_address;
11057 if (tq->d_id.b24 == d_id.b24 &&
11058 VALID_DEVICE_ID(ha, tq->loop_id)) {
11059 return (tq);
11060 }
11061 }
11062
11063 return (NULL);
11064 }
11065
11066 /*
11067 * ql_loop_id_to_queue
11068 * Locate device queue that matches loop ID.
11069 *
11070 * Input:
11071 * ha: adapter state pointer.
11072 * loop_id: destination ID
11073 *
11074 * Returns:
11075 * NULL = failure
11076 *
11077 * Context:
11078 * Interrupt or Kernel context, no mailbox commands allowed.
11079 */
11080 ql_tgt_t *
11081 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
11082 {
11083 uint16_t index;
11084 ql_tgt_t *tq;
11085 ql_link_t *link;
11086
11087 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
11088 for (link = ha->dev[index].first; link != NULL;
11089 link = link->next) {
11090 tq = link->base_address;
11091 if (tq->loop_id == loop_id) {
11092 return (tq);
11093 }
11094 }
11095 }
11096
11097 return (NULL);
11098 }
11099
11100 /*
11101 * ql_kstat_update
11102 * Updates kernel statistics.
11103 *
11104 * Input:
11105 * ksp - driver kernel statistics structure pointer.
11106 * rw - function to perform
11107 *
11108 * Returns:
11109 * 0 or EACCES
11110 *
11111 * Context:
11112 * Kernel context.
11113 */
11114 /* ARGSUSED */
11115 static int
11116 ql_kstat_update(kstat_t *ksp, int rw)
11117 {
11118 int rval;
11119
11120 QL_PRINT_3(ksp->ks_private, "started\n");
11121
11122 if (rw == KSTAT_WRITE) {
11123 rval = EACCES;
11124 } else {
11125 rval = 0;
11126 }
11127
11128 if (rval != 0) {
11129 /*EMPTY*/
11130 QL_PRINT_2(ksp->ks_private, "failed, rval = %xh\n", rval);
11131 } else {
11132 /*EMPTY*/
11133 QL_PRINT_3(ksp->ks_private, "done\n");
11134 }
11135 return (rval);
11136 }
11137
11138 /*
11139 * ql_load_flash
11140 * Loads flash.
11141 *
11142 * Input:
11143 * ha: adapter state pointer.
11144 * dp: data pointer.
11145 * size: data length.
11146 *
11147 * Returns:
11148 * ql local function return status code.
11149 *
11150 * Context:
11151 * Kernel context.
11152 */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* FW type 2 (24xx-class) parts use their own flash routine. */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(ha, "started\n");

	/* Defaults: 0x20000-byte max image written at offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/* Partial update: FPGA image goes at 0x40000. */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	/* Reject images larger than the selected region. */
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash, one byte at a time. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always re-lock the flash, even on failure. */
	ql_flash_disable(ha);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
11220
11221 /*
11222 * ql_program_flash_address
11223 * Program flash address.
11224 *
11225 * Input:
11226 * ha = adapter state pointer.
11227 * addr = flash byte address.
11228 * data = data to be written to flash.
11229 *
11230 * Returns:
11231 * ql local function return status code.
11232 *
11233 * Context:
11234 * Kernel context.
11235 */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	int rval;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS parts take only the final program command byte. */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/* Write Program Command Sequence (unlock + program). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
11265
11266 /*
11267 * ql_erase_flash
11268 * Erases entire flash.
11269 *
11270 * Input:
11271 * ha = adapter state pointer.
11272 *
11273 * Returns:
11274 * ql local function return status code.
11275 *
11276 * Context:
11277 * Kernel context.
11278 */
int
ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
{
	int		rval;
	uint32_t	erase_delay = 2000000;
	uint32_t	sStartAddr;
	uint32_t	ssize;
	uint32_t	cnt;
	uint8_t		*bfp;
	uint8_t		*tmp;

	QL_PRINT_3(ha, "started\n");

	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
		/*
		 * SBUS partial update: the chip is erased as a whole,
		 * so save the section NOT being updated (FCode or
		 * FPGA), erase everything, then program it back.
		 */
		if (ql_flash_sbus_fpga == 1) {
			ssize = QL_SBUS_FCODE_SIZE;
			sStartAddr = QL_FCODE_OFFSET;
		} else {
			ssize = QL_FPGA_SIZE;
			sStartAddr = QL_FPGA_OFFSET;
		}

		/* Longer settle time for the save/restore path. */
		erase_delay = 20000000;

		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);

		/* Save the section of flash we're not updating to buffer */
		tmp = bfp;
		for (cnt = sStartAddr; cnt < ssize + sStartAddr; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
		}

		/* Chip Erase Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x80);
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x10);

		ql_delay(ha, erase_delay);

		/* Wait for erase to complete. */
		rval = ql_poll_flash(ha, 0, 0x80);

		if (rval == QL_SUCCESS) {
			/* Restore the section we saved off */
			tmp = bfp;
			for (cnt = sStartAddr; cnt < ssize + sStartAddr;
			    cnt++) {
				/* Allow other system activity. */
				if (cnt % 0x1000 == 0) {
					ql_delay(ha, 10000);
				}
				rval = ql_program_flash_address(ha, cnt,
				    *tmp++);
				if (rval != QL_SUCCESS) {
					break;
				}
			}
		}
		/* Buffer is freed on both success and failure paths. */
		kmem_free(bfp, ssize);
	} else {
		/* Chip Erase Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x80);
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x10);

		ql_delay(ha, erase_delay);

		/* Wait for erase to complete. */
		rval = ql_poll_flash(ha, 0, 0x80);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
11368
11369 /*
11370 * ql_poll_flash
11371 * Polls flash for completion.
11372 *
11373 * Input:
11374 * ha = adapter state pointer.
11375 * addr = flash byte address.
11376 * data = data to be polled.
11377 *
11378 * Returns:
11379 * ql local function return status code.
11380 *
11381 * Context:
11382 * Kernel context.
11383 */
11384 int
11385 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
11386 {
11387 uint8_t flash_data;
11388 uint32_t cnt;
11389 int rval = QL_FUNCTION_FAILED;
11390
11391 QL_PRINT_3(ha, "started\n");
11392
11393 poll_data = (uint8_t)(poll_data & BIT_7);
11394
11395 /* Wait for 30 seconds for command to finish. */
11396 for (cnt = 30000000; cnt; cnt--) {
11397 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11398
11399 if ((flash_data & BIT_7) == poll_data) {
11400 rval = QL_SUCCESS;
11401 break;
11402 }
11403 if (flash_data & BIT_5 && cnt > 2) {
11404 cnt = 2;
11405 }
11406 drv_usecwait(1);
11407 }
11408
11409 if (rval != QL_SUCCESS) {
11410 EL(ha, "failed=%xh\n", rval);
11411 } else {
11412 /*EMPTY*/
11413 QL_PRINT_3(ha, "done\n");
11414 }
11415 return (rval);
11416 }
11417
11418 /*
11419 * ql_flash_enable
11420 * Setup flash for reading/writing.
11421 *
11422 * Input:
11423 * ha = adapter state pointer.
11424 *
11425 * Context:
11426 * Kernel context.
11427 */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(ha, "started\n");

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Set the flash-enable bit in the control/status reg. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read; the return value is intentionally ignored. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(ha, "done\n");
}
11461
11462 /*
11463 * ql_flash_disable
11464 * Disable flash and allow RISC to run.
11465 *
11466 * Input:
11467 * ha = adapter state pointer.
11468 *
11469 * Context:
11470 * Kernel context.
11471 */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the flash-enable bit in the control/status reg. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(ha, "done\n");
}
11499
11500 /*
11501 * ql_write_flash_byte
11502 * Write byte to flash.
11503 *
11504 * Input:
11505 * ha = adapter state pointer.
11506 * addr = flash byte address.
11507 * data = data to be written.
11508 *
11509 * Context:
11510 * Kernel context.
11511 */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * SBUS card: the flash is reached through FPGA EEPROM
		 * registers; latch the low/high address halves, then the
		 * data word, in that order.
		 */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t	bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
			/*
			 * ISP2322/6322 carry flash address bits 16-19 in
			 * ctrl_status bits 4-7; replace that nibble wholesale
			 * along with the 64K bank bit.
			 */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/*
			 * Other parts expose only one bank bit; toggle it
			 * only when the target bank differs, to avoid
			 * superfluous register writes.
			 */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this inner CFG_SBUS_CARD test looks
		 * unreachable - the outer branch already handled the SBUS
		 * case, so only the IOMAP path below should ever run here.
		 * Left as-is; confirm before removing.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
11559
11560 /*
11561 * ql_read_flash_byte
11562 * Reads byte from flash, but must read a word from chip.
11563 *
11564 * Input:
11565 * ha = adapter state pointer.
11566 * addr = flash byte address.
11567 *
11568 * Returns:
11569 * byte from flash.
11570 *
11571 * Context:
11572 * Kernel context.
11573 */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * SBUS card: present the byte address to the FPGA EEPROM
		 * registers (low half, then high half), then read the data
		 * register; only the low byte of the 16-bit read is valid.
		 */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t	bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
			/*
			 * ISP2322/6322 carry flash address bits 16-19 in
			 * ctrl_status bits 4-7; replace that nibble wholesale
			 * along with the 64K bank bit.
			 */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it must change. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this inner CFG_SBUS_CARD test looks
		 * unreachable - the outer branch already handled the SBUS
		 * case, so only the IOMAP path below should ever run here.
		 * Left as-is; confirm before removing.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
11623
11624 /*
11625 * ql_24xx_flash_id
11626 * Get flash IDs.
11627 *
11628 * Input:
11629 * ha: adapter state pointer.
11630 *
11631 * Returns:
11632 * ql local function return status code.
11633 *
11634 * Context:
11635 * Kernel context.
11636 */
11637 int
11638 ql_24xx_flash_id(ql_adapter_state_t *vha)
11639 {
11640 int rval;
11641 uint32_t fdata = 0;
11642 ql_adapter_state_t *ha = vha->pha;
11643 ql_xioctl_t *xp = ha->xioctl;
11644
11645 QL_PRINT_3(ha, "started\n");
11646
11647 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11648 if (CFG_IST(ha, CFG_CTRL_24XX)) {
11649 if (rval != QL_SUCCESS || fdata == 0) {
11650 fdata = 0;
11651 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x39F,
11652 &fdata);
11653 }
11654 } else {
11655 fdata = 0;
11656 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11657 (CFG_IST(ha, CFG_CTRL_25XX) ? 0x49F : 0x39F), &fdata);
11658 }
11659
11660 if (rval != QL_SUCCESS) {
11661 EL(ha, "24xx read_flash failed=%xh\n", rval);
11662 } else if (fdata != 0) {
11663 xp->fdesc.flash_manuf = LSB(LSW(fdata));
11664 xp->fdesc.flash_id = MSB(LSW(fdata));
11665 xp->fdesc.flash_len = LSB(MSW(fdata));
11666 } else {
11667 xp->fdesc.flash_manuf = ATMEL_FLASH;
11668 xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11669 xp->fdesc.flash_len = 0;
11670 }
11671
11672 QL_PRINT_3(ha, "done\n");
11673
11674 return (rval);
11675 }
11676
11677 /*
11678 * ql_24xx_load_flash
11679 * Loads flash.
11680 *
11681 * Input:
11682 * ha = adapter state pointer.
11683 * dp = data pointer.
11684 * size = data length in bytes.
11685 * faddr = 32bit word flash byte address.
11686 *
11687 * Returns:
11688 * ql local function return status code.
11689 *
11690 * Context:
11691 * Kernel context.
11692 */
11693 int
11694 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11695 uint32_t faddr)
11696 {
11697 int rval;
11698 uint32_t cnt, rest_addr, fdata, wc;
11699 dma_mem_t dmabuf = {0};
11700 ql_adapter_state_t *ha = vha->pha;
11701 ql_xioctl_t *xp = ha->xioctl;
11702
11703 QL_PRINT_3(ha, "started, faddr=%xh, size=%xh\n",
11704 ha->instance, faddr, size);
11705
11706 /* start address must be 32 bit word aligned */
11707 if ((faddr & 0x3) != 0) {
11708 EL(ha, "incorrect buffer size alignment\n");
11709 return (QL_FUNCTION_PARAMETER_ERROR);
11710 }
11711
11712 /* Allocate DMA buffer */
11713 if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11714 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11715 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11716 QL_SUCCESS) {
11717 EL(ha, "dma alloc failed, rval=%xh\n", rval);
11718 return (rval);
11719 }
11720 }
11721
11722 /* Enable flash write */
11723 if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11724 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11725 ql_free_phys(ha, &dmabuf);
11726 return (rval);
11727 }
11728
11729 /* setup mask of address range within a sector */
11730 rest_addr = (xp->fdesc.block_size - 1) >> 2;
11731
11732 faddr = faddr >> 2; /* flash gets 32 bit words */
11733
11734 /*
11735 * Write data to flash.
11736 */
11737 cnt = 0;
11738 size = (size + 3) >> 2; /* Round up & convert to dwords */
11739
11740 while (cnt < size) {
11741 /* Beginning of a sector? */
11742 if ((faddr & rest_addr) == 0) {
11743 if (CFG_IST(ha, CFG_CTRL_82XX)) {
11744 fdata = ha->flash_data_addr | faddr;
11745 rval = ql_8021_rom_erase(ha, fdata);
11746 if (rval != QL_SUCCESS) {
11747 EL(ha, "8021 erase sector status="
11748 "%xh, start=%xh, end=%xh"
11749 "\n", rval, fdata,
11750 fdata + rest_addr);
11751 break;
11752 }
11753 } else if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
11754 fdata = ha->flash_data_addr | faddr;
11755 rval = ql_flash_access(ha,
11756 FAC_ERASE_SECTOR, fdata, fdata +
11757 rest_addr, 0);
11758 if (rval != QL_SUCCESS) {
11759 EL(ha, "erase sector status="
11760 "%xh, start=%xh, end=%xh"
11761 "\n", rval, fdata,
11762 fdata + rest_addr);
11763 break;
11764 }
11765 } else {
11766 fdata = (faddr & ~rest_addr) << 2;
11767 fdata = (fdata & 0xff00) |
11768 (fdata << 16 & 0xff0000) |
11769 (fdata >> 16 & 0xff);
11770
11771 if (rest_addr == 0x1fff) {
11772 /* 32kb sector block erase */
11773 rval = ql_24xx_write_flash(ha,
11774 FLASH_CONF_ADDR | 0x0352,
11775 fdata);
11776 } else {
11777 /* 64kb sector block erase */
11778 rval = ql_24xx_write_flash(ha,
11779 FLASH_CONF_ADDR | 0x03d8,
11780 fdata);
11781 }
11782 if (rval != QL_SUCCESS) {
11783 EL(ha, "Unable to flash sector"
11784 ": address=%xh\n", faddr);
11785 break;
11786 }
11787 }
11788 }
11789
11790 /* Write data */
11791 if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT) &&
11792 ((faddr & 0x3f) == 0)) {
11793 /*
11794 * Limit write up to sector boundary.
11795 */
11796 wc = ((~faddr & (rest_addr>>1)) + 1);
11797
11798 if (size - cnt < wc) {
11799 wc = size - cnt;
11800 }
11801
11802 ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11803 (uint8_t *)dmabuf.bp, wc<<2,
11804 DDI_DEV_AUTOINCR);
11805
11806 rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11807 faddr, dmabuf.cookie.dmac_laddress, wc);
11808 if (rval != QL_SUCCESS) {
11809 EL(ha, "unable to dma to flash "
11810 "address=%xh\n", faddr << 2);
11811 break;
11812 }
11813
11814 cnt += wc;
11815 faddr += wc;
11816 dp += wc << 2;
11817 } else {
11818 fdata = *dp++;
11819 fdata |= *dp++ << 8;
11820 fdata |= *dp++ << 16;
11821 fdata |= *dp++ << 24;
11822 rval = ql_24xx_write_flash(ha,
11823 ha->flash_data_addr | faddr, fdata);
11824 if (rval != QL_SUCCESS) {
11825 EL(ha, "Unable to program flash "
11826 "address=%xh data=%xh\n", faddr,
11827 *dp);
11828 break;
11829 }
11830 cnt++;
11831 faddr++;
11832
11833 /* Allow other system activity. */
11834 if (cnt % 0x1000 == 0) {
11835 ql_delay(ha, 10000);
11836 }
11837 }
11838 }
11839
11840 ql_24xx_protect_flash(ha);
11841
11842 if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11843 ql_free_phys(ha, &dmabuf);
11844 }
11845
11846 if (rval != QL_SUCCESS) {
11847 EL(ha, "failed=%xh\n", rval);
11848 } else {
11849 /*EMPTY*/
11850 QL_PRINT_3(ha, "done\n");
11851 }
11852 return (rval);
11853 }
11854
11855 /*
11856 * ql_24xx_read_flash
11857 * Reads a 32bit word from ISP24xx NVRAM/FLASH.
11858 *
11859 * Input:
11860 * ha: adapter state pointer.
11861 * faddr: NVRAM/FLASH address.
11862 * bp: data pointer.
11863 *
11864 * Returns:
11865 * ql local function return status code.
11866 *
11867 * Context:
11868 * Kernel context.
11869 */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 8021 parts go through the dedicated ROM-read interface. */
		if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Kick off the read: data flag clear selects a READ cycle. */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	for (timer = 300000; timer; timer--) {
		/* Hardware sets FLASH_DATA_FLAG when the data is ready. */
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/*
	 * The data register is read unconditionally; on failure *bp holds
	 * whatever the register contains, and rval tells the caller not to
	 * trust it.
	 */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11910
11911 /*
11912 * ql_24xx_write_flash
11913 * Writes a 32bit word to ISP24xx NVRAM/FLASH.
11914 *
11915 * Input:
11916 * ha: adapter state pointer.
11917 * addr: NVRAM/FLASH address.
11918 * value: data.
11919 *
11920 * Returns:
11921 * ql local function return status code.
11922 *
11923 * Context:
11924 * Kernel context.
11925 */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 8021 parts go through the dedicated ROM-write interface. */
		if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Load data first, then start the cycle via the address register. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/*
				 * Config-space writes (erase/status
				 * commands) also poll the flash part's
				 * status register busy bit (BIT_0).
				 */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x105, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11973 /*
11974 * ql_24xx_unprotect_flash
11975 * Enable writes
11976 *
11977 * Input:
11978 * ha: adapter state pointer.
11979 *
11980 * Returns:
11981 * ql local function return status code.
11982 *
11983 * Context:
11984 * Kernel context.
11985 */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata, timer;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * NOTE(review): the status register is written twice with
		 * the same enable bits - presumably the first write is a
		 * deliberate dummy/wake-up; confirm against the 8021
		 * programming guide before simplifying.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			/*
			 * Firmware owns the flash; take the FAC semaphore,
			 * retrying for up to ~3000 ticks unless an ISP
			 * abort is pending.
			 */
			for (timer = 3000; timer; timer--) {
				if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
					EL(ha, "ISP_ABORT_NEEDED done\n");
					return (QL_ABORTED);
				}
				rval = ql_flash_access(ha, FAC_SEMA_LOCK,
				    0, 0, NULL);
				if (rval == QL_SUCCESS ||
				    rval == QL_FUNCTION_TIMEOUT) {
					EL(ha, "lock status=%xh\n", rval);
					break;
				}
				delay(1);
			}

			/* Drop the semaphore if write-enable fails. */
			if (rval == QL_SUCCESS &&
			    (rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0,
			    0, NULL)) != QL_SUCCESS) {
				EL(ha, "WRT_ENABLE status=%xh\n", rval);
				(void) ql_flash_access(ha, FAC_SEMA_UNLOCK,
				    0, 0, NULL);
			}
		} else {
			/* No firmware running; nothing to negotiate with. */
			rval = QL_SUCCESS;
		}
		QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
		return (rval);
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/* Sector/Block Protection Register Lock (SST, ST, ATMEL). */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	/*
	 * Remove block write protection (SST and ST)
	 * Global unprotect sectors (ATMEL).
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Per-sector unprotect for parts that require it. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		/* Additional high-sector addresses beyond the first 16. */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(ha, "done\n");

	return (QL_SUCCESS);
}
12069
12070 /*
12071 * ql_24xx_protect_flash
12072 * Disable writes
12073 *
12074 * Input:
12075 * ha: adapter state pointer.
12076 *
12077 * Context:
12078 * Kernel context.
12079 */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata, timer;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * Write-enable first so the status register accepts the
		 * write-disable bits that follow.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return;
	}
	if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			/*
			 * Firmware owns the flash; take the FAC semaphore,
			 * retrying for up to ~3000 ticks unless an ISP
			 * abort is pending.
			 */
			for (timer = 3000; timer; timer--) {
				if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
					EL(ha, "ISP_ABORT_NEEDED done\n");
					return;
				}
				rval = ql_flash_access(ha, FAC_SEMA_LOCK,
				    0, 0, NULL);
				if (rval == QL_SUCCESS ||
				    rval == QL_FUNCTION_TIMEOUT) {
					if (rval != QL_SUCCESS) {
						EL(ha, "lock status=%xh\n",
						    rval);
					}
					break;
				}
				delay(1);
			}

			/* Drop the semaphore if write-protect fails. */
			if (rval == QL_SUCCESS &&
			    (rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0,
			    0, NULL)) != QL_SUCCESS) {
				EL(ha, "protect status=%xh\n", rval);
				(void) ql_flash_access(ha, FAC_SEMA_UNLOCK, 0,
				    0, NULL);
			}
			QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
			return;
		}
	} else {
		/*
		 * Enable flash write - writes are needed below to program
		 * the protection bits themselves.
		 */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.protect_sector_cmd, fdata);
		}
		/* Additional high-sector addresses beyond the first 16. */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);
	}

	/* Remove Sector Protection Registers Locked (SPRL) bit. */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	/* Finally set the write-disable bits in the status register. */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_disable_bits);

	/* Disable flash write. */
	if (!CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(ha, "done\n");
}
12169
12170 /*
12171 * ql_dump_firmware
12172 * Save RISC code state information.
12173 *
12174 * Input:
12175 * ha = adapter state pointer.
12176 *
12177 * Returns:
12178 * QL local function return status code.
12179 *
12180 * Context:
12181 * Kernel context.
12182 */
int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int		rval;
	clock_t		timer = drv_usectohz(30000000);
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	QL_DUMP_LOCK(ha);

	/*
	 * If a dump is in progress, or a valid dump exists that nobody has
	 * uploaded yet, don't overwrite it - report success and leave.
	 */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(ha, "done\n");
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Quiesce driver activity before touching the chip. */
	(void) ql_stall_driver(ha, 0);

	/* Dump firmware. */
	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 82xx: no mailbox lock needed for the dump path. */
		rval = ql_binary_fw_dump(ha, FALSE);
	} else {
		rval = ql_binary_fw_dump(ha, TRUE);
	}

	/* Do abort to force restart. */
	ql_restart_driver(ha);
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (DRIVER_SUSPENDED(ha)) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a prior dump is still pending upload. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
12248
12249 /*
12250 * ql_binary_fw_dump
12251 * Dumps binary data from firmware.
12252 *
12253 * Input:
12254 * ha = adapter state pointer.
12255 * lock_needed = mailbox lock needed.
12256 *
12257 * Returns:
12258 * ql local function return status code.
12259 *
12260 * Context:
12261 * Interrupt or Kernel context, no mailbox commands allowed.
12262 */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	uint32_t	cnt, index;
	clock_t		timer;
	int		rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* This call satisfies any pending dump request. */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~FW_DUMP_NEEDED;
	ADAPTER_STATE_UNLOCK(ha);

	if (CFG_IST(ha, CFG_CTRL_82XX) && ha->md_capture_size == 0) {
		EL(ha, "8021 not supported\n");
		return (QL_NOT_SUPPORTED);
	}

	QL_DUMP_LOCK(ha);

	/* Don't clobber a dump in progress or a valid un-uploaded one. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Claim the dump state before dropping the lock. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP, NULL);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);
		timer = ((MAILBOX_TOV + 6) * drv_usectohz(1000000));

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer, TR_CLOCK_TICK) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/*
	 * Size the dump buffer per chip family.  For 25xx/81xx/83xx the
	 * dump also carries the request ring(s), all response rings, f/w
	 * extended memory, and 16 bytes of per-response-queue overhead.
	 */
	if (CFG_IST(ha, CFG_CTRL_24XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
		    ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
		index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    cnt + index + ha->fw_ext_memory_size +
		    (ha->rsp_queues_cnt * 16));

	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
		    ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
		index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
		    cnt + index + ha->fw_ext_memory_size +
		    (ha->rsp_queues_cnt * 16));

	} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
		cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
		    ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
		index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

		ha->ql_dump_size = (uint32_t)(sizeof (ql_83xx_fw_dump_t) +
		    cnt + index + ha->fw_ext_memory_size +
		    (ha->rsp_queues_cnt * 16));
	} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 82xx uses the firmware minidump capture size. */
		ha->ql_dump_size = ha->md_capture_size;
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	if (CFG_IST(ha, CFG_CTRL_27XX)) {
		/* 27xx allocates its own template-driven dump buffer. */
		rval = ql_27xx_binary_fw_dump(ha);
	} else {
		if ((ha->ql_dump_ptr =
		    kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) == NULL) {
			rval = QL_MEMORY_ALLOC_FAILED;
		} else {
			/* Dispatch to the family-specific dump routine. */
			if (CFG_IST(ha, CFG_CTRL_2363)) {
				rval = ql_2300_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
				rval = ql_81xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
				rval = ql_83xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
				rval = ql_25xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
				rval = ql_24xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
				/* 82xx: reset captures the minidump. */
				(void) ql_8021_reset_fw(ha);
				rval = QL_SUCCESS;
			} else {
				rval = ql_2200_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			}
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	if (rval != QL_SUCCESS) {
		/* Failed dump: release the buffer and clear all state. */
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/* Mark the dump valid and awaiting upload. */
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
12439
12440 /*
12441 * ql_ascii_fw_dump
12442 * Converts firmware binary dump to ascii.
12443 *
12444 * Input:
12445 * ha = adapter state pointer.
12446 * bptr = buffer pointer.
12447 *
12448 * Returns:
12449 * Amount of data buffer used.
12450 *
12451 * Context:
12452 * Kernel context.
12453 */
12454 size_t
12455 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
12456 {
12457 uint32_t cnt;
12458 caddr_t bp;
12459 int mbox_cnt;
12460 ql_adapter_state_t *ha = vha->pha;
12461 ql_fw_dump_t *fw = ha->ql_dump_ptr;
12462
12463 if (CFG_IST(ha, CFG_CTRL_24XX)) {
12464 return (ql_24xx_ascii_fw_dump(ha, bufp));
12465 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12466 return (ql_25xx_ascii_fw_dump(ha, bufp));
12467 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12468 return (ql_81xx_ascii_fw_dump(ha, bufp));
12469 } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12470 return (ql_8021_ascii_fw_dump(ha, bufp));
12471 } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12472 return (ql_83xx_ascii_fw_dump(ha, bufp));
12473 } else if (CFG_IST(ha, CFG_CTRL_27XX)) {
12474 return (ql_27xx_ascii_fw_dump(ha, bufp));
12475 }
12476
12477 QL_PRINT_3(ha, "started\n");
12478
12479 if (CFG_IST(ha, CFG_CTRL_23XX)) {
12480 (void) sprintf(bufp, "\nISP 2300IP ");
12481 } else if (CFG_IST(ha, CFG_CTRL_63XX)) {
12482 (void) sprintf(bufp, "\nISP 2322/6322FLX ");
12483 } else {
12484 (void) sprintf(bufp, "\nISP 2200IP ");
12485 }
12486
12487 bp = bufp + strlen(bufp);
12488 (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12489 ha->fw_major_version, ha->fw_minor_version,
12490 ha->fw_subminor_version);
12491
12492 (void) strcat(bufp, "\nPBIU Registers:");
12493 bp = bufp + strlen(bufp);
12494 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12495 if (cnt % 8 == 0) {
12496 *bp++ = '\n';
12497 }
12498 (void) sprintf(bp, "%04x ", fw->pbiu_reg[cnt]);
12499 bp = bp + 6;
12500 }
12501
12502 if (CFG_IST(ha, CFG_CTRL_2363)) {
12503 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12504 "registers:");
12505 bp = bufp + strlen(bufp);
12506 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12507 if (cnt % 8 == 0) {
12508 *bp++ = '\n';
12509 }
12510 (void) sprintf(bp, "%04x ", fw->risc_host_reg[cnt]);
12511 bp = bp + 6;
12512 }
12513 }
12514
12515 (void) strcat(bp, "\n\nMailbox Registers:");
12516 bp = bufp + strlen(bufp);
12517 mbox_cnt = CFG_IST(ha, CFG_CTRL_2363) ? 16 : 8;
12518 for (cnt = 0; cnt < mbox_cnt; cnt++) {
12519 if (cnt % 8 == 0) {
12520 *bp++ = '\n';
12521 }
12522 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12523 bp = bp + 6;
12524 }
12525
12526 if (CFG_IST(ha, CFG_CTRL_2363)) {
12527 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12528 bp = bufp + strlen(bufp);
12529 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12530 if (cnt % 8 == 0) {
12531 *bp++ = '\n';
12532 }
12533 (void) sprintf(bp, "%04x ", fw->resp_dma_reg[cnt]);
12534 bp = bp + 6;
12535 }
12536 }
12537
12538 (void) strcat(bp, "\n\nDMA Registers:");
12539 bp = bufp + strlen(bufp);
12540 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12541 if (cnt % 8 == 0) {
12542 *bp++ = '\n';
12543 }
12544 (void) sprintf(bp, "%04x ", fw->dma_reg[cnt]);
12545 bp = bp + 6;
12546 }
12547
12548 (void) strcat(bp, "\n\nRISC Hardware Registers:");
12549 bp = bufp + strlen(bufp);
12550 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12551 if (cnt % 8 == 0) {
12552 *bp++ = '\n';
12553 }
12554 (void) sprintf(bp, "%04x ", fw->risc_hdw_reg[cnt]);
12555 bp = bp + 6;
12556 }
12557
12558 (void) strcat(bp, "\n\nRISC GP0 Registers:");
12559 bp = bufp + strlen(bufp);
12560 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12561 if (cnt % 8 == 0) {
12562 *bp++ = '\n';
12563 }
12564 (void) sprintf(bp, "%04x ", fw->risc_gp0_reg[cnt]);
12565 bp = bp + 6;
12566 }
12567
12568 (void) strcat(bp, "\n\nRISC GP1 Registers:");
12569 bp = bufp + strlen(bufp);
12570 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12571 if (cnt % 8 == 0) {
12572 *bp++ = '\n';
12573 }
12574 (void) sprintf(bp, "%04x ", fw->risc_gp1_reg[cnt]);
12575 bp = bp + 6;
12576 }
12577
12578 (void) strcat(bp, "\n\nRISC GP2 Registers:");
12579 bp = bufp + strlen(bufp);
12580 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12581 if (cnt % 8 == 0) {
12582 *bp++ = '\n';
12583 }
12584 (void) sprintf(bp, "%04x ", fw->risc_gp2_reg[cnt]);
12585 bp = bp + 6;
12586 }
12587
12588 (void) strcat(bp, "\n\nRISC GP3 Registers:");
12589 bp = bufp + strlen(bufp);
12590 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12591 if (cnt % 8 == 0) {
12592 *bp++ = '\n';
12593 }
12594 (void) sprintf(bp, "%04x ", fw->risc_gp3_reg[cnt]);
12595 bp = bp + 6;
12596 }
12597
12598 (void) strcat(bp, "\n\nRISC GP4 Registers:");
12599 bp = bufp + strlen(bufp);
12600 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12601 if (cnt % 8 == 0) {
12602 *bp++ = '\n';
12603 }
12604 (void) sprintf(bp, "%04x ", fw->risc_gp4_reg[cnt]);
12605 bp = bp + 6;
12606 }
12607
12608 (void) strcat(bp, "\n\nRISC GP5 Registers:");
12609 bp = bufp + strlen(bufp);
12610 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12611 if (cnt % 8 == 0) {
12612 *bp++ = '\n';
12613 }
12614 (void) sprintf(bp, "%04x ", fw->risc_gp5_reg[cnt]);
12615 bp = bp + 6;
12616 }
12617
12618 (void) strcat(bp, "\n\nRISC GP6 Registers:");
12619 bp = bufp + strlen(bufp);
12620 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12621 if (cnt % 8 == 0) {
12622 *bp++ = '\n';
12623 }
12624 (void) sprintf(bp, "%04x ", fw->risc_gp6_reg[cnt]);
12625 bp = bp + 6;
12626 }
12627
12628 (void) strcat(bp, "\n\nRISC GP7 Registers:");
12629 bp = bufp + strlen(bufp);
12630 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12631 if (cnt % 8 == 0) {
12632 *bp++ = '\n';
12633 }
12634 (void) sprintf(bp, "%04x ", fw->risc_gp7_reg[cnt]);
12635 bp = bp + 6;
12636 }
12637
12638 (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12639 bp = bufp + strlen(bufp);
12640 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12641 if (cnt == 16 && !CFG_IST(ha, CFG_CTRL_2363)) {
12642 break;
12643 }
12644 if (cnt % 8 == 0) {
12645 *bp++ = '\n';
12646 }
12647 (void) sprintf(bp, "%04x ", fw->frame_buf_hdw_reg[cnt]);
12648 bp = bp + 6;
12649 }
12650
12651 (void) strcat(bp, "\n\nFPM B0 Registers:");
12652 bp = bufp + strlen(bufp);
12653 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12654 if (cnt % 8 == 0) {
12655 *bp++ = '\n';
12656 }
12657 (void) sprintf(bp, "%04x ", fw->fpm_b0_reg[cnt]);
12658 bp = bp + 6;
12659 }
12660
12661 (void) strcat(bp, "\n\nFPM B1 Registers:");
12662 bp = bufp + strlen(bufp);
12663 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12664 if (cnt % 8 == 0) {
12665 *bp++ = '\n';
12666 }
12667 (void) sprintf(bp, "%04x ", fw->fpm_b1_reg[cnt]);
12668 bp = bp + 6;
12669 }
12670
12671 if (CFG_IST(ha, CFG_CTRL_2363)) {
12672 (void) strcat(bp, "\n\nCode RAM Dump:");
12673 bp = bufp + strlen(bufp);
12674 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12675 if (cnt % 8 == 0) {
12676 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12677 bp = bp + 8;
12678 }
12679 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12680 bp = bp + 6;
12681 }
12682
12683 (void) strcat(bp, "\n\nStack RAM Dump:");
12684 bp = bufp + strlen(bufp);
12685 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12686 if (cnt % 8 == 0) {
12687 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12688 bp = bp + 8;
12689 }
12690 (void) sprintf(bp, "%04x ", fw->stack_ram[cnt]);
12691 bp = bp + 6;
12692 }
12693
12694 (void) strcat(bp, "\n\nData RAM Dump:");
12695 bp = bufp + strlen(bufp);
12696 for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12697 if (cnt % 8 == 0) {
12698 (void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12699 bp = bp + 8;
12700 }
12701 (void) sprintf(bp, "%04x ", fw->data_ram[cnt]);
12702 bp = bp + 6;
12703 }
12704 } else {
12705 (void) strcat(bp, "\n\nRISC SRAM:");
12706 bp = bufp + strlen(bufp);
12707 for (cnt = 0; cnt < 0xf000; cnt++) {
12708 if (cnt % 8 == 0) {
12709 (void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12710 bp = bp + 7;
12711 }
12712 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12713 bp = bp + 6;
12714 }
12715 }
12716
12717 (void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12718 bp += strlen(bp);
12719
12720 (void) sprintf(bp, "\n\nRequest Queue");
12721 bp += strlen(bp);
12722 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12723 if (cnt % 8 == 0) {
12724 (void) sprintf(bp, "\n%08x: ", cnt);
12725 bp += strlen(bp);
12726 }
12727 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12728 bp += strlen(bp);
12729 }
12730
12731 (void) sprintf(bp, "\n\nResponse Queue");
12732 bp += strlen(bp);
12733 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12734 if (cnt % 8 == 0) {
12735 (void) sprintf(bp, "\n%08x: ", cnt);
12736 bp += strlen(bp);
12737 }
12738 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12739 bp += strlen(bp);
12740 }
12741
12742 (void) sprintf(bp, "\n");
12743
12744 QL_PRINT_10(ha, "done, size=0x%x\n", strlen(bufp));
12745
12746 return (strlen(bufp));
12747 }
12748
12749 /*
12750 * ql_24xx_ascii_fw_dump
12751 * Converts ISP24xx firmware binary dump to ascii.
12752 *
12753 * Input:
12754 * ha = adapter state pointer.
 *	bufp = buffer pointer.
12756 *
12757 * Returns:
12758 * Amount of data buffer used.
12759 *
12760 * Context:
12761 * Kernel context.
12762 */
12763 static size_t
12764 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12765 {
12766 uint32_t cnt;
12767 caddr_t bp = bufp;
12768 ql_24xx_fw_dump_t *fw = ha->ql_dump_ptr;
12769
12770 QL_PRINT_3(ha, "started\n");
12771
12772 (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12773 ha->fw_major_version, ha->fw_minor_version,
12774 ha->fw_subminor_version, ha->fw_attributes);
12775 bp += strlen(bp);
12776
12777 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12778
12779 (void) strcat(bp, "\nHost Interface Registers");
12780 bp += strlen(bp);
12781 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12782 if (cnt % 8 == 0) {
12783 (void) sprintf(bp++, "\n");
12784 }
12785
12786 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12787 bp += 9;
12788 }
12789
12790 (void) sprintf(bp, "\n\nMailbox Registers");
12791 bp += strlen(bp);
12792 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12793 if (cnt % 16 == 0) {
12794 (void) sprintf(bp++, "\n");
12795 }
12796
12797 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12798 bp += 5;
12799 }
12800
12801 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12802 bp += strlen(bp);
12803 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12804 if (cnt % 8 == 0) {
12805 (void) sprintf(bp++, "\n");
12806 }
12807
12808 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12809 bp += 9;
12810 }
12811
12812 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12813 bp += strlen(bp);
12814 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12815 if (cnt % 8 == 0) {
12816 (void) sprintf(bp++, "\n");
12817 }
12818
12819 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12820 bp += 9;
12821 }
12822
12823 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12824 bp += strlen(bp);
12825 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12826 if (cnt % 8 == 0) {
12827 (void) sprintf(bp++, "\n");
12828 }
12829
12830 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12831 bp += 9;
12832 }
12833
12834 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12835 bp += strlen(bp);
12836 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12837 if (cnt % 8 == 0) {
12838 (void) sprintf(bp++, "\n");
12839 }
12840
12841 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12842 bp += 9;
12843 }
12844
12845 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12846 bp += strlen(bp);
12847 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12848 if (cnt % 8 == 0) {
12849 (void) sprintf(bp++, "\n");
12850 }
12851
12852 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12853 bp += 9;
12854 }
12855
12856 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12857 bp += strlen(bp);
12858 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12859 if (cnt % 8 == 0) {
12860 (void) sprintf(bp++, "\n");
12861 }
12862
12863 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12864 bp += 9;
12865 }
12866
12867 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12868 bp += strlen(bp);
12869 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12870 if (cnt % 8 == 0) {
12871 (void) sprintf(bp++, "\n");
12872 }
12873
12874 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12875 bp += 9;
12876 }
12877
12878 (void) sprintf(bp, "\n\nCommand DMA Registers");
12879 bp += strlen(bp);
12880 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12881 if (cnt % 8 == 0) {
12882 (void) sprintf(bp++, "\n");
12883 }
12884
12885 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12886 bp += 9;
12887 }
12888
12889 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12890 bp += strlen(bp);
12891 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12892 if (cnt % 8 == 0) {
12893 (void) sprintf(bp++, "\n");
12894 }
12895
12896 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12897 bp += 9;
12898 }
12899
12900 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12901 bp += strlen(bp);
12902 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12903 if (cnt % 8 == 0) {
12904 (void) sprintf(bp++, "\n");
12905 }
12906
12907 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12908 bp += 9;
12909 }
12910
12911 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12912 bp += strlen(bp);
12913 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12914 if (cnt % 8 == 0) {
12915 (void) sprintf(bp++, "\n");
12916 }
12917
12918 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12919 bp += 9;
12920 }
12921
12922 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12923 bp += strlen(bp);
12924 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12925 if (cnt % 8 == 0) {
12926 (void) sprintf(bp++, "\n");
12927 }
12928
12929 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12930 bp += 9;
12931 }
12932
12933 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12934 bp += strlen(bp);
12935 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12936 if (cnt % 8 == 0) {
12937 (void) sprintf(bp++, "\n");
12938 }
12939
12940 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12941 bp += 9;
12942 }
12943
12944 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12945 bp += strlen(bp);
12946 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12947 if (cnt % 8 == 0) {
12948 (void) sprintf(bp++, "\n");
12949 }
12950
12951 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12952 bp += 9;
12953 }
12954
12955 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12956 bp += strlen(bp);
12957 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12958 if (cnt % 8 == 0) {
12959 (void) sprintf(bp++, "\n");
12960 }
12961
12962 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12963 bp += 9;
12964 }
12965
12966 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12967 bp += strlen(bp);
12968 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12969 if (cnt % 8 == 0) {
12970 (void) sprintf(bp++, "\n");
12971 }
12972
12973 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12974 bp += 9;
12975 }
12976
12977 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12978 bp += strlen(bp);
12979 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12980 if (cnt % 8 == 0) {
12981 (void) sprintf(bp++, "\n");
12982 }
12983
12984 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12985 bp += 9;
12986 }
12987
12988 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12989 bp += strlen(bp);
12990 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12991 if (cnt % 8 == 0) {
12992 (void) sprintf(bp++, "\n");
12993 }
12994
12995 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12996 bp += 9;
12997 }
12998
12999 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
13000 bp += strlen(bp);
13001 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
13002 if (cnt % 8 == 0) {
13003 (void) sprintf(bp++, "\n");
13004 }
13005
13006 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
13007 bp += 9;
13008 }
13009
13010 (void) sprintf(bp, "\n\nRISC GP Registers");
13011 bp += strlen(bp);
13012 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13013 if (cnt % 8 == 0) {
13014 (void) sprintf(bp++, "\n");
13015 }
13016
13017 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13018 bp += 9;
13019 }
13020
13021 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
13022 bp += strlen(bp);
13023 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
13024 if (cnt % 8 == 0) {
13025 (void) sprintf(bp++, "\n");
13026 }
13027
13028 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
13029 bp += 9;
13030 }
13031
13032 (void) sprintf(bp, "\n\nLMC Registers");
13033 bp += strlen(bp);
13034 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13035 if (cnt % 8 == 0) {
13036 (void) sprintf(bp++, "\n");
13037 }
13038
13039 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13040 bp += 9;
13041 }
13042
13043 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13044 bp += strlen(bp);
13045 for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
13046 if (cnt % 8 == 0) {
13047 (void) sprintf(bp++, "\n");
13048 }
13049
13050 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13051 bp += 9;
13052 }
13053
13054 (void) sprintf(bp, "\n\nFB Hardware Registers");
13055 bp += strlen(bp);
13056 for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
13057 if (cnt % 8 == 0) {
13058 (void) sprintf(bp++, "\n");
13059 }
13060
13061 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13062 bp += 9;
13063 }
13064
13065 (void) sprintf(bp, "\n\nCode RAM");
13066 bp += strlen(bp);
13067 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13068 if (cnt % 8 == 0) {
13069 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13070 bp += 11;
13071 }
13072
13073 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13074 bp += 9;
13075 }
13076
13077 (void) sprintf(bp, "\n\nExternal Memory");
13078 bp += strlen(bp);
13079 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13080 if (cnt % 8 == 0) {
13081 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13082 bp += 11;
13083 }
13084 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13085 bp += 9;
13086 }
13087
13088 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13089 bp += strlen(bp);
13090
13091 (void) sprintf(bp, "\n\nRequest Queue");
13092 bp += strlen(bp);
13093 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13094 if (cnt % 8 == 0) {
13095 (void) sprintf(bp, "\n%08x: ", cnt);
13096 bp += strlen(bp);
13097 }
13098 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13099 bp += strlen(bp);
13100 }
13101
13102 (void) sprintf(bp, "\n\nResponse Queue");
13103 bp += strlen(bp);
13104 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13105 if (cnt % 8 == 0) {
13106 (void) sprintf(bp, "\n%08x: ", cnt);
13107 bp += strlen(bp);
13108 }
13109 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13110 bp += strlen(bp);
13111 }
13112
13113 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13114 (ha->fwexttracebuf.bp != NULL)) {
13115 uint32_t cnt_b = 0;
13116 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13117
13118 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13119 bp += strlen(bp);
13120 /* show data address as a byte address, data as long words */
13121 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13122 cnt_b = cnt * 4;
13123 if (cnt_b % 32 == 0) {
13124 (void) sprintf(bp, "\n%08x: ",
13125 (int)(w64 + cnt_b));
13126 bp += 11;
13127 }
13128 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13129 bp += 9;
13130 }
13131 }
13132
13133 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13134 (ha->fwfcetracebuf.bp != NULL)) {
13135 uint32_t cnt_b = 0;
13136 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13137
13138 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13139 bp += strlen(bp);
13140 /* show data address as a byte address, data as long words */
13141 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13142 cnt_b = cnt * 4;
13143 if (cnt_b % 32 == 0) {
13144 (void) sprintf(bp, "\n%08x: ",
13145 (int)(w64 + cnt_b));
13146 bp += 11;
13147 }
13148 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13149 bp += 9;
13150 }
13151 }
13152
13153 (void) sprintf(bp, "\n\n");
13154 bp += strlen(bp);
13155
13156 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13157
13158 QL_PRINT_10(ha, "done=%xh\n", cnt);
13159
13160 return (cnt);
13161 }
13162
13163 /*
13164 * ql_25xx_ascii_fw_dump
13165 * Converts ISP25xx firmware binary dump to ascii.
13166 *
13167 * Input:
13168 * ha = adapter state pointer.
 *	bufp = buffer pointer.
13170 *
13171 * Returns:
13172 * Amount of data buffer used.
13173 *
13174 * Context:
13175 * Kernel context.
13176 */
13177 static size_t
13178 ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
13179 {
13180 uint32_t cnt, cnt1, *dp, *dp2;
13181 caddr_t bp = bufp;
13182 ql_25xx_fw_dump_t *fw = ha->ql_dump_ptr;
13183
13184 QL_PRINT_3(ha, "started\n");
13185
13186 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
13187 ha->fw_major_version, ha->fw_minor_version,
13188 ha->fw_subminor_version, ha->fw_attributes);
13189 bp += strlen(bp);
13190
13191 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
13192 bp += strlen(bp);
13193
13194 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
13195 bp += strlen(bp);
13196
13197 (void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
13198 fw->aer_ues);
13199 bp += strlen(bp);
13200
13201 (void) sprintf(bp, "\nHostRisc Registers");
13202 bp += strlen(bp);
13203 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
13204 if (cnt % 8 == 0) {
13205 (void) sprintf(bp++, "\n");
13206 }
13207 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
13208 bp += 9;
13209 }
13210
13211 (void) sprintf(bp, "\n\nPCIe Registers");
13212 bp += strlen(bp);
13213 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
13214 if (cnt % 8 == 0) {
13215 (void) sprintf(bp++, "\n");
13216 }
13217 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
13218 bp += 9;
13219 }
13220
13221 (void) strcat(bp, "\n\nHost Interface Registers");
13222 bp += strlen(bp);
13223 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
13224 if (cnt % 8 == 0) {
13225 (void) sprintf(bp++, "\n");
13226 }
13227 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
13228 bp += 9;
13229 }
13230
13231 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
13232 bp += strlen(bp);
13233 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
13234 if (cnt % 8 == 0) {
13235 (void) sprintf(bp++, "\n");
13236 }
13237 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
13238 bp += 9;
13239 }
13240
13241 (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
13242 fw->risc_io);
13243 bp += strlen(bp);
13244
13245 (void) sprintf(bp, "\n\nMailbox Registers");
13246 bp += strlen(bp);
13247 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
13248 if (cnt % 16 == 0) {
13249 (void) sprintf(bp++, "\n");
13250 }
13251 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
13252 bp += 5;
13253 }
13254
13255 (void) sprintf(bp, "\n\nXSEQ GP Registers");
13256 bp += strlen(bp);
13257 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
13258 if (cnt % 8 == 0) {
13259 (void) sprintf(bp++, "\n");
13260 }
13261 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
13262 bp += 9;
13263 }
13264
13265 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
13266 bp += strlen(bp);
13267 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
13268 if (cnt % 8 == 0) {
13269 (void) sprintf(bp++, "\n");
13270 }
13271 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
13272 bp += 9;
13273 }
13274
13275 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
13276 bp += strlen(bp);
13277 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
13278 if (cnt % 8 == 0) {
13279 (void) sprintf(bp++, "\n");
13280 }
13281 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
13282 bp += 9;
13283 }
13284
13285 (void) sprintf(bp, "\n\nRSEQ GP Registers");
13286 bp += strlen(bp);
13287 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
13288 if (cnt % 8 == 0) {
13289 (void) sprintf(bp++, "\n");
13290 }
13291 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
13292 bp += 9;
13293 }
13294
13295 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
13296 bp += strlen(bp);
13297 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
13298 if (cnt % 8 == 0) {
13299 (void) sprintf(bp++, "\n");
13300 }
13301 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
13302 bp += 9;
13303 }
13304
13305 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
13306 bp += strlen(bp);
13307 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
13308 if (cnt % 8 == 0) {
13309 (void) sprintf(bp++, "\n");
13310 }
13311 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
13312 bp += 9;
13313 }
13314
13315 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
13316 bp += strlen(bp);
13317 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
13318 if (cnt % 8 == 0) {
13319 (void) sprintf(bp++, "\n");
13320 }
13321 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
13322 bp += 9;
13323 }
13324
13325 (void) sprintf(bp, "\n\nASEQ GP Registers");
13326 bp += strlen(bp);
13327 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
13328 if (cnt % 8 == 0) {
13329 (void) sprintf(bp++, "\n");
13330 }
13331 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
13332 bp += 9;
13333 }
13334
13335 (void) sprintf(bp, "\n\nASEQ-0 Registers");
13336 bp += strlen(bp);
13337 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
13338 if (cnt % 8 == 0) {
13339 (void) sprintf(bp++, "\n");
13340 }
13341 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
13342 bp += 9;
13343 }
13344
13345 (void) sprintf(bp, "\n\nASEQ-1 Registers");
13346 bp += strlen(bp);
13347 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
13348 if (cnt % 8 == 0) {
13349 (void) sprintf(bp++, "\n");
13350 }
13351 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
13352 bp += 9;
13353 }
13354
13355 (void) sprintf(bp, "\n\nASEQ-2 Registers");
13356 bp += strlen(bp);
13357 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
13358 if (cnt % 8 == 0) {
13359 (void) sprintf(bp++, "\n");
13360 }
13361 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
13362 bp += 9;
13363 }
13364
13365 (void) sprintf(bp, "\n\nCommand DMA Registers");
13366 bp += strlen(bp);
13367 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
13368 if (cnt % 8 == 0) {
13369 (void) sprintf(bp++, "\n");
13370 }
13371 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
13372 bp += 9;
13373 }
13374
13375 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
13376 bp += strlen(bp);
13377 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
13378 if (cnt % 8 == 0) {
13379 (void) sprintf(bp++, "\n");
13380 }
13381 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
13382 bp += 9;
13383 }
13384
13385 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
13386 bp += strlen(bp);
13387 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
13388 if (cnt % 8 == 0) {
13389 (void) sprintf(bp++, "\n");
13390 }
13391 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
13392 bp += 9;
13393 }
13394
13395 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
13396 bp += strlen(bp);
13397 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
13398 if (cnt % 8 == 0) {
13399 (void) sprintf(bp++, "\n");
13400 }
13401 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
13402 bp += 9;
13403 }
13404
13405 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
13406 bp += strlen(bp);
13407 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
13408 if (cnt % 8 == 0) {
13409 (void) sprintf(bp++, "\n");
13410 }
13411 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
13412 bp += 9;
13413 }
13414
13415 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
13416 bp += strlen(bp);
13417 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
13418 if (cnt % 8 == 0) {
13419 (void) sprintf(bp++, "\n");
13420 }
13421 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
13422 bp += 9;
13423 }
13424
13425 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
13426 bp += strlen(bp);
13427 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
13428 if (cnt % 8 == 0) {
13429 (void) sprintf(bp++, "\n");
13430 }
13431 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
13432 bp += 9;
13433 }
13434
13435 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
13436 bp += strlen(bp);
13437 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
13438 if (cnt % 8 == 0) {
13439 (void) sprintf(bp++, "\n");
13440 }
13441 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
13442 bp += 9;
13443 }
13444
13445 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
13446 bp += strlen(bp);
13447 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
13448 if (cnt % 8 == 0) {
13449 (void) sprintf(bp++, "\n");
13450 }
13451 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
13452 bp += 9;
13453 }
13454
13455 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
13456 bp += strlen(bp);
13457 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
13458 if (cnt % 8 == 0) {
13459 (void) sprintf(bp++, "\n");
13460 }
13461 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
13462 bp += 9;
13463 }
13464
13465 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
13466 bp += strlen(bp);
13467 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
13468 if (cnt % 8 == 0) {
13469 (void) sprintf(bp++, "\n");
13470 }
13471 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
13472 bp += 9;
13473 }
13474
13475 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
13476 bp += strlen(bp);
13477 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
13478 if (cnt % 8 == 0) {
13479 (void) sprintf(bp++, "\n");
13480 }
13481 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
13482 bp += 9;
13483 }
13484
13485 (void) sprintf(bp, "\n\nRISC GP Registers");
13486 bp += strlen(bp);
13487 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13488 if (cnt % 8 == 0) {
13489 (void) sprintf(bp++, "\n");
13490 }
13491 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13492 bp += 9;
13493 }
13494
13495 (void) sprintf(bp, "\n\nLMC Registers");
13496 bp += strlen(bp);
13497 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13498 if (cnt % 8 == 0) {
13499 (void) sprintf(bp++, "\n");
13500 }
13501 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13502 bp += 9;
13503 }
13504
13505 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13506 bp += strlen(bp);
13507 cnt1 = sizeof (fw->fpm_hdw_reg);
13508 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13509 if (cnt % 8 == 0) {
13510 (void) sprintf(bp++, "\n");
13511 }
13512 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13513 bp += 9;
13514 }
13515
13516 (void) sprintf(bp, "\n\nFB Hardware Registers");
13517 bp += strlen(bp);
13518 cnt1 = sizeof (fw->fb_hdw_reg);
13519 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13520 if (cnt % 8 == 0) {
13521 (void) sprintf(bp++, "\n");
13522 }
13523 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13524 bp += 9;
13525 }
13526
13527 (void) sprintf(bp, "\n\nCode RAM");
13528 bp += strlen(bp);
13529 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13530 if (cnt % 8 == 0) {
13531 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13532 bp += 11;
13533 }
13534 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13535 bp += 9;
13536 }
13537
13538 (void) sprintf(bp, "\n\nExternal Memory");
13539 bp += strlen(bp);
13540 dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
13541 fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
13542 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13543 if (cnt % 8 == 0) {
13544 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13545 bp += 11;
13546 }
13547 (void) sprintf(bp, "%08x ", *dp++);
13548 bp += 9;
13549 }
13550
13551 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13552 bp += strlen(bp);
13553
13554 dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
13555 for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
13556 dp2 = dp;
13557 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
13558 if (*dp2++) {
13559 break;
13560 }
13561 }
13562 if (cnt1 == fw->req_q_size[cnt] / 4) {
13563 dp = dp2;
13564 continue;
13565 }
13566 (void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
13567 bp += strlen(bp);
13568 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
13569 if (cnt1 % 8 == 0) {
13570 (void) sprintf(bp, "\n%08x: ", cnt1);
13571 bp += strlen(bp);
13572 }
13573 (void) sprintf(bp, "%08x ", *dp++);
13574 bp += strlen(bp);
13575 }
13576 }
13577
13578 for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
13579 dp2 = dp;
13580 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
13581 cnt1++) {
13582 if (*dp2++) {
13583 break;
13584 }
13585 }
13586 if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
13587 dp = dp2;
13588 continue;
13589 }
13590 (void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
13591 bp += strlen(bp);
13592 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
13593 cnt1++) {
13594 if (cnt1 % 8 == 0) {
13595 (void) sprintf(bp, "\n%08x: ", cnt1);
13596 bp += strlen(bp);
13597 }
13598 (void) sprintf(bp, "%08x ", *dp++);
13599 bp += strlen(bp);
13600 }
13601 }
13602
13603 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13604 (ha->fwexttracebuf.bp != NULL)) {
13605 uint32_t cnt_b = 0;
13606 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13607
13608 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13609 bp += strlen(bp);
13610 /* show data address as a byte address, data as long words */
13611 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13612 cnt_b = cnt * 4;
13613 if (cnt_b % 32 == 0) {
13614 (void) sprintf(bp, "\n%08x: ",
13615 (int)(w64 + cnt_b));
13616 bp += 11;
13617 }
13618 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13619 bp += 9;
13620 }
13621 }
13622
13623 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13624 (ha->fwfcetracebuf.bp != NULL)) {
13625 uint32_t cnt_b = 0;
13626 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13627
13628 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13629 bp += strlen(bp);
13630 /* show data address as a byte address, data as long words */
13631 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13632 cnt_b = cnt * 4;
13633 if (cnt_b % 32 == 0) {
13634 (void) sprintf(bp, "\n%08x: ",
13635 (int)(w64 + cnt_b));
13636 bp += 11;
13637 }
13638 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13639 bp += 9;
13640 }
13641 }
13642
13643 (void) sprintf(bp, "\n\n");
13644 bp += strlen(bp);
13645
13646 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13647
13648 QL_PRINT_10(ha, "done=%xh\n", cnt);
13649
13650 return (cnt);
13651 }
13652
13653 /*
13654 * ql_81xx_ascii_fw_dump
13655 * Converts ISP81xx firmware binary dump to ascii.
13656 *
13657 * Input:
13658 * ha = adapter state pointer.
 *	bufp = buffer pointer.
13660 *
13661 * Returns:
13662 * Amount of data buffer used.
13663 *
13664 * Context:
13665 * Kernel context.
13666 */
13667 static size_t
13668 ql_81xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
13669 {
13670 uint32_t cnt, cnt1, *dp, *dp2;
13671 caddr_t bp = bufp;
13672 ql_81xx_fw_dump_t *fw = ha->ql_dump_ptr;
13673
13674 QL_PRINT_3(ha, "started\n");
13675
13676 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
13677 ha->fw_major_version, ha->fw_minor_version,
13678 ha->fw_subminor_version, ha->fw_attributes);
13679 bp += strlen(bp);
13680
13681 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
13682 bp += strlen(bp);
13683
13684 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
13685 bp += strlen(bp);
13686
13687 (void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
13688 fw->aer_ues);
13689 bp += strlen(bp);
13690
13691 (void) sprintf(bp, "\nHostRisc Registers");
13692 bp += strlen(bp);
13693 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
13694 if (cnt % 8 == 0) {
13695 (void) sprintf(bp++, "\n");
13696 }
13697 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
13698 bp += 9;
13699 }
13700
13701 (void) sprintf(bp, "\n\nPCIe Registers");
13702 bp += strlen(bp);
13703 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
13704 if (cnt % 8 == 0) {
13705 (void) sprintf(bp++, "\n");
13706 }
13707 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
13708 bp += 9;
13709 }
13710
13711 (void) strcat(bp, "\n\nHost Interface Registers");
13712 bp += strlen(bp);
13713 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
13714 if (cnt % 8 == 0) {
13715 (void) sprintf(bp++, "\n");
13716 }
13717 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
13718 bp += 9;
13719 }
13720
13721 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
13722 bp += strlen(bp);
13723 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
13724 if (cnt % 8 == 0) {
13725 (void) sprintf(bp++, "\n");
13726 }
13727 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
13728 bp += 9;
13729 }
13730
13731 (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
13732 fw->risc_io);
13733 bp += strlen(bp);
13734
13735 (void) sprintf(bp, "\n\nMailbox Registers");
13736 bp += strlen(bp);
13737 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
13738 if (cnt % 16 == 0) {
13739 (void) sprintf(bp++, "\n");
13740 }
13741 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
13742 bp += 5;
13743 }
13744
13745 (void) sprintf(bp, "\n\nXSEQ GP Registers");
13746 bp += strlen(bp);
13747 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
13748 if (cnt % 8 == 0) {
13749 (void) sprintf(bp++, "\n");
13750 }
13751 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
13752 bp += 9;
13753 }
13754
13755 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
13756 bp += strlen(bp);
13757 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
13758 if (cnt % 8 == 0) {
13759 (void) sprintf(bp++, "\n");
13760 }
13761 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
13762 bp += 9;
13763 }
13764
13765 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
13766 bp += strlen(bp);
13767 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
13768 if (cnt % 8 == 0) {
13769 (void) sprintf(bp++, "\n");
13770 }
13771 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
13772 bp += 9;
13773 }
13774
13775 (void) sprintf(bp, "\n\nRSEQ GP Registers");
13776 bp += strlen(bp);
13777 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
13778 if (cnt % 8 == 0) {
13779 (void) sprintf(bp++, "\n");
13780 }
13781 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
13782 bp += 9;
13783 }
13784
13785 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
13786 bp += strlen(bp);
13787 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
13788 if (cnt % 8 == 0) {
13789 (void) sprintf(bp++, "\n");
13790 }
13791 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
13792 bp += 9;
13793 }
13794
13795 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
13796 bp += strlen(bp);
13797 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
13798 if (cnt % 8 == 0) {
13799 (void) sprintf(bp++, "\n");
13800 }
13801 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
13802 bp += 9;
13803 }
13804
13805 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
13806 bp += strlen(bp);
13807 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
13808 if (cnt % 8 == 0) {
13809 (void) sprintf(bp++, "\n");
13810 }
13811 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
13812 bp += 9;
13813 }
13814
13815 (void) sprintf(bp, "\n\nASEQ GP Registers");
13816 bp += strlen(bp);
13817 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
13818 if (cnt % 8 == 0) {
13819 (void) sprintf(bp++, "\n");
13820 }
13821 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
13822 bp += 9;
13823 }
13824
13825 (void) sprintf(bp, "\n\nASEQ-0 Registers");
13826 bp += strlen(bp);
13827 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
13828 if (cnt % 8 == 0) {
13829 (void) sprintf(bp++, "\n");
13830 }
13831 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
13832 bp += 9;
13833 }
13834
13835 (void) sprintf(bp, "\n\nASEQ-1 Registers");
13836 bp += strlen(bp);
13837 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
13838 if (cnt % 8 == 0) {
13839 (void) sprintf(bp++, "\n");
13840 }
13841 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
13842 bp += 9;
13843 }
13844
13845 (void) sprintf(bp, "\n\nASEQ-2 Registers");
13846 bp += strlen(bp);
13847 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
13848 if (cnt % 8 == 0) {
13849 (void) sprintf(bp++, "\n");
13850 }
13851 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
13852 bp += 9;
13853 }
13854
13855 (void) sprintf(bp, "\n\nCommand DMA Registers");
13856 bp += strlen(bp);
13857 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
13858 if (cnt % 8 == 0) {
13859 (void) sprintf(bp++, "\n");
13860 }
13861 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
13862 bp += 9;
13863 }
13864
13865 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
13866 bp += strlen(bp);
13867 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
13868 if (cnt % 8 == 0) {
13869 (void) sprintf(bp++, "\n");
13870 }
13871 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
13872 bp += 9;
13873 }
13874
13875 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
13876 bp += strlen(bp);
13877 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
13878 if (cnt % 8 == 0) {
13879 (void) sprintf(bp++, "\n");
13880 }
13881 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
13882 bp += 9;
13883 }
13884
13885 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
13886 bp += strlen(bp);
13887 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
13888 if (cnt % 8 == 0) {
13889 (void) sprintf(bp++, "\n");
13890 }
13891 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
13892 bp += 9;
13893 }
13894
13895 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
13896 bp += strlen(bp);
13897 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
13898 if (cnt % 8 == 0) {
13899 (void) sprintf(bp++, "\n");
13900 }
13901 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
13902 bp += 9;
13903 }
13904
13905 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
13906 bp += strlen(bp);
13907 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
13908 if (cnt % 8 == 0) {
13909 (void) sprintf(bp++, "\n");
13910 }
13911 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
13912 bp += 9;
13913 }
13914
13915 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
13916 bp += strlen(bp);
13917 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
13918 if (cnt % 8 == 0) {
13919 (void) sprintf(bp++, "\n");
13920 }
13921 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
13922 bp += 9;
13923 }
13924
13925 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
13926 bp += strlen(bp);
13927 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
13928 if (cnt % 8 == 0) {
13929 (void) sprintf(bp++, "\n");
13930 }
13931 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
13932 bp += 9;
13933 }
13934
13935 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
13936 bp += strlen(bp);
13937 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
13938 if (cnt % 8 == 0) {
13939 (void) sprintf(bp++, "\n");
13940 }
13941 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
13942 bp += 9;
13943 }
13944
13945 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
13946 bp += strlen(bp);
13947 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
13948 if (cnt % 8 == 0) {
13949 (void) sprintf(bp++, "\n");
13950 }
13951 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
13952 bp += 9;
13953 }
13954
13955 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
13956 bp += strlen(bp);
13957 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
13958 if (cnt % 8 == 0) {
13959 (void) sprintf(bp++, "\n");
13960 }
13961 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
13962 bp += 9;
13963 }
13964
13965 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
13966 bp += strlen(bp);
13967 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
13968 if (cnt % 8 == 0) {
13969 (void) sprintf(bp++, "\n");
13970 }
13971 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
13972 bp += 9;
13973 }
13974
13975 (void) sprintf(bp, "\n\nRISC GP Registers");
13976 bp += strlen(bp);
13977 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13978 if (cnt % 8 == 0) {
13979 (void) sprintf(bp++, "\n");
13980 }
13981 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13982 bp += 9;
13983 }
13984
13985 (void) sprintf(bp, "\n\nLMC Registers");
13986 bp += strlen(bp);
13987 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13988 if (cnt % 8 == 0) {
13989 (void) sprintf(bp++, "\n");
13990 }
13991 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13992 bp += 9;
13993 }
13994
13995 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13996 bp += strlen(bp);
13997 cnt1 = sizeof (fw->fpm_hdw_reg);
13998 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13999 if (cnt % 8 == 0) {
14000 (void) sprintf(bp++, "\n");
14001 }
14002 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
14003 bp += 9;
14004 }
14005
14006 (void) sprintf(bp, "\n\nFB Hardware Registers");
14007 bp += strlen(bp);
14008 cnt1 = sizeof (fw->fb_hdw_reg);
14009 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
14010 if (cnt % 8 == 0) {
14011 (void) sprintf(bp++, "\n");
14012 }
14013 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
14014 bp += 9;
14015 }
14016
14017 (void) sprintf(bp, "\n\nCode RAM");
14018 bp += strlen(bp);
14019 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
14020 if (cnt % 8 == 0) {
14021 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
14022 bp += 11;
14023 }
14024 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
14025 bp += 9;
14026 }
14027
14028 (void) sprintf(bp, "\n\nExternal Memory");
14029 bp += strlen(bp);
14030 dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
14031 fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
14032 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
14033 if (cnt % 8 == 0) {
14034 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
14035 bp += 11;
14036 }
14037 (void) sprintf(bp, "%08x ", *dp++);
14038 bp += 9;
14039 }
14040
14041 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
14042 bp += strlen(bp);
14043
14044 dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
14045 for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
14046 dp2 = dp;
14047 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14048 if (*dp2++) {
14049 break;
14050 }
14051 }
14052 if (cnt1 == fw->req_q_size[cnt] / 4) {
14053 dp = dp2;
14054 continue;
14055 }
14056 (void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
14057 bp += strlen(bp);
14058 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14059 if (cnt1 % 8 == 0) {
14060 (void) sprintf(bp, "\n%08x: ", cnt1);
14061 bp += strlen(bp);
14062 }
14063 (void) sprintf(bp, "%08x ", *dp++);
14064 bp += strlen(bp);
14065 }
14066 }
14067
14068 for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
14069 dp2 = dp;
14070 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14071 cnt1++) {
14072 if (*dp2++) {
14073 break;
14074 }
14075 }
14076 if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
14077 dp = dp2;
14078 continue;
14079 }
14080 (void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
14081 bp += strlen(bp);
14082 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14083 cnt1++) {
14084 if (cnt1 % 8 == 0) {
14085 (void) sprintf(bp, "\n%08x: ", cnt1);
14086 bp += strlen(bp);
14087 }
14088 (void) sprintf(bp, "%08x ", *dp++);
14089 bp += strlen(bp);
14090 }
14091 }
14092
14093 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14094 (ha->fwexttracebuf.bp != NULL)) {
14095 uint32_t cnt_b = 0;
14096 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
14097
14098 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
14099 bp += strlen(bp);
14100 /* show data address as a byte address, data as long words */
14101 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14102 cnt_b = cnt * 4;
14103 if (cnt_b % 32 == 0) {
14104 (void) sprintf(bp, "\n%08x: ",
14105 (int)(w64 + cnt_b));
14106 bp += 11;
14107 }
14108 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
14109 bp += 9;
14110 }
14111 }
14112
14113 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14114 (ha->fwfcetracebuf.bp != NULL)) {
14115 uint32_t cnt_b = 0;
14116 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
14117
14118 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
14119 bp += strlen(bp);
14120 /* show data address as a byte address, data as long words */
14121 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14122 cnt_b = cnt * 4;
14123 if (cnt_b % 32 == 0) {
14124 (void) sprintf(bp, "\n%08x: ",
14125 (int)(w64 + cnt_b));
14126 bp += 11;
14127 }
14128 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
14129 bp += 9;
14130 }
14131 }
14132
14133 (void) sprintf(bp, "\n\n");
14134 bp += strlen(bp);
14135
14136 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
14137
14138 QL_PRINT_10(ha, "done=%xh\n", cnt);
14139
14140 return (cnt);
14141 }
14142
14143 /*
14144 * ql_8021_ascii_fw_dump
14145 * Converts ISP8021 firmware binary dump to ascii.
14146 *
14147 * Input:
14148 * ha = adapter state pointer.
14149 * bptr = buffer pointer.
14150 *
14151 * Returns:
14152 * Amount of data buffer used.
14153 *
14154 * Context:
14155 * Kernel context.
14156 */
14157 static size_t
14158 ql_8021_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
14159 {
14160 uint32_t cnt;
14161 caddr_t bp = bufp;
14162 uint8_t *fw = ha->ql_dump_ptr;
14163
14164 /*
14165 * 2 ascii bytes per binary byte + a space and
14166 * a newline every 16 binary bytes
14167 */
14168 cnt = 0;
14169 while (cnt < ha->ql_dump_size) {
14170 (void) sprintf(bp, "%02x ", *fw++);
14171 bp += strlen(bp);
14172 if (++cnt % 16 == 0) {
14173 (void) sprintf(bp, "\n");
14174 bp += strlen(bp);
14175 }
14176 }
14177 if (cnt % 16 != 0) {
14178 (void) sprintf(bp, "\n");
14179 bp += strlen(bp);
14180 }
14181 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
14182 QL_PRINT_10(ha, "done=%xh\n", cnt);
14183 return (cnt);
14184 }
14185
14186 /*
 * ql_2200_binary_fw_dump
 *	Retrieves ISP2200 firmware dump data (register state and RISC
 *	SRAM) into the supplied dump context.
 *
14189 * Input:
14190 * ha: adapter state pointer.
14191 * fw: firmware dump context pointer.
14192 *
14193 * Returns:
14194 * ql local function return status code.
14195 *
14196 * Context:
14197 * Interrupt or Kernel context, no mailbox commands allowed.
14198 */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t cnt;
	uint16_t risc_address;
	clock_t timer;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/* Disable ISP interrupts. */
	ql_disable_intr(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 x 1ms for the pause to take effect. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Snapshot the register file while the RISC is paused.
		 * Writes to ctrl_status/pcr below select which internal
		 * register bank appears in the I/O window before each read.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		/* RISC hardware registers (bank 0). */
		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks 0-7, selected via pcr. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		/* FPM bank 0 and bank 1 registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/*
		 * Registers are captured; now reset the RISC so its SRAM
		 * can be read back through the mailbox interface below.
		 */

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* Poll up to 30000 x 1ms for the ROM code to finish. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM.
		 *
		 * Read 0xf000 words starting at RISC address 0x1000 by
		 * issuing MBC_READ_RAM_WORD once per word: the address
		 * goes in mailbox_in[1], the host interrupt triggers the
		 * command, then we poll for the RISC's reply.  Status is
		 * returned in mailbox_out[0] and the data word in
		 * mailbox_out[2].
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Poll up to 6000000 x 5us (30s) for completion. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (INTERRUPT_PENDING(ha)) {
					/*
					 * Semaphore bit 0 set means a
					 * mailbox completion is posted.
					 */
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox_out[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Spurious interrupt; clear it. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox command completion status. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
14393
14394 /*
 * ql_2300_binary_fw_dump
 *	Retrieves ISP2300 firmware dump data (register state plus RISC,
 *	stack and data SRAM) into the supplied dump context.
 *
14397 * Input:
14398 * ha: adapter state pointer.
14399 * fw: firmware dump context pointer.
14400 *
14401 * Returns:
14402 * ql local function return status code.
14403 *
14404 * Context:
14405 * Interrupt or Kernel context, no mailbox commands allowed.
14406 */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t timer;
	int rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/* Disable ISP interrupts. */
	ql_disable_intr(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 x 1ms for the pause to take effect. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Snapshot the register file while the RISC is paused.
		 * Writes to ctrl_status/pcr below select which internal
		 * register bank appears in the I/O window before each read.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/*
		 * RISC GP register banks 0-7, selected via pcr.  Note the
		 * 2300 bank select values step by 0x200 (vs 0x100 on 2200).
		 */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/*
		 * Registers are captured; now reset the RISC so the SRAM
		 * regions can be read back via ql_read_risc_ram() below.
		 */

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* Poll up to 30000 x 1ms for the ROM code to finish. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
14555
14556 /*
14557 * ql_24xx_binary_fw_dump
14558 *
14559 * Input:
14560 * ha: adapter state pointer.
14561 * fw: firmware dump context pointer.
14562 *
14563 * Returns:
14564 * ql local function return status code.
14565 *
14566 * Context:
14567 * Interrupt or Kernel context, no mailbox commands allowed.
14568 */
14569 static int
14570 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
14571 {
14572 uint32_t *reg32;
14573 void *bp;
14574 clock_t timer;
14575 int rval = QL_SUCCESS;
14576
14577 QL_PRINT_3(ha, "started\n");
14578
14579 fw->hccr = RD32_IO_REG(ha, hccr);
14580
14581 /* Pause RISC. */
14582 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14583 /* Disable ISP interrupts. */
14584 ql_disable_intr(ha);
14585
14586 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14587 for (timer = 30000;
14588 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14589 rval == QL_SUCCESS; timer--) {
14590 if (timer) {
14591 drv_usecwait(100);
14592 } else {
14593 rval = QL_FUNCTION_TIMEOUT;
14594 }
14595 }
14596 }
14597
14598 if (rval == QL_SUCCESS) {
14599 /* Host interface registers. */
14600 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14601 sizeof (fw->host_reg) / 4, 32);
14602
14603 /* Disable ISP interrupts. */
14604 ql_disable_intr(ha);
14605
14606 /* Shadow registers. */
14607
14608 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14609 RD32_IO_REG(ha, io_base_addr);
14610
14611 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14612 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14613 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14614 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14615
14616 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14617 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14618 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14619 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14620
14621 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14622 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14623 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14624 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14625
14626 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14627 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14628 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14629 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14630
14631 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14632 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14633 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14634 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14635
14636 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14637 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14638 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14639 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14640
14641 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14642 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14643 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14644 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14645
14646 /* Mailbox registers. */
14647 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14648 sizeof (fw->mailbox_reg) / 2, 16);
14649
14650 /* Transfer sequence registers. */
14651
14652 /* XSEQ GP */
14653 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14654 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14655 16, 32);
14656 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14657 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14658 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14659 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14660 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14661 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14662 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14663 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14664 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14665 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14666 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14667 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14668 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14669 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14670
14671 /* XSEQ-0 */
14672 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14673 (void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14674 sizeof (fw->xseq_0_reg) / 4, 32);
14675
14676 /* XSEQ-1 */
14677 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14678 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14679 sizeof (fw->xseq_1_reg) / 4, 32);
14680
14681 /* Receive sequence registers. */
14682
14683 /* RSEQ GP */
14684 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14685 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14686 16, 32);
14687 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14688 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14689 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14690 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14691 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14692 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14693 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14694 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14695 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14696 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14697 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14698 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14699 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14700 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14701
14702 /* RSEQ-0 */
14703 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14704 (void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14705 sizeof (fw->rseq_0_reg) / 4, 32);
14706
14707 /* RSEQ-1 */
14708 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14709 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14710 sizeof (fw->rseq_1_reg) / 4, 32);
14711
14712 /* RSEQ-2 */
14713 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14714 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14715 sizeof (fw->rseq_2_reg) / 4, 32);
14716
14717 /* Command DMA registers. */
14718
14719 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14720 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14721 sizeof (fw->cmd_dma_reg) / 4, 32);
14722
14723 /* Queues. */
14724
14725 /* RequestQ0 */
14726 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14727 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14728 8, 32);
14729 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14730
14731 /* ResponseQ0 */
14732 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14733 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14734 8, 32);
14735 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14736
14737 /* RequestQ1 */
14738 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14739 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14740 8, 32);
14741 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14742
14743 /* Transmit DMA registers. */
14744
14745 /* XMT0 */
14746 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14747 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14748 16, 32);
14749 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14750 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14751
14752 /* XMT1 */
14753 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14754 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14755 16, 32);
14756 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14757 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14758
14759 /* XMT2 */
14760 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14761 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14762 16, 32);
14763 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14764 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14765
14766 /* XMT3 */
14767 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14768 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14769 16, 32);
14770 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14771 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14772
14773 /* XMT4 */
14774 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14775 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14776 16, 32);
14777 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14778 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14779
14780 /* XMT Common */
14781 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14782 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14783 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14784
14785 /* Receive DMA registers. */
14786
14787 /* RCVThread0 */
14788 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14789 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14790 ha->iobase + 0xC0, 16, 32);
14791 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14792 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14793
14794 /* RCVThread1 */
14795 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14796 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14797 ha->iobase + 0xC0, 16, 32);
14798 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14799 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14800
14801 /* RISC registers. */
14802
14803 /* RISC GP */
14804 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14805 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14806 16, 32);
14807 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14808 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14809 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14810 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14811 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14812 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14813 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14814 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14815 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14816 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14817 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14818 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14819 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14820 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14821
14822 /* Local memory controller registers. */
14823
14824 /* LMC */
14825 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14826 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14827 16, 32);
14828 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14829 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14830 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14831 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14832 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14833 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14834 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14835 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14836 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14837 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14838 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14839 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14840
14841 /* Fibre Protocol Module registers. */
14842
14843 /* FPM hardware */
14844 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14845 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14846 16, 32);
14847 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14848 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14849 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14850 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14851 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14852 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14853 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14854 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14855 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14856 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14857 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14858 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14859 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14860 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14861 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14862 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14863 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14864 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14865 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14866 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14867 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14868 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14869
14870 /* Frame Buffer registers. */
14871
14872 /* FB hardware */
14873 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14874 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14875 16, 32);
14876 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14877 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14878 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14879 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14880 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14881 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14882 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14883 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14884 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14885 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14886 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14887 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14888 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14889 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14890 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14891 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14892 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14893 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14894 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14895 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14896 }
14897
14898 /* Get the request queue */
14899 if (rval == QL_SUCCESS) {
14900 uint32_t cnt;
14901 uint32_t *w32 = (uint32_t *)ha->req_q[0]->req_ring.bp;
14902
14903 /* Sync DMA buffer. */
14904 (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle,
14905 0, sizeof (fw->req_q), DDI_DMA_SYNC_FORKERNEL);
14906
14907 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14908 fw->req_q[cnt] = *w32++;
14909 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14910 }
14911 }
14912
14913 /* Get the response queue */
14914 if (rval == QL_SUCCESS) {
14915 uint32_t cnt;
14916 uint32_t *w32 =
14917 (uint32_t *)ha->rsp_queues[0]->rsp_ring.bp;
14918
14919 /* Sync DMA buffer. */
14920 (void) ddi_dma_sync(ha->rsp_queues[0]->rsp_ring.dma_handle,
14921 0, sizeof (fw->rsp_q), DDI_DMA_SYNC_FORKERNEL);
14922
14923 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14924 fw->rsp_q[cnt] = *w32++;
14925 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14926 }
14927 }
14928
14929 /* Reset RISC. */
14930 ql_reset_chip(ha);
14931
14932 /* Memory. */
14933 if (rval == QL_SUCCESS) {
14934 /* Code RAM. */
14935 rval = ql_read_risc_ram(ha, 0x20000,
14936 sizeof (fw->code_ram) / 4, fw->code_ram);
14937 }
14938 if (rval == QL_SUCCESS) {
14939 /* External Memory. */
14940 rval = ql_read_risc_ram(ha, 0x100000,
14941 ha->fw_ext_memory_size / 4, fw->ext_mem);
14942 }
14943
14944 /* Get the extended trace buffer */
14945 if (rval == QL_SUCCESS) {
14946 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14947 (ha->fwexttracebuf.bp != NULL)) {
14948 uint32_t cnt;
14949 uint32_t *w32 = ha->fwexttracebuf.bp;
14950
14951 /* Sync DMA buffer. */
14952 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14953 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14954
14955 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14956 fw->ext_trace_buf[cnt] = *w32++;
14957 }
14958 }
14959 }
14960
14961 /* Get the FC event trace buffer */
14962 if (rval == QL_SUCCESS) {
14963 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14964 (ha->fwfcetracebuf.bp != NULL)) {
14965 uint32_t cnt;
14966 uint32_t *w32 = ha->fwfcetracebuf.bp;
14967
14968 /* Sync DMA buffer. */
14969 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14970 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14971
14972 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14973 fw->fce_trace_buf[cnt] = *w32++;
14974 }
14975 }
14976 }
14977
14978 if (rval != QL_SUCCESS) {
14979 EL(ha, "failed=%xh\n", rval);
14980 } else {
14981 /*EMPTY*/
14982 QL_PRINT_3(ha, "done\n");
14983 }
14984
14985 return (rval);
14986 }
14987
14988 /*
14989 * ql_25xx_binary_fw_dump
14990 *
14991 * Input:
14992 * ha: adapter state pointer.
14993 * fw: firmware dump context pointer.
14994 *
14995 * Returns:
14996 * ql local function return status code.
14997 *
14998 * Context:
14999 * Interrupt or Kernel context, no mailbox commands allowed.
15000 */
static int
ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
{
	uint32_t	*reg32, cnt, *w32ptr, index, *dp;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Overall flow: record queue sizes and a few live registers,
	 * pause the RISC, walk the banked register windows (bank is
	 * selected via io_base_addr, data is read through the window
	 * at iobase + 0xC0), snapshot the request/response queues,
	 * reset the chip, then pull code RAM, external memory and the
	 * firmware trace buffers.
	 */
	fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
	if (ha->req_q[1] != NULL) {
		fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
	}
	fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

	fw->hccr = RD32_IO_REG(ha, hccr);
	fw->r2h_status = RD32_IO_REG(ha, risc2host);
	/* 0x104 is the PCIe AER Uncorrectable Error Status offset. */
	fw->aer_ues = ql_pci_config_get32(ha, 0x104);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		ql_disable_intr(ha);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/*
		 * Poll for the paused bit; 30000 iterations at 100us
		 * each gives a worst-case wait of ~3 seconds.
		 */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/*
		 * Host Interface registers.
		 *
		 * Note: ql_read_regs() returns the destination buffer
		 * pointer advanced past the words just read, which is
		 * why the chained "bp = ql_read_regs(...)" pattern
		 * below fills one dump array across several banks.
		 */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		ql_disable_intr(ha);

		/*
		 * Shadow registers.
		 *
		 * Each shadow register is captured by writing a
		 * selector (0xB0n00000) to iobase + 0xF0 and reading
		 * the value back from iobase + 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers (16-bit, at iobase + 0x80). */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0: 8 dwords at +0xC0, 7 more at +0xE4. */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Get the Queue Pointers.
		 *
		 * Four dwords per response queue: req in/out, rsp
		 * in/out.  MBAR in/out pointers exist only in
		 * multi-queue mode; zeros are stored otherwise so the
		 * dump layout stays fixed.
		 */
		dp = fw->req_rsp_ext_mem;
		for (index = 0; index < ha->rsp_queues_cnt; index++) {
			if (index == 0 && ha->flags & MULTI_QUEUE) {
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[0]->mbar_req_in);
				LITTLE_ENDIAN_32(dp);
				dp++;
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[0]->mbar_req_out);
				LITTLE_ENDIAN_32(dp);
				dp++;
			} else if (index == 1 && ha->flags & MULTI_QUEUE) {
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[1]->mbar_req_in);
				LITTLE_ENDIAN_32(dp);
				dp++;
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[1]->mbar_req_out);
				LITTLE_ENDIAN_32(dp);
				dp++;
			} else {
				*dp++ = 0;
				*dp++ = 0;
			}
			if (ha->flags & MULTI_QUEUE) {
				*dp = RD32_MBAR_REG(ha,
				    ha->rsp_queues[index]->mbar_rsp_in);
				LITTLE_ENDIAN_32(dp);
				dp++;
				*dp = RD32_MBAR_REG(ha,
				    ha->rsp_queues[index]->mbar_rsp_out);
				LITTLE_ENDIAN_32(dp);
				dp++;
			} else {
				*dp++ = 0;
				*dp++ = 0;
			}
		}
		/* Get the request queue */
		(void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
		for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
			*dp = *w32ptr++;
			LITTLE_ENDIAN_32(dp);
			dp++;
		}
		if (ha->req_q[1] != NULL) {
			(void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
			    0, 0, DDI_DMA_SYNC_FORCPU);
			w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
			for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
				*dp = *w32ptr++;
				LITTLE_ENDIAN_32(dp);
				dp++;
			}
		}

		/* Get the response queues */
		for (index = 0; index < ha->rsp_queues_cnt; index++) {
			(void) ddi_dma_sync(
			    ha->rsp_queues[index]->rsp_ring.dma_handle,
			    0, 0, DDI_DMA_SYNC_FORCPU);
			w32ptr = (uint32_t *)
			    ha->rsp_queues[index]->rsp_ring.bp;
			for (cnt = 0;
			    cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
			    cnt++) {
				*dp = *w32ptr++;
				LITTLE_ENDIAN_32(dp);
				dp++;
			}
		}
	}

	/* Reset RISC. */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/*
		 * External Memory.
		 *
		 * dp now points just past the queue images inside
		 * fw->req_rsp_ext_mem, so external memory lands after
		 * them in the same combined buffer.  NOTE(review):
		 * assumes req_rsp_ext_mem was sized for queue pointers
		 * + queue images + fw_ext_memory_size — confirm
		 * against the dump-buffer allocation.
		 */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, dp);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
15570
15571 /*
15572 * ql_81xx_binary_fw_dump
15573 *
15574 * Input:
15575 * ha: adapter state pointer.
15576 * fw: firmware dump context pointer.
15577 *
15578 * Returns:
15579 * ql local function return status code.
15580 *
15581 * Context:
15582 * Interrupt or Kernel context, no mailbox commands allowed.
15583 */
15584 static int
15585 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
15586 {
15587 uint32_t *reg32, cnt, *w32ptr, index, *dp;
15588 void *bp;
15589 clock_t timer;
15590 int rval = QL_SUCCESS;
15591
15592 QL_PRINT_3(ha, "started\n");
15593
15594 fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
15595 if (ha->req_q[1] != NULL) {
15596 fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
15597 }
15598 fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
15599
15600 fw->hccr = RD32_IO_REG(ha, hccr);
15601 fw->r2h_status = RD32_IO_REG(ha, risc2host);
15602 fw->aer_ues = ql_pci_config_get32(ha, 0x104);
15603
15604 /* Pause RISC. */
15605 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
15606 /* Disable ISP interrupts. */
15607 ql_disable_intr(ha);
15608
15609 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
15610 for (timer = 30000;
15611 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
15612 rval == QL_SUCCESS; timer--) {
15613 if (timer) {
15614 drv_usecwait(100);
15615 if (timer % 10000 == 0) {
15616 EL(ha, "risc pause %d\n", timer);
15617 }
15618 } else {
15619 EL(ha, "risc pause timeout\n");
15620 rval = QL_FUNCTION_TIMEOUT;
15621 }
15622 }
15623 }
15624
15625 if (rval == QL_SUCCESS) {
15626
15627 /* Host Interface registers */
15628
15629 /* HostRisc registers. */
15630 WRT32_IO_REG(ha, io_base_addr, 0x7000);
15631 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
15632 16, 32);
15633 WRT32_IO_REG(ha, io_base_addr, 0x7010);
15634 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15635
15636 /* PCIe registers. */
15637 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
15638 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
15639 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
15640 3, 32);
15641 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
15642 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
15643
15644 /* Host interface registers. */
15645 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
15646 sizeof (fw->host_reg) / 4, 32);
15647
15648 /* Disable ISP interrupts. */
15649 ql_disable_intr(ha);
15650
15651 /* Shadow registers. */
15652
15653 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
15654 RD32_IO_REG(ha, io_base_addr);
15655
15656 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15657 WRT_REG_DWORD(ha, reg32, 0xB0000000);
15658 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15659 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
15660
15661 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15662 WRT_REG_DWORD(ha, reg32, 0xB0100000);
15663 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15664 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
15665
15666 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15667 WRT_REG_DWORD(ha, reg32, 0xB0200000);
15668 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15669 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
15670
15671 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15672 WRT_REG_DWORD(ha, reg32, 0xB0300000);
15673 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15674 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
15675
15676 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15677 WRT_REG_DWORD(ha, reg32, 0xB0400000);
15678 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15679 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
15680
15681 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15682 WRT_REG_DWORD(ha, reg32, 0xB0500000);
15683 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15684 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
15685
15686 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15687 WRT_REG_DWORD(ha, reg32, 0xB0600000);
15688 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15689 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
15690
15691 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15692 WRT_REG_DWORD(ha, reg32, 0xB0700000);
15693 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15694 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
15695
15696 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15697 WRT_REG_DWORD(ha, reg32, 0xB0800000);
15698 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15699 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
15700
15701 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15702 WRT_REG_DWORD(ha, reg32, 0xB0900000);
15703 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15704 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
15705
15706 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15707 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
15708 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15709 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
15710
15711 /* RISC I/O register. */
15712
15713 WRT32_IO_REG(ha, io_base_addr, 0x0010);
15714 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
15715 1, 32);
15716
15717 /* Mailbox registers. */
15718
15719 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
15720 sizeof (fw->mailbox_reg) / 2, 16);
15721
15722 /* Transfer sequence registers. */
15723
15724 /* XSEQ GP */
15725 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
15726 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
15727 16, 32);
15728 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
15729 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15730 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
15731 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15732 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
15733 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15734 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
15735 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15736 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
15737 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15738 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
15739 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15740 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
15741 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15742
15743 /* XSEQ-0 */
15744 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
15745 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
15746 16, 32);
15747 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
15748 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15749 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
15750 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15751
15752 /* XSEQ-1 */
15753 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
15754 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
15755 16, 32);
15756
15757 /* Receive sequence registers. */
15758
15759 /* RSEQ GP */
15760 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
15761 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
15762 16, 32);
15763 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
15764 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15765 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
15766 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15767 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
15768 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15769 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
15770 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15771 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
15772 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15773 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
15774 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15775 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
15776 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15777
15778 /* RSEQ-0 */
15779 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
15780 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
15781 16, 32);
15782 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
15783 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15784
15785 /* RSEQ-1 */
15786 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
15787 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
15788 sizeof (fw->rseq_1_reg) / 4, 32);
15789
15790 /* RSEQ-2 */
15791 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
15792 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
15793 sizeof (fw->rseq_2_reg) / 4, 32);
15794
15795 /* Auxiliary sequencer registers. */
15796
15797 /* ASEQ GP */
15798 WRT32_IO_REG(ha, io_base_addr, 0xB000);
15799 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
15800 16, 32);
15801 WRT32_IO_REG(ha, io_base_addr, 0xB010);
15802 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15803 WRT32_IO_REG(ha, io_base_addr, 0xB020);
15804 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15805 WRT32_IO_REG(ha, io_base_addr, 0xB030);
15806 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15807 WRT32_IO_REG(ha, io_base_addr, 0xB040);
15808 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15809 WRT32_IO_REG(ha, io_base_addr, 0xB050);
15810 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15811 WRT32_IO_REG(ha, io_base_addr, 0xB060);
15812 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15813 WRT32_IO_REG(ha, io_base_addr, 0xB070);
15814 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15815
15816 /* ASEQ-0 */
15817 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
15818 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
15819 16, 32);
15820 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
15821 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15822
15823 /* ASEQ-1 */
15824 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
15825 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
15826 16, 32);
15827
15828 /* ASEQ-2 */
15829 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
15830 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
15831 16, 32);
15832
15833 /* Command DMA registers. */
15834
15835 WRT32_IO_REG(ha, io_base_addr, 0x7100);
15836 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
15837 sizeof (fw->cmd_dma_reg) / 4, 32);
15838
15839 /* Queues. */
15840
15841 /* RequestQ0 */
15842 WRT32_IO_REG(ha, io_base_addr, 0x7200);
15843 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
15844 8, 32);
15845 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
15846
15847 /* ResponseQ0 */
15848 WRT32_IO_REG(ha, io_base_addr, 0x7300);
15849 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
15850 8, 32);
15851 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
15852
15853 /* RequestQ1 */
15854 WRT32_IO_REG(ha, io_base_addr, 0x7400);
15855 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
15856 8, 32);
15857 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
15858
15859 /* Transmit DMA registers. */
15860
15861 /* XMT0 */
15862 WRT32_IO_REG(ha, io_base_addr, 0x7600);
15863 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
15864 16, 32);
15865 WRT32_IO_REG(ha, io_base_addr, 0x7610);
15866 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15867
15868 /* XMT1 */
15869 WRT32_IO_REG(ha, io_base_addr, 0x7620);
15870 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
15871 16, 32);
15872 WRT32_IO_REG(ha, io_base_addr, 0x7630);
15873 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15874
15875 /* XMT2 */
15876 WRT32_IO_REG(ha, io_base_addr, 0x7640);
15877 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
15878 16, 32);
15879 WRT32_IO_REG(ha, io_base_addr, 0x7650);
15880 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15881
15882 /* XMT3 */
15883 WRT32_IO_REG(ha, io_base_addr, 0x7660);
15884 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
15885 16, 32);
15886 WRT32_IO_REG(ha, io_base_addr, 0x7670);
15887 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15888
15889 /* XMT4 */
15890 WRT32_IO_REG(ha, io_base_addr, 0x7680);
15891 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
15892 16, 32);
15893 WRT32_IO_REG(ha, io_base_addr, 0x7690);
15894 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15895
15896 /* XMT Common */
15897 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
15898 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
15899 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
15900
15901 /* Receive DMA registers. */
15902
15903 /* RCVThread0 */
15904 WRT32_IO_REG(ha, io_base_addr, 0x7700);
15905 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
15906 ha->iobase + 0xC0, 16, 32);
15907 WRT32_IO_REG(ha, io_base_addr, 0x7710);
15908 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15909
15910 /* RCVThread1 */
15911 WRT32_IO_REG(ha, io_base_addr, 0x7720);
15912 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
15913 ha->iobase + 0xC0, 16, 32);
15914 WRT32_IO_REG(ha, io_base_addr, 0x7730);
15915 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15916
15917 /* RISC registers. */
15918
15919 /* RISC GP */
15920 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
15921 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
15922 16, 32);
15923 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
15924 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15925 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
15926 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15927 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
15928 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15929 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
15930 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15931 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
15932 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15933 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
15934 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15935 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
15936 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15937
15938 /* Local memory controller (LMC) registers. */
15939
15940 /* LMC */
15941 WRT32_IO_REG(ha, io_base_addr, 0x3000);
15942 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
15943 16, 32);
15944 WRT32_IO_REG(ha, io_base_addr, 0x3010);
15945 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15946 WRT32_IO_REG(ha, io_base_addr, 0x3020);
15947 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15948 WRT32_IO_REG(ha, io_base_addr, 0x3030);
15949 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15950 WRT32_IO_REG(ha, io_base_addr, 0x3040);
15951 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15952 WRT32_IO_REG(ha, io_base_addr, 0x3050);
15953 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15954 WRT32_IO_REG(ha, io_base_addr, 0x3060);
15955 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15956 WRT32_IO_REG(ha, io_base_addr, 0x3070);
15957 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15958
15959 /* Fibre Protocol Module registers. */
15960
15961 /* FPM hardware */
15962 WRT32_IO_REG(ha, io_base_addr, 0x4000);
15963 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
15964 16, 32);
15965 WRT32_IO_REG(ha, io_base_addr, 0x4010);
15966 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15967 WRT32_IO_REG(ha, io_base_addr, 0x4020);
15968 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15969 WRT32_IO_REG(ha, io_base_addr, 0x4030);
15970 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15971 WRT32_IO_REG(ha, io_base_addr, 0x4040);
15972 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15973 WRT32_IO_REG(ha, io_base_addr, 0x4050);
15974 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15975 WRT32_IO_REG(ha, io_base_addr, 0x4060);
15976 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15977 WRT32_IO_REG(ha, io_base_addr, 0x4070);
15978 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15979 WRT32_IO_REG(ha, io_base_addr, 0x4080);
15980 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15981 WRT32_IO_REG(ha, io_base_addr, 0x4090);
15982 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15983 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
15984 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15985 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
15986 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15987 WRT32_IO_REG(ha, io_base_addr, 0x40C0);
15988 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15989 WRT32_IO_REG(ha, io_base_addr, 0x40D0);
15990 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15991
15992 /* Frame Buffer registers. */
15993
15994 /* FB hardware */
15995 WRT32_IO_REG(ha, io_base_addr, 0x6000);
15996 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
15997 16, 32);
15998 WRT32_IO_REG(ha, io_base_addr, 0x6010);
15999 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16000 WRT32_IO_REG(ha, io_base_addr, 0x6020);
16001 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16002 WRT32_IO_REG(ha, io_base_addr, 0x6030);
16003 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16004 WRT32_IO_REG(ha, io_base_addr, 0x6040);
16005 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16006 WRT32_IO_REG(ha, io_base_addr, 0x6100);
16007 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16008 WRT32_IO_REG(ha, io_base_addr, 0x6130);
16009 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16010 WRT32_IO_REG(ha, io_base_addr, 0x6150);
16011 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16012 WRT32_IO_REG(ha, io_base_addr, 0x6170);
16013 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16014 WRT32_IO_REG(ha, io_base_addr, 0x6190);
16015 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16016 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
16017 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16018 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
16019 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16020 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
16021 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
16022 }
16023
16024 if (rval == QL_SUCCESS) {
16025 /* Get the Queue Pointers */
16026 dp = fw->req_rsp_ext_mem;
16027 for (index = 0; index < ha->rsp_queues_cnt; index++) {
16028 if (index == 0 && ha->flags & MULTI_QUEUE) {
16029 *dp = RD32_MBAR_REG(ha,
16030 ha->req_q[0]->mbar_req_in);
16031 LITTLE_ENDIAN_32(dp);
16032 dp++;
16033 *dp = RD32_MBAR_REG(ha,
16034 ha->req_q[0]->mbar_req_out);
16035 LITTLE_ENDIAN_32(dp);
16036 dp++;
16037 } else if (index == 1 && ha->flags & MULTI_QUEUE) {
16038 *dp = RD32_MBAR_REG(ha,
16039 ha->req_q[1]->mbar_req_in);
16040 LITTLE_ENDIAN_32(dp);
16041 dp++;
16042 *dp = RD32_MBAR_REG(ha,
16043 ha->req_q[1]->mbar_req_out);
16044 LITTLE_ENDIAN_32(dp);
16045 dp++;
16046 } else {
16047 *dp++ = 0;
16048 *dp++ = 0;
16049 }
16050 if (ha->flags & MULTI_QUEUE) {
16051 *dp = RD32_MBAR_REG(ha,
16052 ha->rsp_queues[index]->mbar_rsp_in);
16053 LITTLE_ENDIAN_32(dp);
16054 dp++;
16055 *dp = RD32_MBAR_REG(ha,
16056 ha->rsp_queues[index]->mbar_rsp_out);
16057 LITTLE_ENDIAN_32(dp);
16058 dp++;
16059 } else {
16060 *dp++ = 0;
16061 *dp++ = 0;
16062 }
16063 }
16064 /* Get the request queue */
16065 (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
16066 DDI_DMA_SYNC_FORCPU);
16067 w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
16068 for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
16069 *dp = *w32ptr++;
16070 LITTLE_ENDIAN_32(dp);
16071 dp++;
16072 }
16073 if (ha->req_q[1] != NULL) {
16074 (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
16075 0, 0, DDI_DMA_SYNC_FORCPU);
16076 w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
16077 for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
16078 *dp = *w32ptr++;
16079 LITTLE_ENDIAN_32(dp);
16080 dp++;
16081 }
16082 }
16083
16084 /* Get the response queues */
16085 for (index = 0; index < ha->rsp_queues_cnt; index++) {
16086 (void) ddi_dma_sync(
16087 ha->rsp_queues[index]->rsp_ring.dma_handle,
16088 0, 0, DDI_DMA_SYNC_FORCPU);
16089 w32ptr = (uint32_t *)
16090 ha->rsp_queues[index]->rsp_ring.bp;
16091 for (cnt = 0;
16092 cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
16093 cnt++) {
16094 *dp = *w32ptr++;
16095 LITTLE_ENDIAN_32(dp);
16096 dp++;
16097 }
16098 }
16099 }
16100
16101 /* Reset RISC. */
16102 ql_reset_chip(ha);
16103
16104 /* Memory. */
16105 if (rval == QL_SUCCESS) {
16106 /* Code RAM. */
16107 rval = ql_read_risc_ram(ha, 0x20000,
16108 sizeof (fw->code_ram) / 4, fw->code_ram);
16109 }
16110 if (rval == QL_SUCCESS) {
16111 /* External Memory. */
16112 rval = ql_read_risc_ram(ha, 0x100000,
16113 ha->fw_ext_memory_size / 4, dp);
16114 }
16115
16116 /* Get the FC event trace buffer */
16117 if (rval == QL_SUCCESS) {
16118 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
16119 (ha->fwfcetracebuf.bp != NULL)) {
16120 uint32_t cnt;
16121 uint32_t *w32 = ha->fwfcetracebuf.bp;
16122
16123 /* Sync DMA buffer. */
16124 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
16125 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
16126
16127 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
16128 fw->fce_trace_buf[cnt] = *w32++;
16129 }
16130 }
16131 }
16132
16133 /* Get the extended trace buffer */
16134 if (rval == QL_SUCCESS) {
16135 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
16136 (ha->fwexttracebuf.bp != NULL)) {
16137 uint32_t cnt;
16138 uint32_t *w32 = ha->fwexttracebuf.bp;
16139
16140 /* Sync DMA buffer. */
16141 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
16142 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
16143
16144 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
16145 fw->ext_trace_buf[cnt] = *w32++;
16146 }
16147 }
16148 }
16149
16150 if (rval != QL_SUCCESS) {
16151 EL(ha, "failed=%xh\n", rval);
16152 } else {
16153 /*EMPTY*/
16154 QL_PRINT_3(ha, "done\n");
16155 }
16156
16157 return (rval);
16158 }
16159
16160 /*
16161 * ql_read_risc_ram
16162 * Reads RISC RAM one word at a time.
16163 * Risc interrupts must be disabled when this routine is called.
16164 *
16165 * Input:
16166 * ha: adapter state pointer.
16167 * risc_address: RISC code start address.
16168 * len: Number of words.
16169 * buf: buffer pointer.
16170 *
16171 * Returns:
16172 * ql local function return status code.
16173 *
16174 * Context:
16175 * Interrupt or Kernel context, no mailbox commands allowed.
16176 */
16177 static int
16178 ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
16179 void *buf)
16180 {
16181 uint32_t cnt;
16182 uint16_t stat;
16183 clock_t timer;
16184 uint16_t *buf16 = (uint16_t *)buf;
16185 uint32_t *buf32 = (uint32_t *)buf;
16186 int rval = QL_SUCCESS;
16187
16188 for (cnt = 0; cnt < len; cnt++, risc_address++) {
16189 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
16190 WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
16191 WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
16192 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16193 WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
16194 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16195 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
16196 } else {
16197 WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
16198 }
16199 for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
16200 if (INTERRUPT_PENDING(ha)) {
16201 stat = (uint16_t)
16202 (RD16_IO_REG(ha, risc2host) & 0xff);
16203 if ((stat == 1) || (stat == 0x10)) {
16204 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16205 buf32[cnt] = SHORT_TO_LONG(
16206 RD16_IO_REG(ha,
16207 mailbox_out[2]),
16208 RD16_IO_REG(ha,
16209 mailbox_out[3]));
16210 } else {
16211 buf16[cnt] =
16212 RD16_IO_REG(ha,
16213 mailbox_out[2]);
16214 }
16215
16216 break;
16217 } else if ((stat == 2) || (stat == 0x11)) {
16218 rval = RD16_IO_REG(ha, mailbox_out[0]);
16219 break;
16220 }
16221 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16222 ql_8021_clr_hw_intr(ha);
16223 ql_8021_clr_fw_intr(ha);
16224 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16225 WRT32_IO_REG(ha, hccr,
16226 HC24_CLR_RISC_INT);
16227 RD32_IO_REG(ha, hccr);
16228 } else {
16229 WRT16_IO_REG(ha, semaphore, 0);
16230 WRT16_IO_REG(ha, hccr,
16231 HC_CLR_RISC_INT);
16232 RD16_IO_REG(ha, hccr);
16233 }
16234 }
16235 drv_usecwait(5);
16236 }
16237 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16238 ql_8021_clr_hw_intr(ha);
16239 ql_8021_clr_fw_intr(ha);
16240 } else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
16241 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
16242 RD32_IO_REG(ha, hccr);
16243 } else {
16244 WRT16_IO_REG(ha, semaphore, 0);
16245 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
16246 RD16_IO_REG(ha, hccr);
16247 }
16248
16249 if (timer == 0) {
16250 rval = QL_FUNCTION_TIMEOUT;
16251 }
16252 }
16253
16254 return (rval);
16255 }
16256
16257 /*
16258 * ql_read_regs
16259 * Reads adapter registers to buffer.
16260 *
16261 * Input:
16262 * ha: adapter state pointer.
16263 * buf: buffer pointer.
16264 * reg: start address.
16265 * count: number of registers.
16266 * wds: register size.
16267 *
16268 * Context:
16269 * Interrupt or Kernel context, no mailbox commands allowed.
16270 */
16271 static void *
16272 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
16273 uint8_t wds)
16274 {
16275 uint32_t *bp32, *reg32;
16276 uint16_t *bp16, *reg16;
16277 uint8_t *bp8, *reg8;
16278
16279 switch (wds) {
16280 case 32:
16281 bp32 = buf;
16282 reg32 = reg;
16283 while (count--) {
16284 *bp32++ = RD_REG_DWORD(ha, reg32++);
16285 }
16286 return (bp32);
16287 case 16:
16288 bp16 = buf;
16289 reg16 = reg;
16290 while (count--) {
16291 *bp16++ = RD_REG_WORD(ha, reg16++);
16292 }
16293 return (bp16);
16294 case 8:
16295 bp8 = buf;
16296 reg8 = reg;
16297 while (count--) {
16298 *bp8++ = RD_REG_BYTE(ha, reg8++);
16299 }
16300 return (bp8);
16301 default:
16302 EL(ha, "Unknown word size=%d\n", wds);
16303 return (buf);
16304 }
16305 }
16306
16307 static int
16308 ql_save_config_regs(dev_info_t *dip)
16309 {
16310 ql_adapter_state_t *ha;
16311 int ret;
16312 ql_config_space_t chs;
16313 caddr_t prop = "ql-config-space";
16314
16315 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
16316 if (ha == NULL) {
16317 QL_PRINT_2(NULL, "no adapter instance=%d\n",
16318 ddi_get_instance(dip));
16319 return (DDI_FAILURE);
16320 }
16321
16322 QL_PRINT_3(ha, "started\n");
16323
16324 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
16325 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
16326 1) {
16327 QL_PRINT_2(ha, "no prop exit\n");
16328 return (DDI_SUCCESS);
16329 }
16330
16331 chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
16332 chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
16333 PCI_CONF_HEADER);
16334 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16335 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
16336 PCI_BCNF_BCNTRL);
16337 }
16338
16339 chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
16340 PCI_CONF_CACHE_LINESZ);
16341
16342 chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
16343 PCI_CONF_LATENCY_TIMER);
16344
16345 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16346 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
16347 PCI_BCNF_LATENCY_TIMER);
16348 }
16349
16350 chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
16351 chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
16352 chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
16353 chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
16354 chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
16355 chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
16356
16357 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
16358 ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
16359 (uchar_t *)&chs, sizeof (ql_config_space_t));
16360
16361 if (ret != DDI_PROP_SUCCESS) {
16362 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
16363 QL_NAME, ddi_get_instance(dip), prop);
16364 return (DDI_FAILURE);
16365 }
16366
16367 QL_PRINT_3(ha, "done\n");
16368
16369 return (DDI_SUCCESS);
16370 }
16371
16372 static int
16373 ql_restore_config_regs(dev_info_t *dip)
16374 {
16375 ql_adapter_state_t *ha;
16376 uint_t elements;
16377 ql_config_space_t *chs_p;
16378 caddr_t prop = "ql-config-space";
16379
16380 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
16381 if (ha == NULL) {
16382 QL_PRINT_2(NULL, "no adapter instance=%d\n",
16383 ddi_get_instance(dip));
16384 return (DDI_FAILURE);
16385 }
16386
16387 QL_PRINT_3(ha, "started\n");
16388
16389 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
16390 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
16391 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
16392 (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
16393 QL_PRINT_2(ha, "no prop exit\n");
16394 return (DDI_FAILURE);
16395 }
16396
16397 ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
16398
16399 if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16400 ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
16401 chs_p->chs_bridge_control);
16402 }
16403
16404 ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
16405 chs_p->chs_cache_line_size);
16406
16407 ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
16408 chs_p->chs_latency_timer);
16409
16410 if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
16411 ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
16412 chs_p->chs_sec_latency_timer);
16413 }
16414
16415 ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
16416 ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
16417 ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
16418 ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
16419 ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
16420 ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
16421
16422 ddi_prop_free(chs_p);
16423
16424 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
16425 if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
16426 cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
16427 QL_NAME, ddi_get_instance(dip), prop);
16428 }
16429
16430 QL_PRINT_3(ha, "done\n");
16431
16432 return (DDI_SUCCESS);
16433 }
16434
16435 uint8_t
16436 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
16437 {
16438 if (CFG_IST(ha, CFG_SBUS_CARD)) {
16439 return (ddi_get8(ha->sbus_config_handle,
16440 (uint8_t *)(ha->sbus_config_base + off)));
16441 }
16442
16443 #ifdef KERNEL_32
16444 return (pci_config_getb(ha->pci_handle, off));
16445 #else
16446 return (pci_config_get8(ha->pci_handle, off));
16447 #endif
16448 }
16449
16450 uint16_t
16451 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
16452 {
16453 if (CFG_IST(ha, CFG_SBUS_CARD)) {
16454 return (ddi_get16(ha->sbus_config_handle,
16455 (uint16_t *)(ha->sbus_config_base + off)));
16456 }
16457
16458 #ifdef KERNEL_32
16459 return (pci_config_getw(ha->pci_handle, off));
16460 #else
16461 return (pci_config_get16(ha->pci_handle, off));
16462 #endif
16463 }
16464
16465 uint32_t
16466 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
16467 {
16468 if (CFG_IST(ha, CFG_SBUS_CARD)) {
16469 return (ddi_get32(ha->sbus_config_handle,
16470 (uint32_t *)(ha->sbus_config_base + off)));
16471 }
16472
16473 #ifdef KERNEL_32
16474 return (pci_config_getl(ha->pci_handle, off));
16475 #else
16476 return (pci_config_get32(ha->pci_handle, off));
16477 #endif
16478 }
16479
16480 void
16481 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
16482 {
16483 if (CFG_IST(ha, CFG_SBUS_CARD)) {
16484 ddi_put8(ha->sbus_config_handle,
16485 (uint8_t *)(ha->sbus_config_base + off), val);
16486 } else {
16487 #ifdef KERNEL_32
16488 pci_config_putb(ha->pci_handle, off, val);
16489 #else
16490 pci_config_put8(ha->pci_handle, off, val);
16491 #endif
16492 }
16493 }
16494
16495 void
16496 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
16497 {
16498 if (CFG_IST(ha, CFG_SBUS_CARD)) {
16499 ddi_put16(ha->sbus_config_handle,
16500 (uint16_t *)(ha->sbus_config_base + off), val);
16501 } else {
16502 #ifdef KERNEL_32
16503 pci_config_putw(ha->pci_handle, off, val);
16504 #else
16505 pci_config_put16(ha->pci_handle, off, val);
16506 #endif
16507 }
16508 }
16509
16510 void
16511 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
16512 {
16513 if (CFG_IST(ha, CFG_SBUS_CARD)) {
16514 ddi_put32(ha->sbus_config_handle,
16515 (uint32_t *)(ha->sbus_config_base + off), val);
16516 } else {
16517 #ifdef KERNEL_32
16518 pci_config_putl(ha->pci_handle, off, val);
16519 #else
16520 pci_config_put32(ha->pci_handle, off, val);
16521 #endif
16522 }
16523 }
16524
16525 /*
16526 * ql_halt
16527 * Waits for commands that are running to finish and
16528 * if they do not, commands are aborted.
16529 * Finally the adapter is reset.
16530 *
16531 * Input:
16532 * ha: adapter state pointer.
16533 * pwr: power state.
16534 *
16535 * Context:
16536 * Kernel context.
16537 */
16538 static void
16539 ql_halt(ql_adapter_state_t *ha, int pwr)
16540 {
16541 ql_link_t *link;
16542 ql_response_q_t *rsp_q;
16543 ql_tgt_t *tq;
16544 ql_srb_t *sp;
16545 uint32_t cnt, i;
16546 uint16_t index;
16547
16548 QL_PRINT_3(ha, "started\n");
16549
16550 /* Wait for all commands running to finish. */
16551 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
16552 for (link = ha->dev[index].first; link != NULL;
16553 link = link->next) {
16554 tq = link->base_address;
16555 (void) ql_abort_device(ha, tq, 0);
16556
16557 /* Wait for 30 seconds for commands to finish. */
16558 for (cnt = 3000; cnt != 0; cnt--) {
16559 /* Acquire device queue lock. */
16560 DEVICE_QUEUE_LOCK(tq);
16561 if (tq->outcnt == 0) {
16562 /* Release device queue lock. */
16563 DEVICE_QUEUE_UNLOCK(tq);
16564 break;
16565 } else {
16566 /* Release device queue lock. */
16567 DEVICE_QUEUE_UNLOCK(tq);
16568 ql_delay(ha, 10000);
16569 }
16570 }
16571
16572 /* Finish any commands waiting for more status. */
16573 for (i = 0; i < ha->rsp_queues_cnt; i++) {
16574 if ((rsp_q = ha->rsp_queues[i]) != NULL &&
16575 (sp = rsp_q->status_srb) != NULL) {
16576 rsp_q->status_srb = NULL;
16577 sp->cmd.next = NULL;
16578 ql_done(&sp->cmd, B_FALSE);
16579 }
16580 }
16581
16582 /* Abort commands that did not finish. */
16583 if (cnt == 0) {
16584 for (cnt = 1; cnt < ha->osc_max_cnt;
16585 cnt++) {
16586 if (ha->pending_cmds.first != NULL) {
16587 ql_start_iocb(ha, NULL);
16588 cnt = 1;
16589 }
16590 sp = ha->outstanding_cmds[cnt];
16591 if (sp != NULL &&
16592 sp != QL_ABORTED_SRB(ha) &&
16593 sp->lun_queue->target_queue ==
16594 tq) {
16595 (void) ql_abort_io(ha, sp);
16596 sp->pkt->pkt_reason =
16597 CS_ABORTED;
16598 sp->cmd.next = NULL;
16599 ql_done(&sp->cmd, B_FALSE);
16600 }
16601 }
16602 }
16603 }
16604 }
16605
16606 /* Shutdown IP. */
16607 if (ha->flags & IP_INITIALIZED) {
16608 (void) ql_shutdown_ip(ha);
16609 }
16610
16611 /* Stop all timers. */
16612 ADAPTER_STATE_LOCK(ha);
16613 ha->port_retry_timer = 0;
16614 ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
16615 ha->watchdog_timer = 0;
16616 ADAPTER_STATE_UNLOCK(ha);
16617
16618 if (pwr == PM_LEVEL_D3 && ha->flags & ONLINE) {
16619 ADAPTER_STATE_LOCK(ha);
16620 ha->flags &= ~ONLINE;
16621 ADAPTER_STATE_UNLOCK(ha);
16622
16623 if (CFG_IST(ha, CFG_CTRL_82XX)) {
16624 ql_8021_clr_drv_active(ha);
16625 }
16626
16627 /* Reset ISP chip. */
16628 ql_reset_chip(ha);
16629 }
16630
16631 QL_PRINT_3(ha, "done\n");
16632 }
16633
16634 /*
16635 * ql_get_dma_mem
16636 * Function used to allocate dma memory.
16637 *
16638 * Input:
16639 * ha: adapter state pointer.
16640 * mem: pointer to dma memory object.
16641 * size: size of the request in bytes
16642 *
16643 * Returns:
16644 * qn local function return status code.
16645 *
16646 * Context:
16647 * Kernel context.
16648 */
16649 int
16650 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
16651 mem_alloc_type_t allocation_type, mem_alignment_t alignment)
16652 {
16653 int rval;
16654
16655 QL_PRINT_3(ha, "started\n");
16656
16657 mem->size = size;
16658 mem->type = allocation_type;
16659 mem->max_cookie_count = 1;
16660
16661 switch (alignment) {
16662 case QL_DMA_DATA_ALIGN:
16663 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
16664 break;
16665 case QL_DMA_RING_ALIGN:
16666 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
16667 break;
16668 default:
16669 EL(ha, "failed, unknown alignment type %x\n", alignment);
16670 break;
16671 }
16672
16673 if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
16674 ql_free_phys(ha, mem);
16675 EL(ha, "failed, alloc_phys=%xh\n", rval);
16676 }
16677
16678 QL_PRINT_3(ha, "done\n");
16679
16680 return (rval);
16681 }
16682
16683 /*
16684 * ql_free_dma_resource
16685 * Function used to free dma memory.
16686 *
16687 * Input:
16688 * ha: adapter state pointer.
16689 * mem: pointer to dma memory object.
16690 * mem->dma_handle DMA memory handle.
16691 *
16692 * Context:
16693 * Kernel context.
16694 */
16695 void
16696 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
16697 {
16698 QL_PRINT_3(ha, "started\n");
16699
16700 ql_free_phys(ha, mem);
16701
16702 QL_PRINT_3(ha, "done\n");
16703 }
16704
16705 /*
16706 * ql_alloc_phys
16707 * Function used to allocate memory and zero it.
16708 * Memory is below 4 GB.
16709 *
16710 * Input:
16711 * ha: adapter state pointer.
16712 * mem: pointer to dma memory object.
16713 * sleep: KM_SLEEP/KM_NOSLEEP flag.
16714 * mem->cookie_count number of segments allowed.
16715 * mem->type memory allocation type.
16716 * mem->size memory size.
16717 * mem->alignment memory alignment.
16718 *
16719 * Returns:
16720 * ql local function return status code.
16721 *
16722 * Context:
16723 * Kernel context.
16724 */
16725 int
16726 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
16727 {
16728 size_t rlen;
16729 ddi_dma_attr_t dma_attr = ha->io_dma_attr;
16730 ddi_device_acc_attr_t acc_attr = ql_dev_acc_attr;
16731
16732 QL_PRINT_3(ha, "started\n");
16733
16734 dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
16735 dma_attr.dma_attr_sgllen = (int)mem->max_cookie_count;
16736
16737 /*
16738 * Workaround for SUN XMITS buffer must end and start on 8 byte
16739 * boundary. Else, hardware will overrun the buffer. Simple fix is
16740 * to make sure buffer has enough room for overrun.
16741 */
16742 if (mem->size & 7) {
16743 mem->size += 8 - (mem->size & 7);
16744 }
16745
16746 mem->flags = DDI_DMA_CONSISTENT;
16747
16748 /*
16749 * Allocate DMA memory for command.
16750 */
16751 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
16752 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
16753 DDI_SUCCESS) {
16754 EL(ha, "failed, ddi_dma_alloc_handle\n");
16755 mem->dma_handle = NULL;
16756 return (QL_MEMORY_ALLOC_FAILED);
16757 }
16758
16759 switch (mem->type) {
16760 case KERNEL_MEM:
16761 mem->bp = kmem_zalloc(mem->size, sleep);
16762 break;
16763 case BIG_ENDIAN_DMA:
16764 case LITTLE_ENDIAN_DMA:
16765 case NO_SWAP_DMA:
16766 if (mem->type == BIG_ENDIAN_DMA) {
16767 acc_attr.devacc_attr_endian_flags =
16768 DDI_STRUCTURE_BE_ACC;
16769 } else if (mem->type == NO_SWAP_DMA) {
16770 acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
16771 }
16772 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
16773 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
16774 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
16775 &mem->acc_handle) == DDI_SUCCESS) {
16776 bzero(mem->bp, mem->size);
16777 /* ensure we got what we asked for (32bit) */
16778 if (dma_attr.dma_attr_addr_hi == NULL) {
16779 if (mem->cookie.dmac_notused != NULL) {
16780 EL(ha, "failed, ddi_dma_mem_alloc "
16781 "returned 64 bit DMA address\n");
16782 ql_free_phys(ha, mem);
16783 return (QL_MEMORY_ALLOC_FAILED);
16784 }
16785 }
16786 } else {
16787 mem->acc_handle = NULL;
16788 mem->bp = NULL;
16789 }
16790 break;
16791 default:
16792 EL(ha, "failed, unknown type=%xh\n", mem->type);
16793 mem->acc_handle = NULL;
16794 mem->bp = NULL;
16795 break;
16796 }
16797
16798 if (mem->bp == NULL) {
16799 EL(ha, "failed, ddi_dma_mem_alloc\n");
16800 ddi_dma_free_handle(&mem->dma_handle);
16801 mem->dma_handle = NULL;
16802 return (QL_MEMORY_ALLOC_FAILED);
16803 }
16804
16805 mem->flags |= DDI_DMA_RDWR;
16806
16807 if (qlc_fm_check_dma_handle(ha, mem->dma_handle)
16808 != DDI_FM_OK) {
16809 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
16810 ql_free_phys(ha, mem);
16811 qlc_fm_report_err_impact(ha,
16812 QL_FM_EREPORT_DMA_HANDLE_CHECK);
16813 return (QL_MEMORY_ALLOC_FAILED);
16814 }
16815
16816 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
16817 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
16818 ql_free_phys(ha, mem);
16819 return (QL_MEMORY_ALLOC_FAILED);
16820 }
16821
16822 QL_PRINT_3(ha, "done\n");
16823
16824 return (QL_SUCCESS);
16825 }
16826
16827 /*
16828 * ql_free_phys
16829 * Function used to free physical memory.
16830 *
16831 * Input:
16832 * ha: adapter state pointer.
16833 * mem: pointer to dma memory object.
16834 *
16835 * Context:
16836 * Kernel context.
16837 */
16838 void
16839 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
16840 {
16841 QL_PRINT_3(ha, "started\n");
16842
16843 if (mem != NULL) {
16844 if (mem->memflags == DDI_DMA_MAPPED) {
16845 ql_unbind_dma_buffer(ha, mem);
16846 }
16847
16848 switch (mem->type) {
16849 case KERNEL_MEM:
16850 if (mem->bp != NULL) {
16851 kmem_free(mem->bp, mem->size);
16852 mem->bp = NULL;
16853 }
16854 break;
16855 case LITTLE_ENDIAN_DMA:
16856 case BIG_ENDIAN_DMA:
16857 case NO_SWAP_DMA:
16858 if (mem->acc_handle != NULL) {
16859 ddi_dma_mem_free(&mem->acc_handle);
16860 mem->acc_handle = NULL;
16861 mem->bp = NULL;
16862 }
16863 break;
16864 default:
16865 break;
16866 }
16867 if (mem->dma_handle != NULL) {
16868 ddi_dma_free_handle(&mem->dma_handle);
16869 mem->dma_handle = NULL;
16870 }
16871 }
16872
16873 QL_PRINT_3(ha, "done\n");
16874 }
16875
16876 /*
16877 * ql_bind_dma_buffer
16878 * Binds DMA buffer.
16879 *
16880 * Input:
16881 * ha: adapter state pointer.
16882 * mem: pointer to dma memory object.
16883 * kmflags: KM_SLEEP or KM_NOSLEEP.
16884 * mem->dma_handle DMA memory handle.
16885 * mem->max_cookie_count number of segments allowed.
16886 * mem->type memory allocation type.
16887 * mem->size memory size.
16888 * mem->bp pointer to memory or struct buf
16889 *
16890 * Returns:
16891 * mem->cookies pointer to list of cookies.
16892 * mem->cookie_count number of cookies.
16893 * status success = DDI_DMA_MAPPED
16894 * DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
16895 * DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
16896 * DDI_DMA_TOOBIG
16897 *
16898 * Context:
16899 * Kernel context.
16900 */
16901 static int
16902 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int kmflags)
16903 {
16904 ddi_dma_cookie_t *cookiep;
16905 uint32_t cnt;
16906
16907 QL_PRINT_3(ha, "started\n");
16908
16909 mem->memflags = ddi_dma_addr_bind_handle(mem->dma_handle, NULL,
16910 mem->bp, mem->size, mem->flags, (kmflags == KM_SLEEP) ?
16911 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
16912 &mem->cookie_count);
16913
16914 if (mem->memflags == DDI_DMA_MAPPED) {
16915 if (mem->cookie_count > mem->max_cookie_count) {
16916 (void) ddi_dma_unbind_handle(mem->dma_handle);
16917 EL(ha, "failed, cookie_count %d > %d\n",
16918 mem->cookie_count, mem->max_cookie_count);
16919 mem->memflags = (uint32_t)DDI_DMA_TOOBIG;
16920 } else {
16921 if (mem->cookie_count > 1) {
16922 if (mem->cookies = kmem_zalloc(
16923 sizeof (ddi_dma_cookie_t) *
16924 mem->cookie_count, kmflags)) {
16925 *mem->cookies = mem->cookie;
16926 cookiep = mem->cookies;
16927 for (cnt = 1; cnt < mem->cookie_count;
16928 cnt++) {
16929 ddi_dma_nextcookie(
16930 mem->dma_handle,
16931 ++cookiep);
16932 }
16933 } else {
16934 (void) ddi_dma_unbind_handle(
16935 mem->dma_handle);
16936 EL(ha, "failed, kmem_zalloc\n");
16937 mem->memflags = (uint32_t)
16938 DDI_DMA_NORESOURCES;
16939 }
16940 } else {
16941 /*
16942 * It has been reported that dmac_size at times
16943 * may be incorrect on sparc machines so for
16944 * sparc machines that only have one segment
16945 * use the buffer size instead.
16946 */
16947 mem->cookies = &mem->cookie;
16948 mem->cookies->dmac_size = mem->size;
16949 }
16950 }
16951 }
16952
16953 if (mem->memflags != DDI_DMA_MAPPED) {
16954 EL(ha, "failed=%xh\n", mem->memflags);
16955 } else {
16956 /*EMPTY*/
16957 QL_PRINT_3(ha, "done\n");
16958 }
16959
16960 return (mem->memflags);
16961 }
16962
16963 /*
16964 * ql_unbind_dma_buffer
16965 * Unbinds DMA buffer.
16966 *
16967 * Input:
16968 * ha: adapter state pointer.
16969 * mem: pointer to dma memory object.
16970 * mem->dma_handle DMA memory handle.
16971 * mem->cookies pointer to cookie list.
16972 * mem->cookie_count number of cookies.
16973 *
16974 * Context:
16975 * Kernel context.
16976 */
16977 /* ARGSUSED */
16978 static void
16979 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
16980 {
16981 QL_PRINT_3(ha, "started\n");
16982
16983 if (mem->dma_handle != NULL && mem->memflags == DDI_DMA_MAPPED) {
16984 (void) ddi_dma_unbind_handle(mem->dma_handle);
16985 }
16986 if (mem->cookie_count > 1) {
16987 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
16988 mem->cookie_count);
16989 mem->cookies = NULL;
16990 }
16991 mem->cookie_count = 0;
16992 mem->memflags = (uint32_t)DDI_DMA_NORESOURCES;
16993
16994 QL_PRINT_3(ha, "done\n");
16995 }
16996
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend: drains outstanding commands,
 *	claims mailbox ownership so no mailbox user is caught mid-command,
 *	then disables ISP interrupts (unless already powered down to D3).
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS, or QL_FUNCTION_TIMEOUT if the mailbox stayed busy.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* cv wait budget per attempt; 32 seconds worth of ticks. */
	clock_t timer = (clock_t)(32 * drv_usectohz(1000000));

	QL_PRINT_3(ha, "started\n");

	/* Let in-flight commands complete before halting. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* Wait up to 32 seconds (see timer above) from now. */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		ql_disable_intr(ha);
	}

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");

	return (QL_SUCCESS);
}
17062
17063 /*
17064 * ql_add_link_b
17065 * Add link to the end of the chain.
17066 *
17067 * Input:
17068 * head = Head of link list.
17069 * link = link to be added.
17070 * LOCK must be already obtained.
17071 *
17072 * Context:
17073 * Interrupt or Kernel context, no mailbox commands allowed.
17074 */
17075 void
17076 ql_add_link_b(ql_head_t *head, ql_link_t *link)
17077 {
17078 if (link->head != NULL) {
17079 EL(NULL, "link in use by list=%ph\n", link->head);
17080 }
17081
17082 /* at the end there isn't a next */
17083 link->next = NULL;
17084
17085 if ((link->prev = head->last) == NULL) {
17086 head->first = link;
17087 } else {
17088 head->last->next = link;
17089 }
17090
17091 head->last = link;
17092 link->head = head; /* the queue we're on */
17093 }
17094
17095 /*
17096 * ql_add_link_t
17097 * Add link to the beginning of the chain.
17098 *
17099 * Input:
17100 * head = Head of link list.
17101 * link = link to be added.
17102 * LOCK must be already obtained.
17103 *
17104 * Context:
17105 * Interrupt or Kernel context, no mailbox commands allowed.
17106 */
17107 void
17108 ql_add_link_t(ql_head_t *head, ql_link_t *link)
17109 {
17110 if (link->head != NULL) {
17111 EL(NULL, "link in use by list=%ph\n", link->head);
17112 }
17113 link->prev = NULL;
17114
17115 if ((link->next = head->first) == NULL) {
17116 head->last = link;
17117 } else {
17118 head->first->prev = link;
17119 }
17120
17121 head->first = link;
17122 link->head = head; /* the queue we're on */
17123 }
17124
17125 /*
17126 * ql_remove_link
17127 * Remove a link from the chain.
17128 *
17129 * Input:
17130 * head = Head of link list.
17131 * link = link to be removed.
17132 * associated proper LOCK must be already obtained.
17133 *
17134 * Context:
17135 * Interrupt or Kernel context, no mailbox commands allowed.
17136 */
17137 void
17138 ql_remove_link(ql_head_t *head, ql_link_t *link)
17139 {
17140 if (head != NULL) {
17141 if (link->prev != NULL) {
17142 if ((link->prev->next = link->next) == NULL) {
17143 head->last = link->prev;
17144 } else {
17145 link->next->prev = link->prev;
17146 }
17147 } else if ((head->first = link->next) == NULL) {
17148 head->last = NULL;
17149 } else {
17150 head->first->prev = NULL;
17151 }
17152
17153 /* not on a queue any more */
17154 link->prev = link->next = NULL;
17155 link->head = NULL;
17156 }
17157 }
17158
17159 /*
17160 * ql_chg_endian
17161 * Change endianess of byte array.
17162 *
17163 * Input:
17164 * buf = array pointer.
17165 * size = size of array in bytes.
17166 *
17167 * Context:
17168 * Interrupt or Kernel context, no mailbox commands allowed.
17169 */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo;
	size_t	hi;
	uint8_t	tmp;

	/* Zero- or one-byte arrays have nothing to reverse. */
	if (size < 2) {
		return;
	}

	/* Swap symmetric pairs, working inward from both ends. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
17185
17186 /*
17187 * ql_bstr_to_dec
17188 * Convert decimal byte string to number.
17189 *
17190 * Input:
17191 * s: byte string pointer.
 *	ans:	integer pointer for number.
17193 * size: number of ascii bytes.
17194 *
17195 * Returns:
17196 * success = number of ascii bytes processed.
17197 *
17198 * Context:
17199 * Kernel/Interrupt context.
17200 */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	count = 0;
	int	digit;
	int	weight;
	uint32_t	pos;
	char	*p;

	/* If no length was supplied, count the leading decimal digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	/* Consume up to 'size' digits, stopping at the first non-digit. */
	while (size != 0 && *s >= '0' && *s <= '9') {
		digit = *s++ - '0';

		/* Positional weight = 10^(remaining digits - 1). */
		for (weight = 1, pos = 1; pos < size; pos++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		count++;
	}

	return (count);
}
17230
17231 /*
17232 * ql_delay
17233 * Calls delay routine if threads are not suspended, otherwise, busy waits
17234 * Minimum = 1 tick = 10ms
17235 *
17236 * Input:
17237 * dly = delay time in microseconds.
17238 *
17239 * Context:
17240 * Kernel or Interrupt context, no mailbox commands allowed.
17241 */
17242 void
17243 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
17244 {
17245 if (ha->flags & ADAPTER_SUSPENDED || ddi_in_panic() ||
17246 curthread->t_flag & T_INTR_THREAD) {
17247 drv_usecwait(usecs);
17248 } else {
17249 delay(drv_usectohz(usecs));
17250 }
17251 }
17252
17253 /*
 * ql_stall_driver
17255 * Stalls one or all driver instances, waits for 30 seconds.
17256 *
17257 * Input:
17258 * ha: adapter state pointer or NULL for all.
17259 * options: BIT_0 --> leave driver stalled on exit if
17260 * failed.
17261 *
17262 * Returns:
17263 * ql local function return status code.
17264 *
17265 * Context:
17266 * Kernel context.
17267 */
int
ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2 = NULL;
	uint32_t		timer;

	QL_PRINT_3(ha, "started\n");

	/* Tell all daemons to stall. */
	/* ha == NULL means iterate over every adapter on the ql_hba list. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);

		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for daemons stall. */
	/* 3000 iterations x 10ms delay below = 30 second budget. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * An adapter counts as stalled if its daemon is not
		 * running, is stopping, firmware is down, or it reports
		 * stalled with no outstanding commands remaining.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & FIRMWARE_UP) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
		    ql_wait_outstanding(ha2) == ha2->pha->osc_max_cnt)) {
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(ha2, "status, dtf=%xh, stf=%xh\n",
		    ha2->task_daemon_flags, ha2->flags);

		/* Not stalled yet: wait 10ms and restart the scan. */
		ql_delay(ha2, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	/* Timed out: optionally (BIT_0) undo the stall request. */
	if (ha2 != NULL && timer == 0) {
		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
		    "unstalled"));
		if (options & BIT_0) {
			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
		}
		return (QL_FUNCTION_TIMEOUT);
	}

	QL_PRINT_3(ha, "done\n");

	return (QL_SUCCESS);
}
17324
17325 /*
17326 * ql_restart_driver
17327 * Restarts one or all driver instances.
17328 *
17329 * Input:
17330 * ha: adapter state pointer or NULL for all.
17331 *
17332 * Context:
17333 * Kernel context.
17334 */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(ha, "started\n");

	/* Tell all daemons to unstall. */
	/* ha == NULL means iterate over every adapter on the ql_hba list. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	/* 3000 iterations x 10ms delay below = 30 second budget. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * An adapter counts as restarted when its daemon is gone,
		 * stopping, or no longer flagged as stalled.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(ha2, "restarted\n");
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(ha2, "status, tdf=%xh\n", ha2->task_daemon_flags);

		/* Still stalled: wait 10ms and restart the scan. */
		ql_delay(ha2, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(ha, "done\n");
}
17378
17379 /*
17380 * ql_setup_interrupts
17381 * Sets up interrupts based on the HBA's and platform's
17382 * capabilities (e.g., legacy / MSI / FIXED).
17383 *
17384 * Input:
17385 * ha = adapter state pointer.
17386 *
17387 * Returns:
17388 * DDI_SUCCESS or DDI_FAILURE.
17389 *
17390 * Context:
17391 * Kernel context.
17392 */
static int
ql_setup_interrupts(ql_adapter_state_t *ha)
{
	int32_t	rval = DDI_FAILURE;
	int32_t	i;
	int32_t	itypes = 0;

	QL_PRINT_3(ha, "started\n");

	/*
	 * The Solaris Advanced Interrupt Functions (aif) are only
	 * supported on s10U1 or greater.
	 */
	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
		EL(ha, "interrupt framework is not supported or is "
		    "disabled, using legacy\n");
		return (ql_legacy_intr(ha));
	} else if (ql_os_release_level == 10) {
		/*
		 * See if the advanced interrupt functions (aif) are
		 * in the kernel
		 */
		/*
		 * NOTE(review): taking the function's address and testing
		 * for NULL presumably relies on the symbol being weakly
		 * bound on S10 -- confirm before removing, compilers may
		 * warn the test is always false.
		 */
		void	*fptr = (void *)&ddi_intr_get_supported_types;

		if (fptr == NULL) {
			EL(ha, "aif is not supported, using legacy "
			    "interrupts (rev)\n");
			return (ql_legacy_intr(ha));
		}
	}

	/* See what types of interrupts this HBA and platform support */
	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
	    DDI_SUCCESS) {
		EL(ha, "get supported types failed, rval=%xh, "
		    "assuming FIXED\n", i);
		itypes = DDI_INTR_TYPE_FIXED;
	}

	EL(ha, "supported types are: %xh\n", itypes);

	/* Preference order: MSI-X, then MSI, then FIXED. */
	if ((itypes & DDI_INTR_TYPE_MSIX) &&
	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
		EL(ha, "successful MSI-X setup\n");
	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
		EL(ha, "successful MSI setup\n");
	} else {
		rval = ql_setup_fixed(ha);
	}

	if (rval != DDI_SUCCESS) {
		EL(ha, "failed, aif, rval=%xh\n", rval);
	} else {
		/* Setup mutexes */
		/* Mutexes need the interrupt priority chosen above. */
		if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
			EL(ha, "failed, mutex init ret=%xh\n", rval);
			ql_release_intr(ha);
		}
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
17457
17458 /*
17459 * ql_setup_msi
17460 * Set up aif MSI interrupts
17461 *
17462 * Input:
17463 * ha = adapter state pointer.
17464 *
17465 * Returns:
17466 * DDI_SUCCESS or DDI_FAILURE.
17467 *
17468 * Context:
17469 * Kernel context.
17470 */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	uint_t		i;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;

	QL_PRINT_3(ha, "started\n");

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!CFG_IST(ha, CFG_MSI_SUPPORT)) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1. */
	count = 1;

	/* Allocate space for interrupt handles */
	/* hsize/htable are released by ql_release_intr() on any failure. */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}
	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}
	ha->intr_pri = DDI_INTR_PRI(i);

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], ql_isr_aif,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	/* Block-capable interrupts must be enabled as a group. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < actual; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
17569
17570 /*
17571 * ql_setup_msix
17572 * Set up aif MSI-X interrupts
17573 *
17574 * Input:
17575 * ha = adapter state pointer.
17576 *
17577 * Returns:
17578 * DDI_SUCCESS or DDI_FAILURE.
17579 *
17580 * Context:
17581 * Kernel context.
17582 */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	int		hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint_t		i;

	QL_PRINT_3(ha, "started\n");

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

#ifdef __x86
	if (get_hwenv() == HW_VMWARE) {
		EL(ha, "running under hypervisor, disabling MSI-X\n");
		return (DDI_FAILURE);
	}
#endif

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) ||
	    (CFG_IST(ha, CFG_CTRL_24XX) && ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &hwvect)) !=
	    DDI_SUCCESS) || hwvect == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, hwvect);
		return (DDI_FAILURE);
	}
	QL_PRINT_10(ha, "ddi_intr_get_nintrs, hwvect=%d\n", hwvect);

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}
	QL_PRINT_10(ha, "ddi_intr_get_navail, avail=%d\n", avail);

	/* Fill out the intr table */
	/* Multi-queue wants more vectors, but never more than the h/w has. */
	count = ha->interrupt_count;
	if (ha->flags & MULTI_QUEUE && count < ha->mq_msix_vectors) {
		count = ha->mq_msix_vectors;
		/* don't exceed the h/w capability */
		if (count > hwvect) {
			count = hwvect;
		}
	}

	/* Allocate space for interrupt handles */
	/* Sized for hwvect (not count) so sparc dup handlers fit below. */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < ha->interrupt_count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}
	ha->intr_cnt = actual;
	EL(ha, "min=%d, multi-q=%d, req=%d, rcv=%d\n",
	    ha->interrupt_count, ha->mq_msix_vectors, count,
	    ha->intr_cnt);

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}
	ha->intr_pri = DDI_INTR_PRI(i);

	/* Add the interrupt handlers */
	/* The vector index is passed as the handler's second argument. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	/* Block-capable interrupts must be enabled as a group. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, actual)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
		QL_PRINT_10(ha, "intr_block_enable %d\n", actual);
	} else {
		for (i = 0; i < actual; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_release_intr(ha);
				return (ret);
			}
			QL_PRINT_10(ha, "intr_enable %d\n", i);
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
17739
17740 /*
17741 * ql_setup_fixed
17742 * Sets up aif FIXED interrupts
17743 *
17744 * Input:
17745 * ha = adapter state pointer.
17746 *
17747 * Returns:
17748 * DDI_SUCCESS or DDI_FAILURE.
17749 *
17750 * Context:
17751 * Kernel context.
17752 */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint_t		i;

	QL_PRINT_3(ha, "started\n");

	if (ql_disable_intx != 0) {
		EL(ha, "INT-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Allocate space for interrupt handles */
	/* hsize/htable are released by ql_release_intr() on any failure. */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate the interrupts */
	/* STRICT: require exactly 'count' vectors or fail the alloc. */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}
	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}
	ha->intr_pri = DDI_INTR_PRI(i);

	/* Add the interrupt handlers */
	/* The vector index is passed as the handler's second argument. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Enable interrupts */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
17825
17826 /*
17827 * ql_release_intr
17828 * Releases aif legacy interrupt resources
17829 *
17830 * Input:
17831 * ha = adapter state pointer.
17832 *
17833 * Returns:
17834 *
17835 * Context:
17836 * Kernel context.
17837 */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t	i, x;

	QL_PRINT_3(ha, "started\n");

	if (!(ha->iflags & IFLG_INTR_AIF)) {
		/* Legacy path: a single ddi_add_intr() registration. */
		ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
	} else {
		ha->iflags &= ~(IFLG_INTR_AIF);
		if (ha->htable != NULL && ha->hsize > 0) {
			/*
			 * Walk the full table size, not intr_cnt: on sparc
			 * MSI-X extra slots may hold duplicated handlers.
			 */
			i = x = (int32_t)ha->hsize /
			    (int32_t)sizeof (ddi_intr_handle_t);
			if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
				(void) ddi_intr_block_disable(ha->htable,
				    ha->intr_cnt);
			} else {
				while (i-- > 0) {
					if (ha->htable[i] == 0) {
						EL(ha, "htable[%x]=0h\n", i);
						continue;
					}

					(void) ddi_intr_disable(ha->htable[i]);
				}
			}

			/* Remove handlers, then free every allocated vector. */
			i = x;
			while (i-- > 0) {
				if (i < ha->intr_cnt) {
					(void) ddi_intr_remove_handler(
					    ha->htable[i]);
				}
				(void) ddi_intr_free(ha->htable[i]);
			}

			ha->intr_cnt = 0;
			ha->intr_cap = 0;

			kmem_free(ha->htable, ha->hsize);
			ha->htable = NULL;
			ha->hsize = 0;
		}
	}

	ha->intr_pri = NULL;

	QL_PRINT_3(ha, "done\n");
}
17888
17889 /*
17890 * ql_legacy_intr
17891 * Sets up legacy interrupts.
17892 *
17893 * NB: Only to be used if AIF (Advanced Interupt Framework)
17894 * if NOT in the kernel.
17895 *
17896 * Input:
17897 * ha = adapter state pointer.
17898 *
17899 * Returns:
17900 * DDI_SUCCESS or DDI_FAILURE.
17901 *
17902 * Context:
17903 * Kernel context.
17904 */
17905 static int
17906 ql_legacy_intr(ql_adapter_state_t *ha)
17907 {
17908 int rval;
17909
17910 QL_PRINT_3(ha, "started\n");
17911
17912 /* Get iblock cookies to initialize mutexes */
17913 if ((rval = ddi_get_iblock_cookie(ha->dip, 0, &ha->iblock_cookie)) !=
17914 DDI_SUCCESS) {
17915 EL(ha, "failed, get_iblock: %xh\n", rval);
17916 return (rval);
17917 }
17918 ha->intr_pri = (void *)ha->iblock_cookie;
17919
17920 /* Setup standard/legacy interrupt handler */
17921 if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
17922 (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
17923 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
17924 QL_NAME, ha->instance);
17925 return (rval);
17926 }
17927 ha->iflags |= IFLG_INTR_LEGACY;
17928
17929 /* Setup mutexes */
17930 if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
17931 EL(ha, "failed, mutex init ret=%xh\n", rval);
17932 ql_release_intr(ha);
17933 } else {
17934 EL(ha, "using legacy interrupts\n");
17935 }
17936 return (rval);
17937 }
17938
17939 /*
17940 * ql_init_mutex
17941 * Initializes mutex's
17942 *
17943 * Input:
17944 * ha = adapter state pointer.
17945 *
17946 * Returns:
17947 * DDI_SUCCESS or DDI_FAILURE.
17948 *
17949 * Context:
17950 * Kernel context.
17951 */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(ha, "started\n");

	/*
	 * NOTE: ha->intr_pri must already be set (by the interrupt setup
	 * path) before this is called; every mutex below is created at
	 * that interrupt priority.  The order here is mirrored (reversed)
	 * by ql_destroy_mutex().
	 */

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* I/O completion queue protection. */
	mutex_init(&ha->comp_q_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
	cv_init(&ha->cv_comp_thread, NULL, CV_DRIVER, NULL);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* Unsolicited buffer conditional variable. */
	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
17995
17996 /*
17997 * ql_destroy_mutex
17998 * Destroys mutex's
17999 *
18000 * Input:
18001 * ha = adapter state pointer.
18002 *
18003 * Returns:
18004 *
18005 * Context:
18006 * Kernel context.
18007 */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(ha, "started\n");

	/*
	 * Tear-down is the exact reverse of ql_init_mutex(); keep the two
	 * functions in sync when adding or removing a lock or cv.
	 */
	mutex_destroy(&ha->dump_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->ub_mutex);
	mutex_destroy(&ha->pm_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->mbx_mutex);
	cv_destroy(&ha->cv_comp_thread);
	mutex_destroy(&ha->comp_q_mutex);
	mutex_destroy(&ha->req_ring_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(ha, "done\n");
}
18030
18031 /*
18032 * ql_fwmodule_resolve
18033 * Loads and resolves external firmware module and symbols
18034 *
18035 * Input:
18036 * ha: adapter state pointer.
18037 *
18038 * Returns:
18039 * ql local function return status code:
18040 * QL_SUCCESS - external f/w module module and symbols resolved
18041 * QL_FW_NOT_SUPPORTED - Driver does not support ISP type
18042 * QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
18043 * QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
18044 * Context:
18045 * Kernel context.
18046 *
18047 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time. We
18048 * could switch to a tighter scope around acutal download (and add an extra
18049 * ddi_modopen for module opens that occur before root is mounted).
18050 *
18051 */
18052 uint32_t
18053 ql_fwmodule_resolve(ql_adapter_state_t *ha)
18054 {
18055 int8_t module[128];
18056 int8_t fw_version[128];
18057 uint32_t rval = QL_SUCCESS;
18058 caddr_t code, code02, code03;
18059 uint8_t *p_ucfw;
18060 uint16_t *p_usaddr, *p_uslen;
18061 uint32_t *p_uiaddr, *p_uilen, *p_uifw;
18062 uint32_t *p_uiaddr02, *p_uilen02, *p_uilen03;
18063 struct fw_table *fwt;
18064 extern struct fw_table fw_table[];
18065
18066 QL_PRINT_3(ha, "started\n");
18067
18068 if (ha->fw_module != NULL) {
18069 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
18070 ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
18071 ha->fw_subminor_version);
18072 return (rval);
18073 }
18074
18075 /* make sure the fw_class is in the fw_table of supported classes */
18076 for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
18077 if (fwt->fw_class == ha->fw_class)
18078 break; /* match */
18079 }
18080 if (fwt->fw_version == NULL) {
18081 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
18082 "in driver's fw_table", QL_NAME, ha->instance,
18083 ha->fw_class);
18084 return (QL_FW_NOT_SUPPORTED);
18085 }
18086
18087 /*
18088 * open the module related to the fw_class
18089 */
18090 (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
18091 ha->fw_class);
18092
18093 ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
18094 if (ha->fw_module == NULL) {
18095 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
18096 QL_NAME, ha->instance, module);
18097 return (QL_FWMODLOAD_FAILED);
18098 }
18099
18100 /*
18101 * resolve the fw module symbols, data types depend on fw_class
18102 */
18103
18104 switch (ha->fw_class) {
18105 case 0x2200:
18106 case 0x2300:
18107 case 0x6322:
18108
18109 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
18110 NULL)) == NULL) {
18111 rval = QL_FWSYM_NOT_FOUND;
18112 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
18113 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
18114 "risc_code_addr01", NULL)) == NULL) {
18115 rval = QL_FWSYM_NOT_FOUND;
18116 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
18117 } else if ((p_uslen = ddi_modsym(ha->fw_module,
18118 "risc_code_length01", NULL)) == NULL) {
18119 rval = QL_FWSYM_NOT_FOUND;
18120 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
18121 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
18122 "firmware_version", NULL)) == NULL) {
18123 rval = QL_FWSYM_NOT_FOUND;
18124 EL(ha, "failed, f/w module %d fwver symbol\n", module);
18125 }
18126
18127 if (rval == QL_SUCCESS) {
18128 ha->risc_fw[0].code = code;
18129 ha->risc_fw[0].addr = *p_usaddr;
18130 ha->risc_fw[0].length = *p_uslen;
18131
18132 (void) snprintf(fw_version, sizeof (fw_version),
18133 "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
18134 }
18135 break;
18136
18137 case 0x2400:
18138 case 0x2500:
18139 case 0x2700:
18140 case 0x8100:
18141 case 0x8301fc:
18142
18143 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
18144 NULL)) == NULL) {
18145 rval = QL_FWSYM_NOT_FOUND;
18146 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
18147 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
18148 "risc_code_addr01", NULL)) == NULL) {
18149 rval = QL_FWSYM_NOT_FOUND;
18150 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
18151 } else if ((p_uilen = ddi_modsym(ha->fw_module,
18152 "risc_code_length01", NULL)) == NULL) {
18153 rval = QL_FWSYM_NOT_FOUND;
18154 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
18155 } else if ((p_uifw = ddi_modsym(ha->fw_module,
18156 "firmware_version", NULL)) == NULL) {
18157 rval = QL_FWSYM_NOT_FOUND;
18158 EL(ha, "failed, f/w module %d fwver symbol\n", module);
18159 }
18160
18161 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
18162 NULL)) == NULL) {
18163 rval = QL_FWSYM_NOT_FOUND;
18164 EL(ha, "failed, f/w module %d rc02 symbol\n", module);
18165 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
18166 "risc_code_addr02", NULL)) == NULL) {
18167 rval = QL_FWSYM_NOT_FOUND;
18168 EL(ha, "failed, f/w module %d rca02 symbol\n", module);
18169 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
18170 "risc_code_length02", NULL)) == NULL) {
18171 rval = QL_FWSYM_NOT_FOUND;
18172 EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
18173 }
18174
18175 if (rval == QL_SUCCESS) {
18176 if (ha->fw_class == 0x2700) {
18177 if ((code03 = ddi_modsym(ha->fw_module,
18178 "tmplt_code01", NULL)) == NULL) {
18179 EL(ha, "failed, f/w module %d "
18180 "tmplt_code01 symbol\n", module);
18181 } else if ((p_uilen03 = ddi_modsym(
18182 ha->fw_module, "tmplt_code_length01",
18183 NULL)) == NULL) {
18184 code03 = NULL;
18185 EL(ha, "failed, f/w module %d "
18186 "tmplt_code_length01 symbol\n",
18187 module);
18188 }
18189 ha->risc_fw[2].code = code03;
18190 if ((ha->risc_fw[2].code = code03) != NULL) {
18191 ha->risc_fw[2].length = *p_uilen03;
18192 }
18193 }
18194 ha->risc_fw[0].code = code;
18195 ha->risc_fw[0].addr = *p_uiaddr;
18196 ha->risc_fw[0].length = *p_uilen;
18197 ha->risc_fw[1].code = code02;
18198 ha->risc_fw[1].addr = *p_uiaddr02;
18199 ha->risc_fw[1].length = *p_uilen02;
18200
18201 (void) snprintf(fw_version, sizeof (fw_version),
18202 "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
18203 }
18204 break;
18205
18206 default:
18207 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
18208 rval = QL_FW_NOT_SUPPORTED;
18209 }
18210
18211 if (rval != QL_SUCCESS) {
18212 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
18213 "module %s (%x)", QL_NAME, ha->instance, module, rval);
18214 if (ha->fw_module != NULL) {
18215 (void) ddi_modclose(ha->fw_module);
18216 ha->fw_module = NULL;
18217 }
18218 } else {
18219 /*
18220 * check for firmware version mismatch between module and
18221 * compiled in fw_table version.
18222 */
18223
18224 if (strcmp(fwt->fw_version, fw_version) != 0) {
18225
18226 /*
18227 * If f/w / driver version mismatches then
18228 * return a successful status -- however warn
18229 * the user that this is NOT recommended.
18230 */
18231
18232 cmn_err(CE_WARN, "%s(%d): driver / f/w version "
18233 "mismatch for %x: driver-%s module-%s", QL_NAME,
18234 ha->instance, ha->fw_class, fwt->fw_version,
18235 fw_version);
18236 }
18237 }
18238
18239 QL_PRINT_3(ha, "done\n");
18240
18241 return (rval);
18242 }
18243
18244 /*
18245 * ql_port_state
18246 * Set the state on all adapter ports.
18247 *
18248 * Input:
18249 * ha: parent adapter state pointer.
18250 * state: port state.
18251 * flags: task daemon flags to set.
18252 *
18253 * Context:
18254 * Interrupt or Kernel context, no mailbox commands allowed.
18255 */
18256 void
18257 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
18258 {
18259 ql_adapter_state_t *vha;
18260
18261 QL_PRINT_3(ha, "started\n");
18262
18263 TASK_DAEMON_LOCK(ha);
18264 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
18265 if (FC_PORT_STATE_MASK(vha->state) != state) {
18266 vha->state = state != FC_STATE_OFFLINE ?
18267 (FC_PORT_SPEED_MASK(vha->state) | state) : state;
18268 vha->task_daemon_flags |= flags;
18269 }
18270 }
18271 ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
18272 TASK_DAEMON_UNLOCK(ha);
18273
18274 QL_PRINT_3(ha, "done\n");
18275 }
18276
18277 /*
18278 * ql_el_trace_alloc - Construct an extended logging trace descriptor.
18279 *
18280 * Input: Pointer to the adapter state structure.
18281 * Context: Kernel context.
18282 */
18283 void
18284 ql_el_trace_alloc(ql_adapter_state_t *ha)
18285 {
18286 ql_trace_entry_t *entry;
18287 size_t maxsize;
18288
18289 ha->ql_trace_desc =
18290 (ql_trace_desc_t *)kmem_zalloc(
18291 sizeof (ql_trace_desc_t), KM_SLEEP);
18292
18293 /* ql_log_entries could be adjusted in /etc/system */
18294 maxsize = ql_log_entries * sizeof (ql_trace_entry_t);
18295 entry = kmem_zalloc(maxsize, KM_SLEEP);
18296
18297 mutex_init(&ha->ql_trace_desc->mutex, NULL,
18298 MUTEX_DRIVER, NULL);
18299
18300 ha->ql_trace_desc->trace_buffer = entry;
18301 ha->ql_trace_desc->trace_buffer_size = maxsize;
18302 ha->ql_trace_desc->nindex = 0;
18303
18304 ha->ql_trace_desc->nentries = ql_log_entries;
18305 ha->ql_trace_desc->start = ha->ql_trace_desc->end = 0;
18306 ha->ql_trace_desc->csize = 0;
18307 ha->ql_trace_desc->count = 0;
18308 }
18309
18310 /*
18311 * ql_el_trace_dealloc - Destroy an extended logging trace descriptor.
18312 *
18313 * Input: Pointer to the adapter state structure.
18314 * Context: Kernel context.
18315 */
18316 void
18317 ql_el_trace_dealloc(ql_adapter_state_t *ha)
18318 {
18319 if (ha->ql_trace_desc != NULL) {
18320 if (ha->ql_trace_desc->trace_buffer != NULL) {
18321 kmem_free(ha->ql_trace_desc->trace_buffer,
18322 ha->ql_trace_desc->trace_buffer_size);
18323 }
18324 mutex_destroy(&ha->ql_trace_desc->mutex);
18325 kmem_free(ha->ql_trace_desc,
18326 sizeof (ql_trace_desc_t));
18327 }
18328 }
18329
18330 /*
18331 * els_cmd_text - Return a pointer to a string describing the command
18332 *
18333 * Input: els_cmd = the els command opcode.
18334 * Returns: pointer to a string.
18335 * Context: Kernel context.
18336 */
18337 char *
18338 els_cmd_text(int els_cmd)
18339 {
18340 cmd_table_t *entry = &els_cmd_tbl[0];
18341
18342 return (cmd_text(entry, els_cmd));
18343 }
18344
18345 /*
18346 * mbx_cmd_text - Return a pointer to a string describing the command
18347 *
18348 * Input: mbx_cmd = the mailbox command opcode.
18349 * Returns: pointer to a string.
18350 * Context: Kernel context.
18351 */
18352 char *
18353 mbx_cmd_text(int mbx_cmd)
18354 {
18355 cmd_table_t *entry = &mbox_cmd_tbl[0];
18356
18357 return (cmd_text(entry, mbx_cmd));
18358 }
18359
18360 /*
18361 * cmd_text Return a pointer to a string describing the command
18362 *
18363 * Input: entry = the command table
18364 * cmd = the command.
18365 * Returns: pointer to a string.
18366 * Context: Kernel context.
18367 */
18368 char *
18369 cmd_text(cmd_table_t *entry, int cmd)
18370 {
18371 for (; entry->cmd != 0; entry++) {
18372 if (entry->cmd == cmd) {
18373 break;
18374 }
18375 }
18376 return (entry->string);
18377 }
18378
18379 /*
18380 * ql_els_24xx_iocb
18381 * els request indication.
18382 *
18383 * Input:
18384 * ha: adapter state pointer.
18385 * req_q: request queue structure pointer.
18386 * srb: scsi request block pointer.
18387 * arg: els passthru entry iocb pointer.
18388 *
18389 * Returns:
18390 *
18391 * Context: Kernel context.
18392 */
18393 void
18394 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *srb,
18395 void *arg)
18396 {
18397 els_descriptor_t els_desc;
18398
18399 /* Extract the ELS information */
18400 ql_fca_isp_els_request(ha, req_q, (fc_packet_t *)srb->pkt,
18401 &els_desc);
18402
18403 /* Construct the passthru entry */
18404 ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
18405
18406 /* Ensure correct endianness */
18407 ql_isp_els_handle_cmd_endian(ha, srb);
18408 }
18409
18410 /*
18411 * ql_fca_isp_els_request
18412 * Extract into an els descriptor the info required
18413 * to build an els_passthru iocb from an fc packet.
18414 *
18415 * Input:
18416 * ha: adapter state pointer.
18417 * req_q: request queue structure pointer.
18418 * pkt: fc packet pointer
18419 * els_desc: els descriptor pointer
18420 *
18421 * Context:
18422 * Kernel context.
18423 */
18424 static void
18425 ql_fca_isp_els_request(ql_adapter_state_t *ha, ql_request_q_t *req_q,
18426 fc_packet_t *pkt, els_descriptor_t *els_desc)
18427 {
18428 ls_code_t els;
18429
18430 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
18431 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
18432
18433 els_desc->els = els.ls_code;
18434
18435 els_desc->els_handle = req_q->req_ring.acc_handle;
18436 els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
18437 els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
18438 /* if n_port_handle is not < 0x7d use 0 */
18439 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
18440 els_desc->n_port_handle = ha->n_port->n_port_handle;
18441 } else {
18442 els_desc->n_port_handle = 0;
18443 }
18444 els_desc->control_flags = 0;
18445 els_desc->cmd_byte_count = pkt->pkt_cmdlen;
18446 /*
18447 * Transmit DSD. This field defines the Fibre Channel Frame payload
18448 * (without the frame header) in system memory.
18449 */
18450 els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
18451 els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
18452 els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
18453
18454 els_desc->rsp_byte_count = pkt->pkt_rsplen;
18455 /*
18456 * Receive DSD. This field defines the ELS response payload buffer
18457 * for the ISP24xx firmware transferring the received ELS
18458 * response frame to a location in host memory.
18459 */
18460 els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
18461 els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
18462 els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
18463 }
18464
18465 /*
18466 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
18467 * using the els descriptor.
18468 *
18469 * Input: ha = adapter state pointer.
18470 * els_desc = els descriptor pointer.
18471 * els_entry = els passthru entry iocb pointer.
18472 * Returns:
18473 * Context: Kernel context.
18474 */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	/* Cursor used to lay down the DSDs sequentially (see below). */
	uint32_t	*ptr32;

	/*
	 * Construct command packet.
	 * All stores go through ddi_put*() against the request ring's
	 * access handle so the entry lands in device (little) endian.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination port ID, one byte at a time (al_pa/area/domain). */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	/* Source port ID. */
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/* Load transmit data segments and count. */
	/*
	 * NOTE: ptr32 walks the dseg area in order: tx addr lo, tx addr
	 * hi, tx length, then rx addr lo, rx addr hi, rx length.  The
	 * post-increments below are order-critical.
	 */
	ptr32 = (uint32_t *)&els_entry->dseg;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
18522
18523 /*
18524 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
18525 * in host memory.
18526 *
18527 * Input: ha = adapter state pointer.
18528 * srb = scsi request block
18529 * Returns:
18530 * Context: Kernel context.
18531 */
18532 void
18533 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
18534 {
18535 ls_code_t els;
18536 fc_packet_t *pkt;
18537 uint8_t *ptr;
18538
18539 pkt = srb->pkt;
18540
18541 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
18542 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
18543
18544 ptr = (uint8_t *)pkt->pkt_cmd;
18545
18546 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
18547 }
18548
18549 /*
18550 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
18551 * in host memory.
18552 * Input: ha = adapter state pointer.
18553 * srb = scsi request block
18554 * Returns:
18555 * Context: Kernel context.
18556 */
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t	els;
	fc_packet_t	*pkt;
	uint8_t		*ptr;

	pkt = srb->pkt;

	/*
	 * NOTE(review): the ls_code is deliberately read from the *command*
	 * buffer, not the response buffer.  Presumably the command buffer
	 * was already converted to big endian by
	 * ql_isp_els_handle_cmd_endian(), so the BIG_ENDIAN_32() below
	 * swaps it back to host order before the switch in
	 * ql_isp_els_handle_endian() — confirm against the send path.
	 */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	/* The response payload is what actually gets byte-swapped. */
	ptr = (uint8_t *)pkt->pkt_resp;
	BIG_ENDIAN_32(&els);
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
18573
18574 /*
18575 * ql_isp_els_handle_endian - els requests/responses must be in big endian
18576 * in host memory.
18577 * Input: ha = adapter state pointer.
18578 * ptr = els request/response buffer pointer.
18579 * ls_code = els command code.
18580 * Returns:
18581 * Context: Kernel context.
18582 */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Walk the payload field-by-field, swapping only the multi-byte
	 * fields.  The pointer advances track the on-wire field offsets
	 * (names in the trailing comments); WWNs and class parameters are
	 * stepped over, not swapped.  Only PLOGI and PRLI payloads are
	 * handled; other ELS codes are logged and left untouched.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
18641
18642 /*
18643 * ql_n_port_plogi
18644 * In N port 2 N port topology where an N Port has logged in with the
18645 * firmware because it has the N_Port login initiative, we send up
18646 * a plogi by proxy which stimulates the login procedure to continue.
18647 *
18648 * Input:
18649 * ha = adapter state pointer.
18650 * Returns:
18651 *
18652 * Context:
18653 * Kernel context.
18654 */
18655 static int
18656 ql_n_port_plogi(ql_adapter_state_t *ha)
18657 {
18658 int rval;
18659 ql_tgt_t *tq = NULL;
18660 ql_head_t done_q = { NULL, NULL };
18661
18662 rval = QL_SUCCESS;
18663
18664 if (ha->topology & QL_N_PORT) {
18665 /* if we're doing this the n_port_handle must be good */
18666 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
18667 tq = ql_loop_id_to_queue(ha,
18668 ha->n_port->n_port_handle);
18669 if (tq != NULL) {
18670 (void) ql_send_plogi(ha, tq, &done_q);
18671 } else {
18672 EL(ha, "n_port_handle = %x, tq = %x\n",
18673 ha->n_port->n_port_handle, tq);
18674 }
18675 } else {
18676 EL(ha, "n_port_handle = %x, tq = %x\n",
18677 ha->n_port->n_port_handle, tq);
18678 }
18679 if (done_q.first != NULL) {
18680 ql_done(done_q.first, B_FALSE);
18681 }
18682 }
18683 return (rval);
18684 }
18685
18686 /*
18687 * Compare two WWNs. The NAA is omitted for comparison.
18688 *
18689 * Note particularly that the indentation used in this
18690 * function isn't according to Sun recommendations. It
18691 * is indented to make reading a bit easy.
18692 *
18693 * Return Values:
18694 * if first == second return 0
18695 * if first > second return 1
18696 * if first < second return -1
18697 */
18698 /* ARGSUSED */
18699 int
18700 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
18701 {
18702 la_wwn_t t1, t2;
18703 int rval;
18704
18705 /*
18706 * Fibre Channel protocol is big endian, so compare
18707 * as big endian values
18708 */
18709 t1.i_wwn[0] = BE_32(first->i_wwn[0]);
18710 t1.i_wwn[1] = BE_32(first->i_wwn[1]);
18711
18712 t2.i_wwn[0] = BE_32(second->i_wwn[0]);
18713 t2.i_wwn[1] = BE_32(second->i_wwn[1]);
18714
18715 if (t1.i_wwn[0] == t2.i_wwn[0]) {
18716 if (t1.i_wwn[1] == t2.i_wwn[1]) {
18717 rval = 0;
18718 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
18719 rval = 1;
18720 } else {
18721 rval = -1;
18722 }
18723 } else {
18724 if (t1.i_wwn[0] > t2.i_wwn[0]) {
18725 rval = 1;
18726 } else {
18727 rval = -1;
18728 }
18729 }
18730 return (rval);
18731 }
18732
18733 /*
18734 * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
18735 *
18736 * Input: Pointer to the adapter state structure.
18737 * Returns: Success or Failure.
18738 * Context: Kernel context.
18739 */
18740 int
18741 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
18742 {
18743 int rval = DDI_SUCCESS;
18744
18745 QL_PRINT_3(ha, "started\n");
18746
18747 ha->nvram_cache =
18748 (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
18749 KM_SLEEP);
18750
18751 if (ha->nvram_cache == NULL) {
18752 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
18753 " descriptor", QL_NAME, ha->instance);
18754 rval = DDI_FAILURE;
18755 } else {
18756 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
18757 ha->nvram_cache->size = sizeof (nvram_24xx_t);
18758 } else {
18759 ha->nvram_cache->size = sizeof (nvram_t);
18760 }
18761 ha->nvram_cache->cache =
18762 (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
18763 if (ha->nvram_cache->cache == NULL) {
18764 cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
18765 QL_NAME, ha->instance);
18766 kmem_free(ha->nvram_cache,
18767 sizeof (nvram_cache_desc_t));
18768 ha->nvram_cache = 0;
18769 rval = DDI_FAILURE;
18770 } else {
18771 ha->nvram_cache->valid = 0;
18772 }
18773 }
18774
18775 QL_PRINT_3(ha, "done\n");
18776
18777 return (rval);
18778 }
18779
18780 /*
18781 * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
18782 *
18783 * Input: Pointer to the adapter state structure.
18784 * Returns: Success or Failure.
18785 * Context: Kernel context.
18786 */
18787 int
18788 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
18789 {
18790 int rval = DDI_SUCCESS;
18791
18792 QL_PRINT_3(ha, "started\n");
18793
18794 if (ha->nvram_cache == NULL) {
18795 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
18796 QL_NAME, ha->instance);
18797 rval = DDI_FAILURE;
18798 } else {
18799 if (ha->nvram_cache->cache != NULL) {
18800 kmem_free(ha->nvram_cache->cache,
18801 ha->nvram_cache->size);
18802 }
18803 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
18804 }
18805
18806 QL_PRINT_3(ha, "done\n");
18807
18808 return (rval);
18809 }
18810
18811 /*
18812 * ql_plogi_params_desc_ctor - Construct an plogi retry params descriptor.
18813 *
18814 * Input: Pointer to the adapter state structure.
18815 * Returns: Success or Failure.
18816 * Context: Kernel context.
18817 */
18818 int
18819 ql_plogi_params_desc_ctor(ql_adapter_state_t *ha)
18820 {
18821 int rval = DDI_SUCCESS;
18822
18823 QL_PRINT_3(ha, "started\n");
18824
18825 ha->plogi_params =
18826 (plogi_params_desc_t *)kmem_zalloc(sizeof (plogi_params_desc_t),
18827 KM_SLEEP);
18828
18829 if (ha->plogi_params == NULL) {
18830 cmn_err(CE_WARN, "%s(%d): can't construct plogi params"
18831 " descriptor", QL_NAME, ha->instance);
18832 rval = DDI_FAILURE;
18833 } else {
18834 /* default initializers. */
18835 ha->plogi_params->retry_cnt = QL_PLOGI_RETRY_CNT;
18836 ha->plogi_params->retry_dly_usec = QL_PLOGI_RETRY_DLY_USEC;
18837 }
18838
18839 QL_PRINT_3(ha, "done\n");
18840
18841 return (rval);
18842 }
18843
18844 /*
18845 * ql_plogi_params_desc_dtor - Destroy an plogi retry params descriptor.
18846 *
18847 * Input: Pointer to the adapter state structure.
18848 * Returns: Success or Failure.
18849 * Context: Kernel context.
18850 */
18851 int
18852 ql_plogi_params_desc_dtor(ql_adapter_state_t *ha)
18853 {
18854 int rval = DDI_SUCCESS;
18855
18856 QL_PRINT_3(ha, "started\n");
18857
18858 if (ha->plogi_params == NULL) {
18859 cmn_err(CE_WARN, "%s(%d): can't destroy plogi params"
18860 " descriptor", QL_NAME, ha->instance);
18861 rval = DDI_FAILURE;
18862 } else {
18863 kmem_free(ha->plogi_params, sizeof (plogi_params_desc_t));
18864 }
18865
18866 QL_PRINT_3(ha, "done\n");
18867
18868 return (rval);
18869 }
18870
18871 /*
18872 * ql_toggle_loop_state
18873 * Changes looop state to offline and then online.
18874 *
18875 * Input:
18876 * ha: adapter state pointer.
18877 *
18878 * Context:
18879 * Kernel context.
18880 */
18881 void
18882 ql_toggle_loop_state(ql_adapter_state_t *ha)
18883 {
18884 uint32_t timer;
18885
18886 if (LOOP_READY(ha)) {
18887 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
18888 ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
18889 for (timer = 30; timer; timer--) {
18890 if (!(ha->task_daemon_flags & FC_STATE_CHANGE)) {
18891 break;
18892 }
18893 delay(100);
18894 }
18895 ql_loop_online(ha);
18896 }
18897 }
18898
18899 /*
18900 * ql_create_queues
18901 * Allocate request/response queues.
18902 *
18903 * Input:
18904 * ha: adapter state pointer.
18905 *
18906 * Returns:
18907 * ql driver local function return status codes
18908 *
18909 * Context:
18910 * Kernel context.
18911 */
18912 static int
18913 ql_create_queues(ql_adapter_state_t *ha)
18914 {
18915 int rval;
18916 uint16_t cnt;
18917
18918 QL_PRINT_10(ha, "started\n");
18919
18920 if (ha->req_q[0] != NULL) {
18921 QL_PRINT_10(ha, "done, queues already exist\n");
18922 return (QL_SUCCESS);
18923 }
18924 if (ha->vp_index != 0) {
18925 QL_PRINT_10(ha, "done, no multi-req-q \n");
18926 ha->req_q[0] = ha->pha->req_q[0];
18927 ha->req_q[1] = ha->pha->req_q[1];
18928 ha->rsp_queues = ha->pha->rsp_queues;
18929 return (QL_SUCCESS);
18930 }
18931
18932 /* Setup request queue buffer pointers. */
18933 ha->req_q[0] = kmem_zalloc(sizeof (ql_request_q_t), KM_SLEEP);
18934
18935 /* Allocate request queue. */
18936 ha->req_q[0]->req_entry_cnt = REQUEST_ENTRY_CNT;
18937 ha->req_q[0]->req_ring.size = ha->req_q[0]->req_entry_cnt *
18938 REQUEST_ENTRY_SIZE;
18939 if (ha->flags & QUEUE_SHADOW_PTRS) {
18940 ha->req_q[0]->req_ring.size += SHADOW_ENTRY_SIZE;
18941 }
18942 ha->req_q[0]->req_ring.type = LITTLE_ENDIAN_DMA;
18943 ha->req_q[0]->req_ring.max_cookie_count = 1;
18944 ha->req_q[0]->req_ring.alignment = 64;
18945 if ((rval = ql_alloc_phys(ha, &ha->req_q[0]->req_ring, KM_SLEEP)) !=
18946 QL_SUCCESS) {
18947 EL(ha, "request queue status=%xh", rval);
18948 ql_delete_queues(ha);
18949 return (rval);
18950 }
18951 if (ha->flags & QUEUE_SHADOW_PTRS) {
18952 ha->req_q[0]->req_out_shadow_ofst =
18953 ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18954 ha->req_q[0]->req_out_shadow_ptr = (uint32_t *)
18955 ((caddr_t)ha->req_q[0]->req_ring.bp +
18956 ha->req_q[0]->req_out_shadow_ofst);
18957 }
18958 ha->fw_transfer_size = ha->req_q[0]->req_ring.size;
18959 if (ha->flags & MULTI_QUEUE) {
18960 ha->req_q[0]->mbar_req_in = MBAR2_REQ_IN;
18961 ha->req_q[0]->mbar_req_out = MBAR2_REQ_OUT;
18962 if (ha->req_q[0]->mbar_req_in >= ha->mbar_size) {
18963 EL(ha, "req_q index=0 exceeds mbar size=%xh",
18964 ha->mbar_size);
18965 ql_delete_queues(ha);
18966 return (QL_FUNCTION_PARAMETER_ERROR);
18967 }
18968 }
18969
18970 /* Allocate response queues. */
18971 if (ha->rsp_queues == NULL) {
18972 if (ha->intr_cnt > 1) {
18973 ha->rsp_queues_cnt = (uint8_t)(ha->intr_cnt - 1);
18974 } else {
18975 ha->rsp_queues_cnt = 1;
18976 }
18977 ha->io_min_rsp_q_number = 0;
18978 if (ha->rsp_queues_cnt > 1) {
18979 /* Setup request queue buffer pointers. */
18980 ha->req_q[1] = kmem_zalloc(sizeof (ql_request_q_t),
18981 KM_SLEEP);
18982
18983 /* Allocate request queue. */
18984 ha->req_q[1]->req_entry_cnt = REQUEST_ENTRY_CNT;
18985 ha->req_q[1]->req_ring.size =
18986 ha->req_q[1]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18987 if (ha->flags & QUEUE_SHADOW_PTRS) {
18988 ha->req_q[1]->req_ring.size +=
18989 SHADOW_ENTRY_SIZE;
18990 }
18991 ha->req_q[1]->req_ring.type = LITTLE_ENDIAN_DMA;
18992 ha->req_q[1]->req_ring.max_cookie_count = 1;
18993 ha->req_q[1]->req_ring.alignment = 64;
18994 if ((rval = ql_alloc_phys(ha, &ha->req_q[1]->req_ring,
18995 KM_SLEEP)) != QL_SUCCESS) {
18996 EL(ha, "ha request queue status=%xh", rval);
18997 ql_delete_queues(ha);
18998 return (rval);
18999 }
19000 if (ha->flags & QUEUE_SHADOW_PTRS) {
19001 ha->req_q[1]->req_out_shadow_ofst =
19002 ha->req_q[1]->req_entry_cnt *
19003 REQUEST_ENTRY_SIZE;
19004 ha->req_q[1]->req_out_shadow_ptr = (uint32_t *)
19005 ((caddr_t)ha->req_q[1]->req_ring.bp +
19006 ha->req_q[1]->req_out_shadow_ofst);
19007 }
19008 ha->req_q[1]->req_q_number = 1;
19009 if (ha->flags & MULTI_QUEUE) {
19010 ha->req_q[1]->mbar_req_in =
19011 ha->mbar_queue_offset + MBAR2_REQ_IN;
19012 ha->req_q[1]->mbar_req_out =
19013 ha->mbar_queue_offset + MBAR2_REQ_OUT;
19014 if (ha->req_q[1]->mbar_req_in >=
19015 ha->mbar_size) {
19016 EL(ha, "ha req_q index=1 exceeds mbar "
19017 "size=%xh", ha->mbar_size);
19018 ql_delete_queues(ha);
19019 return (QL_FUNCTION_PARAMETER_ERROR);
19020 }
19021 }
19022 }
19023
19024 /* Allocate enough rsp_queue descriptors for IRM */
19025 ha->rsp_queues_size = (ha->hsize / sizeof (ddi_intr_handle_t)) *
19026 sizeof (ql_response_q_t *);
19027 ha->rsp_queues = kmem_zalloc(ha->rsp_queues_size, KM_SLEEP);
19028
19029 /* Create rsp_queues for the current rsp_queue_cnt */
19030 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19031 rval = ql_create_rsp_queue(ha, cnt);
19032 if (rval != QL_SUCCESS) {
19033 ql_delete_queues(ha);
19034 return (rval);
19035 }
19036 }
19037 }
19038
19039 if (CFG_IST(ha, CFG_FCIP_TYPE_1)) {
19040 /* Allocate IP receive queue. */
19041 ha->rcv_ring.size = RCVBUF_QUEUE_SIZE;
19042 ha->rcv_ring.type = LITTLE_ENDIAN_DMA;
19043 ha->rcv_ring.max_cookie_count = 1;
19044 ha->rcv_ring.alignment = 64;
19045 if ((rval = ql_alloc_phys(ha, &ha->rcv_ring, KM_SLEEP)) !=
19046 QL_SUCCESS) {
19047 EL(ha, "receive queue status=%xh", rval);
19048 ql_delete_queues(ha);
19049 return (rval);
19050 }
19051 }
19052
19053 QL_PRINT_10(ha, "done\n");
19054
19055 return (rval);
19056 }
19057
19058 /*
19059 * ql_create_rsp_queue
19060 * Allocate a response queues.
19061 *
19062 * Input:
19063 * ha: adapter state pointer.
19064 *
19065 * Returns:
19066 * ql driver local function return status codes
19067 *
19068 * Context:
19069 * Kernel context.
19070 */
19071 static int
19072 ql_create_rsp_queue(ql_adapter_state_t *ha, uint16_t rsp_q_indx)
19073 {
19074 ql_response_q_t *rsp_q;
19075 int rval = QL_SUCCESS;
19076
19077 QL_PRINT_3(ha, "started\n");
19078
19079 ha->rsp_queues[rsp_q_indx] = rsp_q =
19080 kmem_zalloc(sizeof (ql_response_q_t), KM_SLEEP);
19081 /* ISP response ring and interrupt protection. */
19082 mutex_init(&rsp_q->intr_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
19083 rsp_q->rsp_q_number = rsp_q_indx;
19084 rsp_q->msi_x_vector = (uint16_t)(rsp_q_indx + 1);
19085 if (ha->flags & MULTI_QUEUE) {
19086 rsp_q->mbar_rsp_in = rsp_q->rsp_q_number *
19087 ha->mbar_queue_offset + MBAR2_RESP_IN;
19088 rsp_q->mbar_rsp_out = rsp_q->rsp_q_number *
19089 ha->mbar_queue_offset + MBAR2_RESP_OUT;
19090 if (rsp_q->mbar_rsp_in >= ha->mbar_size) {
19091 EL(ha, "rsp_q index=%xh exceeds mbar size=%xh",
19092 rsp_q_indx, ha->mbar_size);
19093 return (QL_FUNCTION_PARAMETER_ERROR);
19094 }
19095 }
19096
19097 rsp_q->rsp_entry_cnt = RESPONSE_ENTRY_CNT;
19098 rsp_q->rsp_ring.size = rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19099 if (ha->flags & QUEUE_SHADOW_PTRS) {
19100 rsp_q->rsp_ring.size += SHADOW_ENTRY_SIZE;
19101 }
19102 rsp_q->rsp_ring.type = LITTLE_ENDIAN_DMA;
19103 rsp_q->rsp_ring.max_cookie_count = 1;
19104 rsp_q->rsp_ring.alignment = 64;
19105 rval = ql_alloc_phys(ha, &rsp_q->rsp_ring, KM_SLEEP);
19106 if (rval != QL_SUCCESS) {
19107 EL(ha, "response queue status=%xh", rval);
19108 }
19109 if (ha->flags & QUEUE_SHADOW_PTRS) {
19110 rsp_q->rsp_in_shadow_ofst =
19111 rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19112 rsp_q->rsp_in_shadow_ptr = (uint32_t *)
19113 ((caddr_t)rsp_q->rsp_ring.bp +
19114 rsp_q->rsp_in_shadow_ofst);
19115 }
19116
19117 QL_PRINT_3(ha, "done\n");
19118 return (rval);
19119 }
19120
19121 /*
19122 * ql_delete_queues
19123 * Deletes request/response queues.
19124 *
19125 * Input:
19126 * ha = adapter state pointer.
19127 *
19128 * Context:
19129 * Kernel context.
19130 */
19131 static void
19132 ql_delete_queues(ql_adapter_state_t *ha)
19133 {
19134 uint32_t cnt;
19135
19136 QL_PRINT_10(ha, "started\n");
19137
19138 if (ha->vp_index != 0) {
19139 QL_PRINT_10(ha, "done, no multi-req-q \n");
19140 ha->req_q[0] = ha->req_q[1] = NULL;
19141 return;
19142 }
19143 if (ha->req_q[0] != NULL) {
19144 ql_free_phys(ha, &ha->req_q[0]->req_ring);
19145 kmem_free(ha->req_q[0], sizeof (ql_request_q_t));
19146 ha->req_q[0] = NULL;
19147 }
19148 if (ha->req_q[1] != NULL) {
19149 ql_free_phys(ha, &ha->req_q[1]->req_ring);
19150 kmem_free(ha->req_q[1], sizeof (ql_request_q_t));
19151 ha->req_q[1] = NULL;
19152 }
19153
19154 if (ha->rsp_queues != NULL) {
19155 ql_response_q_t *rsp_q;
19156
19157 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19158 if ((rsp_q = ha->rsp_queues[cnt]) == NULL) {
19159 continue;
19160 }
19161
19162 mutex_destroy(&rsp_q->intr_mutex);
19163 ql_free_phys(ha, &rsp_q->rsp_ring);
19164 kmem_free(rsp_q, sizeof (ql_response_q_t));
19165 ha->rsp_queues[cnt] = NULL;
19166 }
19167 kmem_free(ha->rsp_queues, ha->rsp_queues_size);
19168 ha->rsp_queues = NULL;
19169 }
19170
19171 QL_PRINT_10(ha, "done\n");
19172 }
19173
19174 /*
19175 * ql_multi_queue_support
19176 * Test 2500 or 8100 adapters for support of multi-queue
19177 *
19178 * Input:
19179 * ha: adapter state pointer.
19180 *
19181 * Returns:
19182 * ql local function return status code.
19183 *
19184 * Context:
19185 * Kernel context.
19186 */
19187 static int
19188 ql_multi_queue_support(ql_adapter_state_t *ha)
19189 {
19190 uint32_t data;
19191 int rval;
19192
19193 data = ql_get_cap_ofst(ha, PCI_CAP_ID_MSI_X);
19194 if ((ql_pci_config_get16(ha, data + PCI_MSIX_CTRL) &
19195 PCI_MSIX_TBL_SIZE_MASK) > 2) {
19196 ha->mbar_size = MBAR2_MULTI_Q_MAX * MBAR2_REG_OFFSET;
19197
19198 if (ql_map_mem_bar(ha, &ha->mbar_dev_handle, &ha->mbar,
19199 PCI_CONF_BASE3, ha->mbar_size) != DDI_SUCCESS) {
19200 return (QL_FUNCTION_FAILED);
19201 }
19202 if ((rval = qlc_fm_check_acc_handle(ha,
19203 ha->mbar_dev_handle)) != DDI_FM_OK) {
19204 qlc_fm_report_err_impact(ha,
19205 QL_FM_EREPORT_ACC_HANDLE_CHECK);
19206 EL(ha, "fm_check_acc_handle mbar_dev_handle "
19207 "status=%xh\n", rval);
19208 return (QL_FUNCTION_FAILED);
19209 }
19210 return (QL_SUCCESS);
19211 }
19212 return (QL_FUNCTION_FAILED);
19213 }
19214
19215 /*
19216 * ql_get_cap_ofst
19217 * Locates PCI configuration space capability pointer
19218 *
19219 * Input:
19220 * ha: adapter state pointer.
19221 * cap_id: Capability ID.
19222 *
19223 * Returns:
19224 * capability offset
19225 *
19226 * Context:
19227 * Kernel context.
19228 */
19229 int
19230 ql_get_cap_ofst(ql_adapter_state_t *ha, uint8_t cap_id)
19231 {
19232 int cptr = PCI_CAP_NEXT_PTR_NULL;
19233
19234 QL_PRINT_3(ha, "started\n");
19235
19236 if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
19237 cptr = ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
19238
19239 while (cptr != PCI_CAP_NEXT_PTR_NULL) {
19240 if (ql_pci_config_get8(ha, cptr) == cap_id) {
19241 break;
19242 }
19243 cptr = ql_pci_config_get8(ha, cptr + PCI_CAP_NEXT_PTR);
19244 }
19245 }
19246
19247 QL_PRINT_3(ha, "done\n");
19248 return (cptr);
19249 }
19250
19251 /*
19252 * ql_map_mem_bar
19253 * Map Mem BAR
19254 *
19255 * Input:
19256 * ha: adapter state pointer.
19257 * handlep: access handle pointer.
19258 * addrp: address structure pointer.
19259 * ofst: BAR offset.
19260 * len: address space length.
19261 *
19262 * Returns:
19263 * DDI_SUCCESS or DDI_FAILURE.
19264 *
19265 * Context:
19266 * Kernel context.
19267 */
19268 static int
19269 ql_map_mem_bar(ql_adapter_state_t *ha, ddi_acc_handle_t *handlep,
19270 caddr_t *addrp, uint32_t ofst, uint32_t len)
19271 {
19272 caddr_t nreg;
19273 pci_regspec_t *reg, *reg2;
19274 int rval;
19275 uint_t rlen;
19276 uint32_t rcnt, w32, nreg_size;
19277
19278 QL_PRINT_10(ha, "started\n");
19279
19280 /* Check for Mem BAR */
19281 w32 = ql_pci_config_get32(ha, ofst);
19282 if (w32 == 0) {
19283 EL(ha, "no Mem BAR %xh\n", ofst);
19284 return (DDI_FAILURE);
19285 }
19286
19287 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
19288 if ((rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ha->dip,
19289 DDI_PROP_DONTPASS, "reg", (int **)®, &rlen)) !=
19290 DDI_PROP_SUCCESS) {
19291 EL(ha, "ddi_prop_lookup_int_array status=%xh\n", rval);
19292 return (DDI_FAILURE);
19293 }
19294 rlen = (uint_t)(rlen * sizeof (int)); /* in bytes */
19295 rcnt = (uint32_t)(rlen / sizeof (pci_regspec_t));
19296
19297 /* Check if register already added. */
19298 reg2 = reg;
19299 for (w32 = 0; w32 < rcnt; w32++) {
19300 if ((reg2->pci_phys_hi & PCI_REG_REG_M) == ofst) {
19301 EL(ha, "already mapped\n");
19302 break;
19303 }
19304 reg2++;
19305 }
19306 if (w32 == rcnt) {
19307 /*
19308 * Allocate memory for the existing reg(s) plus one and then
19309 * build it.
19310 */
19311 nreg_size = (uint32_t)(rlen + sizeof (pci_regspec_t));
19312 nreg = kmem_zalloc(nreg_size, KM_SLEEP);
19313
19314 /*
19315 * Find a current map memory reg to copy.
19316 */
19317 reg2 = reg;
19318 while ((reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19319 PCI_ADDR_MEM32 && (reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19320 PCI_ADDR_MEM64) {
19321 reg2++;
19322 if ((caddr_t)reg2 >= (caddr_t)reg + rlen) {
19323 reg2 = reg;
19324 break;
19325 }
19326 }
19327 w32 = (reg2->pci_phys_hi & ~PCI_REG_REG_M) | ofst;
19328
19329 bcopy(reg, nreg, rlen);
19330 reg2 = (pci_regspec_t *)(nreg + rlen);
19331
19332 reg2->pci_phys_hi = w32;
19333 reg2->pci_phys_mid = 0;
19334 reg2->pci_phys_low = 0;
19335 reg2->pci_size_hi = 0;
19336 reg2->pci_size_low = len;
19337
19338 /*
19339 * Write out the new "reg" property
19340 */
19341 /*LINTED [Solaris DDI_DEV_T_NONE Lint error]*/
19342 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, ha->dip,
19343 "reg", (int *)nreg, (uint_t)(nreg_size / sizeof (int)));
19344
19345 w32 = (uint_t)(nreg_size / sizeof (pci_regspec_t) - 1);
19346 kmem_free((caddr_t)nreg, nreg_size);
19347 }
19348
19349 ddi_prop_free(reg);
19350
19351 /* Map register */
19352 rval = ddi_regs_map_setup(ha->dip, w32, addrp, 0, len,
19353 &ql_dev_acc_attr, handlep);
19354 if (rval != DDI_SUCCESS || *addrp == NULL || *handlep == NULL) {
19355 EL(ha, "regs_map status=%xh, base=%xh, handle=%xh\n",
19356 rval, *addrp, *handlep);
19357 if (*handlep != NULL) {
19358 ddi_regs_map_free(handlep);
19359 *handlep = NULL;
19360 }
19361 }
19362
19363 QL_PRINT_10(ha, "done\n");
19364
19365 return (rval);
19366 }
19367
19368 /*
19369 * ql_intr_lock
19370 * Acquires all interrupt locks.
19371 *
19372 * Input:
19373 * ha: adapter state pointer.
19374 *
19375 * Context:
19376 * Kernel/Interrupt context.
19377 */
19378 void
19379 ql_intr_lock(ql_adapter_state_t *ha)
19380 {
19381 uint16_t cnt;
19382
19383 QL_PRINT_3(ha, "started\n");
19384
19385 if (ha->rsp_queues != NULL) {
19386 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19387 if (ha->rsp_queues[cnt] != NULL) {
19388 INDX_INTR_LOCK(ha, cnt);
19389 }
19390 }
19391 }
19392 QL_PRINT_3(ha, "done\n");
19393 }
19394
19395 /*
19396 * ql_intr_unlock
19397 * Releases all interrupt locks.
19398 *
19399 * Input:
19400 * ha: adapter state pointer.
19401 *
19402 * Context:
19403 * Kernel/Interrupt context.
19404 */
19405 void
19406 ql_intr_unlock(ql_adapter_state_t *ha)
19407 {
19408 uint16_t cnt;
19409
19410 QL_PRINT_3(ha, "started\n");
19411
19412 if (ha->rsp_queues != NULL) {
19413 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19414 if (ha->rsp_queues[cnt] != NULL) {
19415 INDX_INTR_UNLOCK(ha, cnt);
19416 }
19417 }
19418 }
19419 QL_PRINT_3(ha, "done\n");
19420 }
19421
19422 /*
19423 * ql_completion_thread
19424 * I/O completion thread.
19425 *
19426 * Input:
19427 * arg: port info pointer.
19428 * COMP_Q_LOCK must be acquired prior to call.
19429 *
19430 * Context:
19431 * Kernel context.
19432 */
19433 static void
19434 ql_completion_thread(void *arg)
19435 {
19436 ql_srb_t *sp;
19437 ql_adapter_state_t *ha = arg;
19438
19439 QL_PRINT_3(ha, "started, hsp=%p\n", (void *)&sp);
19440
19441 COMP_Q_LOCK(ha);
19442 ha->comp_thds_active++;
19443 ha->comp_thds_awake++;
19444 while (!(ha->flags & COMP_THD_TERMINATE)) {
19445 /* process completion queue items */
19446 while (ha->comp_q.first != NULL) {
19447 sp = (ha->comp_q.first)->base_address;
19448 /* Remove command from completion queue */
19449 ql_remove_link(&ha->comp_q, &sp->cmd);
19450 COMP_Q_UNLOCK(ha);
19451 QL_PRINT_3(ha, "pkt_comp, sp=%p, pkt_state=%xh, "
19452 "hsp=%p\n", (void*)sp, sp->pkt->pkt_state,
19453 (void *)&sp);
19454 (sp->pkt->pkt_comp)(sp->pkt);
19455 COMP_Q_LOCK(ha);
19456 }
19457 ha->comp_thds_awake--;
19458 QL_PRINT_3(ha, "sleep, hsp=%p\n", (void *)&sp);
19459 cv_wait(&ha->cv_comp_thread, &ha->comp_q_mutex);
19460 QL_PRINT_3(ha, "awoke, hsp=%p\n", (void *)&sp);
19461 }
19462 ha->comp_thds_awake--;
19463 ha->comp_thds_active--;
19464 COMP_Q_UNLOCK(ha);
19465
19466 QL_PRINT_3(ha, "done\n");
19467 }
19468
19469 /*
19470 * ql_io_comp
19471 * Transport I/O completion
19472 *
19473 * Input:
19474 * sp: SRB structure pointer
19475 *
19476 * Context:
19477 * Kernel context.
19478 */
19479 void
19480 ql_io_comp(ql_srb_t *sp)
19481 {
19482 ql_adapter_state_t *ha = sp->ha->pha;
19483
19484 QL_PRINT_3(ha, "started, sp=%ph, d_id=%xh\n", (void*)sp,
19485 sp->pkt->pkt_cmd_fhdr.d_id);
19486
19487 if (sp->pkt->pkt_comp && !ddi_in_panic()) {
19488 QL_PRINT_3(ha, "added to comp_q\n");
19489 COMP_Q_LOCK(ha);
19490 ql_add_link_b(&ha->comp_q, &sp->cmd);
19491 if (ha->comp_thds_awake < ha->comp_thds_active) {
19492 ha->comp_thds_awake++;
19493 QL_PRINT_3(ha, "signal\n");
19494 cv_signal(&ha->cv_comp_thread);
19495 }
19496 COMP_Q_UNLOCK(ha);
19497 }
19498
19499 QL_PRINT_3(ha, "done\n");
19500 }
19501
19502 /*
19503 * ql_process_comp_queue
19504 * Process completion queue entries.
19505 *
19506 * Input:
19507 * arg: adapter state pointer.
19508 *
19509 * Context:
19510 * Kernel context.
19511 */
19512 static void
19513 ql_process_comp_queue(void *arg)
19514 {
19515 ql_srb_t *sp;
19516 ql_adapter_state_t *ha = arg;
19517
19518 QL_PRINT_3(ha, "started\n");
19519
19520 COMP_Q_LOCK(ha);
19521
19522 /* process completion queue items */
19523 while (ha->comp_q.first != NULL) {
19524 sp = (ha->comp_q.first)->base_address;
19525 QL_PRINT_3(ha, "sending comp=0x%p\n", (void *)sp);
19526 /* Remove command from completion queue */
19527 ql_remove_link(&ha->comp_q, &sp->cmd);
19528 COMP_Q_UNLOCK(ha);
19529 (sp->pkt->pkt_comp)(sp->pkt);
19530 COMP_Q_LOCK(ha);
19531 }
19532
19533 COMP_Q_UNLOCK(ha);
19534
19535 QL_PRINT_3(ha, "done\n");
19536 }
19537
19538 /*
19539 * ql_abort_io
19540 * Abort I/O.
19541 *
19542 * Input:
19543 * ha: adapter state pointer.
19544 * sp: SRB pointer.
19545 *
19546 * Returns:
19547 * ql local function return status code.
19548 *
19549 * Context:
19550 * Kernel context.
19551 */
19552 static int
19553 ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *sp)
19554 {
19555 ql_link_t *link;
19556 ql_srb_t *sp2;
19557 ql_tgt_t *tq;
19558 ql_lun_t *lq;
19559 int rval = QL_FUNCTION_FAILED;
19560 ql_adapter_state_t *ha = vha->pha;
19561
19562 QL_PRINT_10(ha, "started, sp=%ph, handle=%xh\n", (void *)sp,
19563 sp->handle);
19564
19565 if ((lq = sp->lun_queue) != NULL) {
19566 tq = lq->target_queue;
19567 } else {
19568 tq = NULL;
19569 }
19570
19571 /* Acquire target queue lock. */
19572 if (tq) {
19573 DEVICE_QUEUE_LOCK(tq);
19574 }
19575 REQUEST_RING_LOCK(ha);
19576
19577 /* If command not already started. */
19578 if (!(sp->flags & SRB_ISP_STARTED)) {
19579 rval = QL_FUNCTION_PARAMETER_ERROR;
19580
19581 /* Check pending queue for command. */
19582 for (link = ha->pending_cmds.first; link != NULL;
19583 link = link->next) {
19584 sp2 = link->base_address;
19585 if (sp2 == sp) {
19586 rval = QL_SUCCESS;
19587 /* Remove srb from pending command queue */
19588 ql_remove_link(&ha->pending_cmds, &sp->cmd);
19589 break;
19590 }
19591 }
19592
19593 if (link == NULL && lq) {
19594 /* Check for cmd on device queue. */
19595 for (link = lq->cmd.first; link != NULL;
19596 link = link->next) {
19597 sp2 = link->base_address;
19598 if (sp2 == sp) {
19599 rval = QL_SUCCESS;
19600 /* Remove srb from device queue. */
19601 ql_remove_link(&lq->cmd, &sp->cmd);
19602 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
19603 break;
19604 }
19605 }
19606 }
19607 }
19608
19609 REQUEST_RING_UNLOCK(ha);
19610 if (tq) {
19611 DEVICE_QUEUE_UNLOCK(tq);
19612 }
19613
19614 if (sp->flags & SRB_ISP_COMPLETED || rval == QL_SUCCESS) {
19615 rval = QL_SUCCESS;
19616 } else {
19617 uint32_t index;
19618
19619 INTR_LOCK(ha);
19620 sp->flags |= SRB_ABORTING;
19621 if (sp->handle != 0) {
19622 index = sp->handle & OSC_INDEX_MASK;
19623 if (ha->outstanding_cmds[index] == sp) {
19624 ha->outstanding_cmds[index] =
19625 QL_ABORTED_SRB(ha);
19626 }
19627 /* Decrement outstanding commands on device. */
19628 if (tq != NULL && tq->outcnt != 0) {
19629 tq->outcnt--;
19630 }
19631 if (lq != NULL && sp->flags & SRB_FCP_CMD_PKT &&
19632 lq->lun_outcnt != 0) {
19633 lq->lun_outcnt--;
19634 }
19635 /* Remove command from watchdog queue. */
19636 if (sp->flags & SRB_WATCHDOG_ENABLED) {
19637 if (tq != NULL) {
19638 ql_remove_link(&tq->wdg, &sp->wdg);
19639 }
19640 sp->flags &= ~SRB_WATCHDOG_ENABLED;
19641 }
19642 INTR_UNLOCK(ha);
19643 (void) ql_abort_command(ha, sp);
19644 sp->handle = 0;
19645 } else {
19646 INTR_UNLOCK(ha);
19647 }
19648 rval = QL_SUCCESS;
19649 }
19650
19651 if (rval != QL_SUCCESS) {
19652 EL(ha, "sp=%p not aborted=%xh\n", (void *)sp, rval);
19653 } else {
19654 /*EMPTY*/
19655 QL_PRINT_10(ha, "done\n");
19656 }
19657 return (rval);
19658 }
19659
19660 /*
19661 * ql_idc
19662 * Inter driver communication thread.
19663 *
19664 * Input:
19665 * ha = adapter state pointer.
19666 *
19667 * Context:
19668 * Kernel context.
19669 */
19670 static void
19671 ql_idc(ql_adapter_state_t *ha)
19672 {
19673 int rval;
19674 uint32_t timer = 300;
19675
19676 QL_PRINT_10(ha, "started\n");
19677
19678 for (;;) {
19679 /* IDC Stall needed. */
19680 if (ha->flags & IDC_STALL_NEEDED) {
19681 ADAPTER_STATE_LOCK(ha);
19682 ha->flags &= ~IDC_STALL_NEEDED;
19683 ADAPTER_STATE_UNLOCK(ha);
19684 TASK_DAEMON_LOCK(ha);
19685 ha->task_daemon_flags |= DRIVER_STALL;
19686 TASK_DAEMON_UNLOCK(ha);
19687 if (LOOP_READY(ha)) {
19688 if ((ha->idc_mb[1] & IDC_TIMEOUT_MASK) <
19689 IDC_TIMEOUT_MASK) {
19690 ha->idc_mb[1] = (uint16_t)
19691 (ha->idc_mb[1] | IDC_TIMEOUT_MASK);
19692 rval = ql_idc_time_extend(ha);
19693 if (rval != QL_SUCCESS) {
19694 EL(ha, "idc_time_extend status"
19695 "=%xh\n", rval);
19696 }
19697 }
19698 (void) ql_wait_outstanding(ha);
19699 }
19700 }
19701
19702 /* IDC ACK needed. */
19703 if (ha->flags & IDC_ACK_NEEDED) {
19704 ADAPTER_STATE_LOCK(ha);
19705 ha->flags &= ~IDC_ACK_NEEDED;
19706 ADAPTER_STATE_UNLOCK(ha);
19707 rval = ql_idc_ack(ha);
19708 if (rval != QL_SUCCESS) {
19709 EL(ha, "idc_ack status=%xh\n", rval);
19710 ADAPTER_STATE_LOCK(ha);
19711 ha->flags |= IDC_RESTART_NEEDED;
19712 ADAPTER_STATE_UNLOCK(ha);
19713 }
19714 }
19715
19716 /* IDC Restart needed. */
19717 if (timer-- == 0 || ha->flags & ADAPTER_SUSPENDED ||
19718 (ha->flags & IDC_RESTART_NEEDED &&
19719 !(ha->flags & LOOPBACK_ACTIVE))) {
19720 ADAPTER_STATE_LOCK(ha);
19721 ha->flags &= ~(IDC_RESTART_NEEDED | IDC_STALL_NEEDED |
19722 IDC_ACK_NEEDED);
19723 ADAPTER_STATE_UNLOCK(ha);
19724 TASK_DAEMON_LOCK(ha);
19725 ha->task_daemon_flags &= ~DRIVER_STALL;
19726 TASK_DAEMON_UNLOCK(ha);
19727 if (LOOP_READY(ha)) {
19728 ql_restart_queues(ha);
19729 }
19730 break;
19731 }
19732 delay(10);
19733 }
19734
19735 QL_PRINT_10(ha, "done\n");
19736 }
19737
19738 /*
19739 * ql_get_lun_addr
19740 * get the lunslun address.
19741 *
19742 * Input:
19743 * tq: target queue pointer.
19744 * lun: the lun number.
19745 *
19746 * Returns:
19747 * the lun address.
19748 *
19749 * Context:
19750 * Interrupt or Kernel context, no mailbox commands allowed.
19751 */
19752 uint64_t
19753 ql_get_lun_addr(ql_tgt_t *tq, uint16_t lun)
19754 {
19755 ql_lun_t *lq;
19756 ql_link_t *link = NULL;
19757 uint64_t lun_addr = 0;
19758 fcp_ent_addr_t *fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
19759
19760 /* If the lun queue exists */
19761 if (tq) {
19762 for (link = tq->lun_queues.first; link != NULL;
19763 link = link->next) {
19764 lq = link->base_address;
19765 if (lq->lun_no == lun) {
19766 break;
19767 }
19768 }
19769 }
19770 if (link == NULL) {
19771 /* create an fcp_ent_addr from the lun number */
19772 if (MSB(lun)) {
19773 fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19774 (hibyte(lun) | QL_LUN_AM_FLAT));
19775 } else {
19776 fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19777 hibyte(lun));
19778 }
19779 } else {
19780 lun_addr = lq->lun_addr;
19781 }
19782
19783 return (lun_addr);
19784 }
19785
19786
19787 /*
19788 * ql_83xx_binary_fw_dump
19789 *
19790 * Input:
19791 * ha: adapter state pointer.
19792 * fw: firmware dump context pointer.
19793 *
19794 * Returns:
19795 * ql local function return status code.
19796 *
19797 * Context:
19798 * Interrupt or Kernel context, no mailbox commands allowed.
19799 */
19800 static int
19801 ql_83xx_binary_fw_dump(ql_adapter_state_t *ha, ql_83xx_fw_dump_t *fw)
19802 {
19803 uint32_t *reg32, cnt, *w32ptr, index, *dp;
19804 void *bp;
19805 clock_t timer;
19806 int rv, rval = QL_SUCCESS;
19807
19808 QL_PRINT_3(ha, "started\n");
19809
19810 fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
19811 if (ha->req_q[1] != NULL) {
19812 fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
19813 }
19814 fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
19815
19816 fw->hccr = RD32_IO_REG(ha, hccr);
19817 fw->r2h_status = RD32_IO_REG(ha, risc2host);
19818 fw->aer_ues = ql_pci_config_get32(ha, 0x104);
19819
19820 /* Disable ISP interrupts. */
19821 ql_disable_intr(ha);
19822
19823 /* Pause RISC. */
19824 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
19825 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
19826 for (timer = 30000;
19827 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
19828 rval == QL_SUCCESS; timer--) {
19829 if (timer) {
19830 drv_usecwait(100);
19831 if (timer % 10000 == 0) {
19832 EL(ha, "risc pause %d\n", timer);
19833 }
19834 } else {
19835 EL(ha, "risc pause timeout\n");
19836 rval = QL_FUNCTION_TIMEOUT;
19837 }
19838 }
19839 }
19840
19841 WRT32_IO_REG(ha, io_base_addr, 0x6000);
19842 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0);
19843 WRT_REG_DWORD(ha, ha->iobase + 0xcc, 0);
19844
19845 WRT32_IO_REG(ha, io_base_addr, 0x6010);
19846 WRT_REG_DWORD(ha, ha->iobase + 0xd4, 0);
19847
19848 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19849 WRT_REG_DWORD(ha, ha->iobase + 0xf0, 0x60000000);
19850
19851 /* Host Interface registers */
19852
19853 /* HostRisc registers. */
19854 WRT32_IO_REG(ha, io_base_addr, 0x7000);
19855 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
19856 16, 32);
19857 WRT32_IO_REG(ha, io_base_addr, 0x7010);
19858 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19859 WRT32_IO_REG(ha, io_base_addr, 0x7040);
19860 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19861
19862 /* PCIe registers. */
19863 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
19864 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
19865 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
19866 3, 32);
19867 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
19868 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
19869
19870 /* Host interface registers. */
19871 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
19872 sizeof (fw->host_reg) / 4, 32);
19873
19874 /* Shadow registers. */
19875
19876 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19877 RD32_IO_REG(ha, io_base_addr);
19878
19879 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19880 WRT_REG_DWORD(ha, reg32, 0xB0000000);
19881 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19882 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
19883
19884 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19885 WRT_REG_DWORD(ha, reg32, 0xB0100000);
19886 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19887 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
19888
19889 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19890 WRT_REG_DWORD(ha, reg32, 0xB0200000);
19891 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19892 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
19893
19894 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19895 WRT_REG_DWORD(ha, reg32, 0xB0300000);
19896 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19897 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
19898
19899 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19900 WRT_REG_DWORD(ha, reg32, 0xB0400000);
19901 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19902 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
19903
19904 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19905 WRT_REG_DWORD(ha, reg32, 0xB0500000);
19906 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19907 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
19908
19909 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19910 WRT_REG_DWORD(ha, reg32, 0xB0600000);
19911 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19912 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
19913
19914 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19915 WRT_REG_DWORD(ha, reg32, 0xB0700000);
19916 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19917 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
19918
19919 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19920 WRT_REG_DWORD(ha, reg32, 0xB0800000);
19921 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19922 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
19923
19924 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19925 WRT_REG_DWORD(ha, reg32, 0xB0900000);
19926 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19927 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
19928
19929 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19930 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
19931 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19932 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
19933
19934 /* RISC I/O register. */
19935
19936 WRT32_IO_REG(ha, io_base_addr, 0x0010);
19937 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
19938 1, 32);
19939
19940 /* Mailbox registers. */
19941
19942 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
19943 sizeof (fw->mailbox_reg) / 2, 16);
19944
19945 /* Transfer sequence registers. */
19946
19947 /* XSEQ GP */
19948 WRT32_IO_REG(ha, io_base_addr, 0xBE00);
19949 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
19950 16, 32);
19951 WRT32_IO_REG(ha, io_base_addr, 0xBE10);
19952 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19953 WRT32_IO_REG(ha, io_base_addr, 0xBE20);
19954 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19955 WRT32_IO_REG(ha, io_base_addr, 0xBE30);
19956 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19957 WRT32_IO_REG(ha, io_base_addr, 0xBE40);
19958 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19959 WRT32_IO_REG(ha, io_base_addr, 0xBE50);
19960 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19961 WRT32_IO_REG(ha, io_base_addr, 0xBE60);
19962 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19963 WRT32_IO_REG(ha, io_base_addr, 0xBE70);
19964 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19965 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
19966 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19967 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
19968 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19969 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
19970 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19971 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
19972 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19973 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
19974 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19975 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
19976 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19977 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
19978 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19979 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
19980 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19981
19982 /* XSEQ-0 */
19983 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
19984 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0, 16, 32);
19985 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
19986 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19987 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
19988 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19989
19990 /* XSEQ-1 */
19991 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
19992 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
19993 16, 32);
19994
19995 /* XSEQ-2 */
19996 WRT32_IO_REG(ha, io_base_addr, 0xBEF0);
19997 (void) ql_read_regs(ha, fw->xseq_2_reg, ha->iobase + 0xC0,
19998 16, 32);
19999
20000 /* Receive sequence registers. */
20001
20002 /* RSEQ GP */
20003 WRT32_IO_REG(ha, io_base_addr, 0xFE00);
20004 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0, 16, 32);
20005 WRT32_IO_REG(ha, io_base_addr, 0xFE10);
20006 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20007 WRT32_IO_REG(ha, io_base_addr, 0xFE20);
20008 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20009 WRT32_IO_REG(ha, io_base_addr, 0xFE30);
20010 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20011 WRT32_IO_REG(ha, io_base_addr, 0xFE40);
20012 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20013 WRT32_IO_REG(ha, io_base_addr, 0xFE50);
20014 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20015 WRT32_IO_REG(ha, io_base_addr, 0xFE60);
20016 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20017 WRT32_IO_REG(ha, io_base_addr, 0xFE70);
20018 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20019 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
20020 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20021 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
20022 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20023 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
20024 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20025 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
20026 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20027 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
20028 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20029 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
20030 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20031 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
20032 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20033 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
20034 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20035
20036 /* RSEQ-0 */
20037 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
20038 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
20039 16, 32);
20040 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
20041 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20042
20043 /* RSEQ-1 */
20044 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
20045 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
20046 sizeof (fw->rseq_1_reg) / 4, 32);
20047
20048 /* RSEQ-2 */
20049 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
20050 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
20051 sizeof (fw->rseq_2_reg) / 4, 32);
20052
20053 /* RSEQ-3 */
20054 WRT32_IO_REG(ha, io_base_addr, 0xFEF0);
20055 (void) ql_read_regs(ha, fw->rseq_3_reg, ha->iobase + 0xC0,
20056 sizeof (fw->rseq_3_reg) / 4, 32);
20057
20058 /* Auxiliary sequencer registers. */
20059
20060 /* ASEQ GP */
20061 WRT32_IO_REG(ha, io_base_addr, 0xB000);
20062 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0, 16, 32);
20063 WRT32_IO_REG(ha, io_base_addr, 0xB010);
20064 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20065 WRT32_IO_REG(ha, io_base_addr, 0xB020);
20066 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20067 WRT32_IO_REG(ha, io_base_addr, 0xB030);
20068 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20069 WRT32_IO_REG(ha, io_base_addr, 0xB040);
20070 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20071 WRT32_IO_REG(ha, io_base_addr, 0xB050);
20072 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20073 WRT32_IO_REG(ha, io_base_addr, 0xB060);
20074 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20075 WRT32_IO_REG(ha, io_base_addr, 0xB070);
20076 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20077 WRT32_IO_REG(ha, io_base_addr, 0xB100);
20078 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20079 WRT32_IO_REG(ha, io_base_addr, 0xB110);
20080 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20081 WRT32_IO_REG(ha, io_base_addr, 0xB120);
20082 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20083 WRT32_IO_REG(ha, io_base_addr, 0xB130);
20084 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20085 WRT32_IO_REG(ha, io_base_addr, 0xB140);
20086 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20087 WRT32_IO_REG(ha, io_base_addr, 0xB150);
20088 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20089 WRT32_IO_REG(ha, io_base_addr, 0xB160);
20090 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20091 WRT32_IO_REG(ha, io_base_addr, 0xB170);
20092 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20093
20094 /* ASEQ-0 */
20095 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
20096 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
20097 16, 32);
20098 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
20099 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20100
20101 /* ASEQ-1 */
20102 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
20103 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
20104 16, 32);
20105
20106 /* ASEQ-2 */
20107 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
20108 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
20109 16, 32);
20110
20111 /* ASEQ-3 */
20112 WRT32_IO_REG(ha, io_base_addr, 0xB1F0);
20113 (void) ql_read_regs(ha, fw->aseq_3_reg, ha->iobase + 0xC0,
20114 16, 32);
20115
20116 /* Command DMA registers. */
20117
20118 WRT32_IO_REG(ha, io_base_addr, 0x7100);
20119 bp = ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
20120 16, 32);
20121 WRT32_IO_REG(ha, io_base_addr, 0x7120);
20122 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20123 WRT32_IO_REG(ha, io_base_addr, 0x7130);
20124 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20125 WRT32_IO_REG(ha, io_base_addr, 0x71f0);
20126 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20127
20128 /* Queues. */
20129
20130 /* RequestQ0 */
20131 WRT32_IO_REG(ha, io_base_addr, 0x7200);
20132 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
20133 8, 32);
20134 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20135
20136 /* ResponseQ0 */
20137 WRT32_IO_REG(ha, io_base_addr, 0x7300);
20138 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
20139 8, 32);
20140 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20141
20142 /* RequestQ1 */
20143 WRT32_IO_REG(ha, io_base_addr, 0x7400);
20144 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
20145 8, 32);
20146 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20147
20148 /* Transmit DMA registers. */
20149
20150 /* XMT0 */
20151 WRT32_IO_REG(ha, io_base_addr, 0x7600);
20152 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
20153 16, 32);
20154 WRT32_IO_REG(ha, io_base_addr, 0x7610);
20155 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20156
20157 /* XMT1 */
20158 WRT32_IO_REG(ha, io_base_addr, 0x7620);
20159 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
20160 16, 32);
20161 WRT32_IO_REG(ha, io_base_addr, 0x7630);
20162 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20163
20164 /* XMT2 */
20165 WRT32_IO_REG(ha, io_base_addr, 0x7640);
20166 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
20167 16, 32);
20168 WRT32_IO_REG(ha, io_base_addr, 0x7650);
20169 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20170
20171 /* XMT3 */
20172 WRT32_IO_REG(ha, io_base_addr, 0x7660);
20173 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
20174 16, 32);
20175 WRT32_IO_REG(ha, io_base_addr, 0x7670);
20176 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20177
20178 /* XMT4 */
20179 WRT32_IO_REG(ha, io_base_addr, 0x7680);
20180 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
20181 16, 32);
20182 WRT32_IO_REG(ha, io_base_addr, 0x7690);
20183 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20184
20185 /* XMT Common */
20186 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
20187 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
20188 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
20189
20190 /* Receive DMA registers. */
20191
20192 /* RCVThread0 */
20193 WRT32_IO_REG(ha, io_base_addr, 0x7700);
20194 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
20195 ha->iobase + 0xC0, 16, 32);
20196 WRT32_IO_REG(ha, io_base_addr, 0x7710);
20197 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20198
20199 /* RCVThread1 */
20200 WRT32_IO_REG(ha, io_base_addr, 0x7720);
20201 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
20202 ha->iobase + 0xC0, 16, 32);
20203 WRT32_IO_REG(ha, io_base_addr, 0x7730);
20204 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20205
20206 /* RISC registers. */
20207
20208 /* RISC GP */
20209 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
20210 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0, 16, 32);
20211 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
20212 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20213 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
20214 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20215 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
20216 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20217 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
20218 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20219 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
20220 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20221 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
20222 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20223 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
20224 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20225
20226 /* Local memory controller (LMC) registers. */
20227
20228 /* LMC */
20229 WRT32_IO_REG(ha, io_base_addr, 0x3000);
20230 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0, 16, 32);
20231 WRT32_IO_REG(ha, io_base_addr, 0x3010);
20232 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20233 WRT32_IO_REG(ha, io_base_addr, 0x3020);
20234 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20235 WRT32_IO_REG(ha, io_base_addr, 0x3030);
20236 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20237 WRT32_IO_REG(ha, io_base_addr, 0x3040);
20238 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20239 WRT32_IO_REG(ha, io_base_addr, 0x3050);
20240 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20241 WRT32_IO_REG(ha, io_base_addr, 0x3060);
20242 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20243 WRT32_IO_REG(ha, io_base_addr, 0x3070);
20244 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20245
20246 /* Fibre Protocol Module registers. */
20247
20248 /* FPM hardware */
20249 WRT32_IO_REG(ha, io_base_addr, 0x4000);
20250 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0, 16, 32);
20251 WRT32_IO_REG(ha, io_base_addr, 0x4010);
20252 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20253 WRT32_IO_REG(ha, io_base_addr, 0x4020);
20254 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20255 WRT32_IO_REG(ha, io_base_addr, 0x4030);
20256 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20257 WRT32_IO_REG(ha, io_base_addr, 0x4040);
20258 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20259 WRT32_IO_REG(ha, io_base_addr, 0x4050);
20260 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20261 WRT32_IO_REG(ha, io_base_addr, 0x4060);
20262 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20263 WRT32_IO_REG(ha, io_base_addr, 0x4070);
20264 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20265 WRT32_IO_REG(ha, io_base_addr, 0x4080);
20266 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20267 WRT32_IO_REG(ha, io_base_addr, 0x4090);
20268 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20269 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
20270 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20271 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
20272 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20273 WRT32_IO_REG(ha, io_base_addr, 0x40C0);
20274 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20275 WRT32_IO_REG(ha, io_base_addr, 0x40D0);
20276 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20277 WRT32_IO_REG(ha, io_base_addr, 0x40E0);
20278 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20279 WRT32_IO_REG(ha, io_base_addr, 0x40F0);
20280 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20281
20282 /* Pointer arrays registers */
20283
20284 /* RQ0 Array registers. */
20285 WRT32_IO_REG(ha, io_base_addr, 0x5C00);
20286 bp = ql_read_regs(ha, fw->rq0_array_reg, ha->iobase + 0xC0,
20287 16, 32);
20288 WRT32_IO_REG(ha, io_base_addr, 0x5C10);
20289 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20290 WRT32_IO_REG(ha, io_base_addr, 0x5C20);
20291 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20292 WRT32_IO_REG(ha, io_base_addr, 0x5C30);
20293 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20294 WRT32_IO_REG(ha, io_base_addr, 0x5C40);
20295 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20296 WRT32_IO_REG(ha, io_base_addr, 0x5C50);
20297 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20298 WRT32_IO_REG(ha, io_base_addr, 0x5C60);
20299 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20300 WRT32_IO_REG(ha, io_base_addr, 0x5C70);
20301 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20302 WRT32_IO_REG(ha, io_base_addr, 0x5C80);
20303 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20304 WRT32_IO_REG(ha, io_base_addr, 0x5C90);
20305 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20306 WRT32_IO_REG(ha, io_base_addr, 0x5CA0);
20307 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20308 WRT32_IO_REG(ha, io_base_addr, 0x5CB0);
20309 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20310 WRT32_IO_REG(ha, io_base_addr, 0x5CC0);
20311 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20312 WRT32_IO_REG(ha, io_base_addr, 0x5CD0);
20313 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20314 WRT32_IO_REG(ha, io_base_addr, 0x5CE0);
20315 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20316 WRT32_IO_REG(ha, io_base_addr, 0x5CF0);
20317 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20318
20319 /* RQ1 Array registers. */
20320 WRT32_IO_REG(ha, io_base_addr, 0x5D00);
20321 bp = ql_read_regs(ha, fw->rq1_array_reg, ha->iobase + 0xC0, 16, 32);
20322 WRT32_IO_REG(ha, io_base_addr, 0x5D10);
20323 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20324 WRT32_IO_REG(ha, io_base_addr, 0x5D20);
20325 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20326 WRT32_IO_REG(ha, io_base_addr, 0x5D30);
20327 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20328 WRT32_IO_REG(ha, io_base_addr, 0x5D40);
20329 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20330 WRT32_IO_REG(ha, io_base_addr, 0x5D50);
20331 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20332 WRT32_IO_REG(ha, io_base_addr, 0x5D60);
20333 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20334 WRT32_IO_REG(ha, io_base_addr, 0x5D70);
20335 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20336 WRT32_IO_REG(ha, io_base_addr, 0x5D80);
20337 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20338 WRT32_IO_REG(ha, io_base_addr, 0x5D90);
20339 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20340 WRT32_IO_REG(ha, io_base_addr, 0x5DA0);
20341 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20342 WRT32_IO_REG(ha, io_base_addr, 0x5DB0);
20343 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20344 WRT32_IO_REG(ha, io_base_addr, 0x5DC0);
20345 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20346 WRT32_IO_REG(ha, io_base_addr, 0x5DD0);
20347 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20348 WRT32_IO_REG(ha, io_base_addr, 0x5DE0);
20349 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20350 WRT32_IO_REG(ha, io_base_addr, 0x5DF0);
20351 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20352
20353 /* RP0 Array registers. */
20354 WRT32_IO_REG(ha, io_base_addr, 0x5E00);
20355 bp = ql_read_regs(ha, fw->rp0_array_reg, ha->iobase + 0xC0, 16, 32);
20356 WRT32_IO_REG(ha, io_base_addr, 0x5E10);
20357 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20358 WRT32_IO_REG(ha, io_base_addr, 0x5E20);
20359 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20360 WRT32_IO_REG(ha, io_base_addr, 0x5E30);
20361 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20362 WRT32_IO_REG(ha, io_base_addr, 0x5E40);
20363 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20364 WRT32_IO_REG(ha, io_base_addr, 0x5E50);
20365 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20366 WRT32_IO_REG(ha, io_base_addr, 0x5E60);
20367 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20368 WRT32_IO_REG(ha, io_base_addr, 0x5E70);
20369 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20370 WRT32_IO_REG(ha, io_base_addr, 0x5E80);
20371 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20372 WRT32_IO_REG(ha, io_base_addr, 0x5E90);
20373 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20374 WRT32_IO_REG(ha, io_base_addr, 0x5EA0);
20375 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20376 WRT32_IO_REG(ha, io_base_addr, 0x5EB0);
20377 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20378 WRT32_IO_REG(ha, io_base_addr, 0x5EC0);
20379 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20380 WRT32_IO_REG(ha, io_base_addr, 0x5ED0);
20381 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20382 WRT32_IO_REG(ha, io_base_addr, 0x5EE0);
20383 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20384 WRT32_IO_REG(ha, io_base_addr, 0x5EF0);
20385 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20386
20387 /* RP1 Array registers. */
20388 WRT32_IO_REG(ha, io_base_addr, 0x5F00);
20389 bp = ql_read_regs(ha, fw->rp1_array_reg, ha->iobase + 0xC0, 16, 32);
20390 WRT32_IO_REG(ha, io_base_addr, 0x5F10);
20391 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20392 WRT32_IO_REG(ha, io_base_addr, 0x5F20);
20393 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20394 WRT32_IO_REG(ha, io_base_addr, 0x5F30);
20395 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20396 WRT32_IO_REG(ha, io_base_addr, 0x5F40);
20397 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20398 WRT32_IO_REG(ha, io_base_addr, 0x5F50);
20399 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20400 WRT32_IO_REG(ha, io_base_addr, 0x5F60);
20401 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20402 WRT32_IO_REG(ha, io_base_addr, 0x5F70);
20403 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20404 WRT32_IO_REG(ha, io_base_addr, 0x5F80);
20405 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20406 WRT32_IO_REG(ha, io_base_addr, 0x5F90);
20407 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20408 WRT32_IO_REG(ha, io_base_addr, 0x5FA0);
20409 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20410 WRT32_IO_REG(ha, io_base_addr, 0x5FB0);
20411 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20412 WRT32_IO_REG(ha, io_base_addr, 0x5FC0);
20413 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20414 WRT32_IO_REG(ha, io_base_addr, 0x5FD0);
20415 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20416 WRT32_IO_REG(ha, io_base_addr, 0x5FE0);
20417 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20418 WRT32_IO_REG(ha, io_base_addr, 0x5FF0);
20419 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20420
20421 /* AT0 Array Registers */
20422 WRT32_IO_REG(ha, io_base_addr, 0x7080);
20423 bp = ql_read_regs(ha, fw->ato_array_reg, ha->iobase + 0xC0, 16, 32);
20424 WRT32_IO_REG(ha, io_base_addr, 0x7090);
20425 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20426 WRT32_IO_REG(ha, io_base_addr, 0x70A0);
20427 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20428 WRT32_IO_REG(ha, io_base_addr, 0x70B0);
20429 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20430 WRT32_IO_REG(ha, io_base_addr, 0x70C0);
20431 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20432 WRT32_IO_REG(ha, io_base_addr, 0x70D0);
20433 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20434 WRT32_IO_REG(ha, io_base_addr, 0x70E0);
20435 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20436 WRT32_IO_REG(ha, io_base_addr, 0x70F0);
20437 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20438
20439 /* I/O queue control registers */
20440
20441 /* Queue Control Registers. */
20442 WRT32_IO_REG(ha, io_base_addr, 0x7800);
20443 (void) ql_read_regs(ha, fw->queue_control_reg, ha->iobase + 0xC0,
20444 16, 32);
20445
20446 /* Frame Buffer registers. */
20447
20448 /* FB hardware */
20449 WRT32_IO_REG(ha, io_base_addr, 0x6000);
20450 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0, 16, 32);
20451 WRT32_IO_REG(ha, io_base_addr, 0x6010);
20452 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20453 WRT32_IO_REG(ha, io_base_addr, 0x6020);
20454 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20455 WRT32_IO_REG(ha, io_base_addr, 0x6030);
20456 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20457 WRT32_IO_REG(ha, io_base_addr, 0x6040);
20458 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20459 WRT32_IO_REG(ha, io_base_addr, 0x6060);
20460 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20461 WRT32_IO_REG(ha, io_base_addr, 0x6070);
20462 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20463 WRT32_IO_REG(ha, io_base_addr, 0x6100);
20464 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20465 WRT32_IO_REG(ha, io_base_addr, 0x6130);
20466 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20467 WRT32_IO_REG(ha, io_base_addr, 0x6150);
20468 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20469 WRT32_IO_REG(ha, io_base_addr, 0x6170);
20470 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20471 WRT32_IO_REG(ha, io_base_addr, 0x6190);
20472 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20473 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
20474 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20475 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
20476 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20477 WRT32_IO_REG(ha, io_base_addr, 0x6530);
20478 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20479 WRT32_IO_REG(ha, io_base_addr, 0x6540);
20480 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20481 WRT32_IO_REG(ha, io_base_addr, 0x6550);
20482 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20483 WRT32_IO_REG(ha, io_base_addr, 0x6560);
20484 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20485 WRT32_IO_REG(ha, io_base_addr, 0x6570);
20486 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20487 WRT32_IO_REG(ha, io_base_addr, 0x6580);
20488 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20489 WRT32_IO_REG(ha, io_base_addr, 0x6590);
20490 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20491 WRT32_IO_REG(ha, io_base_addr, 0x65A0);
20492 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20493 WRT32_IO_REG(ha, io_base_addr, 0x65B0);
20494 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20495 WRT32_IO_REG(ha, io_base_addr, 0x65C0);
20496 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20497 WRT32_IO_REG(ha, io_base_addr, 0x65D0);
20498 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20499 WRT32_IO_REG(ha, io_base_addr, 0x65E0);
20500 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20501 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
20502 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20503
20504 /* Get the Queue Pointers */
20505 dp = fw->req_rsp_ext_mem;
20506 for (index = 0; index < ha->rsp_queues_cnt; index++) {
20507 if (index == 0) {
20508 *dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_in);
20509 LITTLE_ENDIAN_32(dp);
20510 dp++;
20511 *dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_out);
20512 LITTLE_ENDIAN_32(dp);
20513 dp++;
20514 } else if (index == 1) {
20515 *dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_in);
20516 LITTLE_ENDIAN_32(dp);
20517 dp++;
20518 *dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_out);
20519 LITTLE_ENDIAN_32(dp);
20520 dp++;
20521 } else {
20522 *dp++ = 0;
20523 *dp++ = 0;
20524 }
20525 *dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_in);
20526 LITTLE_ENDIAN_32(dp);
20527 dp++;
20528 *dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_out);
20529 LITTLE_ENDIAN_32(dp);
20530 dp++;
20531 }
20532
20533 /* Get the request queue */
20534 (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
20535 DDI_DMA_SYNC_FORCPU);
20536 w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
20537 for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
20538 *dp = *w32ptr++;
20539 LITTLE_ENDIAN_32(dp);
20540 dp++;
20541 }
20542 if (ha->req_q[1] != NULL) {
20543 (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle, 0, 0,
20544 DDI_DMA_SYNC_FORCPU);
20545 w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
20546 for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
20547 *dp = *w32ptr++;
20548 LITTLE_ENDIAN_32(dp);
20549 dp++;
20550 }
20551 }
20552
20553 /* Get the response queues */
20554 for (index = 0; index < ha->rsp_queues_cnt; index++) {
20555 (void) ddi_dma_sync(ha->rsp_queues[index]->rsp_ring.dma_handle,
20556 0, 0, DDI_DMA_SYNC_FORCPU);
20557 w32ptr = (uint32_t *)ha->rsp_queues[index]->rsp_ring.bp;
20558 for (cnt = 0; cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
20559 cnt++) {
20560 *dp = *w32ptr++;
20561 LITTLE_ENDIAN_32(dp);
20562 dp++;
20563 }
20564 }
20565
20566 /* Reset RISC. */
20567 ql_reset_chip(ha);
20568
20569 /* Code RAM. */
20570 rv = ql_read_risc_ram(ha, 0x20000, sizeof (fw->code_ram) / 4,
20571 fw->code_ram);
20572 if (rval == QL_SUCCESS) {
20573 rval = rv;
20574 }
20575 rv = ql_read_risc_ram(ha, 0x100000,
20576 ha->fw_ext_memory_size / 4, dp);
20577 if (rval == QL_SUCCESS) {
20578 rval = rv;
20579 }
20580
20581 /* Get the extended trace buffer */
20582 if (ha->fwexttracebuf.dma_handle != NULL) {
20583 /* Sync DMA buffer. */
20584 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
20585 FWEXTSIZE, DDI_DMA_SYNC_FORCPU);
20586
20587 w32ptr = ha->fwexttracebuf.bp;
20588 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
20589 fw->ext_trace_buf[cnt] = *w32ptr++;
20590 }
20591 }
20592
20593 /* Get the FC event trace buffer */
20594 if (ha->fwfcetracebuf.dma_handle != NULL) {
20595 /* Sync DMA buffer. */
20596 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
20597 FWFCESIZE, DDI_DMA_SYNC_FORCPU);
20598
20599 w32ptr = ha->fwfcetracebuf.bp;
20600 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
20601 fw->fce_trace_buf[cnt] = *w32ptr++;
20602 }
20603 }
20604
20605 if (rval != QL_SUCCESS) {
20606 EL(ha, "failed, rval = %xh\n", rval);
20607 } else {
20608 /*EMPTY*/
20609 QL_PRINT_10(ha, "done\n");
20610 }
20611 return (QL_SUCCESS);
20612 }
20613
20614 /*
20615 * ql_83xx_ascii_fw_dump
20616 * Converts ISP83xx firmware binary dump to ascii.
20617 *
20618 * Input:
20619 * ha = adapter state pointer.
20620  *	bufp = buffer pointer.
20621 *
20622 * Returns:
20623 * Amount of data buffer used.
20624 *
20625 * Context:
20626 * Kernel context.
20627 */
20628 static size_t
20629 ql_83xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
20630 {
20631 uint32_t cnt, cnt1, len, *dp, *dp2;
20632 caddr_t bp = bufp;
20633 ql_83xx_fw_dump_t *fw = ha->ql_dump_ptr;
20634
20635 QL_PRINT_3(ha, "started\n");
20636
20637 if ((len = ha->risc_dump_size) == 0) {
20638 QL_PRINT_10(ha, "no buffer\n");
20639 return (0);
20640 }
20641 (void) snprintf(bp, len, "\nISP FW Version %d.%02d.%02d Attributes "
20642 "%X\n", ha->fw_major_version, ha->fw_minor_version,
20643 ha->fw_subminor_version, ha->fw_attributes);
20644 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20645 return (strlen(bufp));
20646 }
20647
20648 (void) snprintf(bp, len, "\nHCCR Register\n%08x\n", fw->hccr);
20649 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20650 return (strlen(bufp));
20651 }
20652
20653 (void) snprintf(bp, len, "\nR2H Status Register\n%08x\n",
20654 fw->r2h_status);
20655 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20656 return (strlen(bufp));
20657 }
20658
20659 (void) snprintf(bp, len,
20660 "\nAER Uncorrectable Error Status Register\n%08x\n", fw->aer_ues);
20661 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20662 return (strlen(bufp));
20663 }
20664
20665 (void) snprintf(bp, len, "\nHostRisc Registers");
20666 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20667 return (strlen(bufp));
20668 }
20669 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
20670 if (cnt % 8 == 0) {
20671 (void) snprintf(bp, len, "\n");
20672 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20673 return (strlen(bufp));
20674 }
20675 }
20676 (void) snprintf(bp, len, "%08x ", fw->hostrisc_reg[cnt]);
20677 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20678 return (strlen(bufp));
20679 }
20680 }
20681
20682 (void) snprintf(bp, len, "\n\nPCIe Registers");
20683 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20684 return (strlen(bufp));
20685 }
20686 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
20687 if (cnt % 8 == 0) {
20688 (void) snprintf(bp, len, "\n");
20689 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20690 return (strlen(bufp));
20691 }
20692 }
20693 (void) snprintf(bp, len, "%08x ", fw->pcie_reg[cnt]);
20694 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20695 return (strlen(bufp));
20696 }
20697 }
20698
20699 dp = fw->req_rsp_ext_mem;
20700 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
20701 (void) snprintf(bp, len, "\n\nQueue Pointers #%d:\n", cnt);
20702 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20703 return (strlen(bufp));
20704 }
20705 for (cnt1 = 0; cnt1 < 4; cnt1++) {
20706 (void) snprintf(bp, len, "%08x ", *dp++);
20707 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20708 return (strlen(bufp));
20709 }
20710 }
20711 }
20712
20713 (void) snprintf(bp, len, "\n\nHost Interface Registers");
20714 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20715 return (strlen(bufp));
20716 }
20717 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
20718 if (cnt % 8 == 0) {
20719 (void) snprintf(bp, len, "\n");
20720 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20721 return (strlen(bufp));
20722 }
20723 }
20724 (void) snprintf(bp, len, "%08x ", fw->host_reg[cnt]);
20725 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20726 return (strlen(bufp));
20727 }
20728 }
20729
20730 (void) snprintf(bp, len, "\n\nShadow Registers");
20731 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20732 return (strlen(bufp));
20733 }
20734 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
20735 if (cnt % 8 == 0) {
20736 (void) snprintf(bp, len, "\n");
20737 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20738 return (strlen(bufp));
20739 }
20740 }
20741 (void) snprintf(bp, len, "%08x ", fw->shadow_reg[cnt]);
20742 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20743 return (strlen(bufp));
20744 }
20745 }
20746
20747 (void) snprintf(bp, len, "\n\nRISC IO Register\n%08x", fw->risc_io);
20748 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20749 return (strlen(bufp));
20750 }
20751
20752 (void) snprintf(bp, len, "\n\nMailbox Registers");
20753 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20754 return (strlen(bufp));
20755 }
20756 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
20757 if (cnt % 16 == 0) {
20758 (void) snprintf(bp, len, "\n");
20759 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20760 return (strlen(bufp));
20761 }
20762 }
20763 (void) snprintf(bp, len, "%04x ", fw->mailbox_reg[cnt]);
20764 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20765 return (strlen(bufp));
20766 }
20767 }
20768
20769 (void) snprintf(bp, len, "\n\nXSEQ GP Registers");
20770 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20771 return (strlen(bufp));
20772 }
20773 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
20774 if (cnt % 8 == 0) {
20775 (void) snprintf(bp, len, "\n");
20776 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20777 return (strlen(bufp));
20778 }
20779 }
20780 (void) snprintf(bp, len, "%08x ", fw->xseq_gp_reg[cnt]);
20781 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20782 return (strlen(bufp));
20783 }
20784 }
20785
20786 (void) snprintf(bp, len, "\n\nXSEQ-0 Registers");
20787 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20788 return (strlen(bufp));
20789 }
20790 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
20791 if (cnt % 8 == 0) {
20792 (void) snprintf(bp, len, "\n");
20793 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20794 return (strlen(bufp));
20795 }
20796 }
20797 (void) snprintf(bp, len, "%08x ", fw->xseq_0_reg[cnt]);
20798 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20799 return (strlen(bufp));
20800 }
20801 }
20802
20803 (void) snprintf(bp, len, "\n\nXSEQ-1 Registers");
20804 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20805 return (strlen(bufp));
20806 }
20807 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
20808 if (cnt % 8 == 0) {
20809 (void) snprintf(bp, len, "\n");
20810 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20811 return (strlen(bufp));
20812 }
20813 }
20814 (void) snprintf(bp, len, "%08x ", fw->xseq_1_reg[cnt]);
20815 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20816 return (strlen(bufp));
20817 }
20818 }
20819
20820 (void) snprintf(bp, len, "\n\nXSEQ-2 Registers");
20821 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20822 return (strlen(bufp));
20823 }
20824 for (cnt = 0; cnt < sizeof (fw->xseq_2_reg) / 4; cnt++) {
20825 if (cnt % 8 == 0) {
20826 (void) snprintf(bp, len, "\n");
20827 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20828 return (strlen(bufp));
20829 }
20830 }
20831 (void) snprintf(bp, len, "%08x ", fw->xseq_2_reg[cnt]);
20832 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20833 return (strlen(bufp));
20834 }
20835 }
20836
20837 (void) snprintf(bp, len, "\n\nRSEQ GP Registers");
20838 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20839 return (strlen(bufp));
20840 }
20841 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
20842 if (cnt % 8 == 0) {
20843 (void) snprintf(bp, len, "\n");
20844 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20845 return (strlen(bufp));
20846 }
20847 }
20848 (void) snprintf(bp, len, "%08x ", fw->rseq_gp_reg[cnt]);
20849 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20850 return (strlen(bufp));
20851 }
20852 }
20853
20854 (void) snprintf(bp, len, "\n\nRSEQ-0 Registers");
20855 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20856 return (strlen(bufp));
20857 }
20858 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
20859 if (cnt % 8 == 0) {
20860 (void) snprintf(bp, len, "\n");
20861 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20862 return (strlen(bufp));
20863 }
20864 }
20865 (void) snprintf(bp, len, "%08x ", fw->rseq_0_reg[cnt]);
20866 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20867 return (strlen(bufp));
20868 }
20869 }
20870
20871 (void) snprintf(bp, len, "\n\nRSEQ-1 Registers");
20872 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20873 return (strlen(bufp));
20874 }
20875 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
20876 if (cnt % 8 == 0) {
20877 (void) snprintf(bp, len, "\n");
20878 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20879 return (strlen(bufp));
20880 }
20881 }
20882 (void) snprintf(bp, len, "%08x ", fw->rseq_1_reg[cnt]);
20883 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20884 return (strlen(bufp));
20885 }
20886 }
20887
20888 (void) snprintf(bp, len, "\n\nRSEQ-2 Registers");
20889 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20890 return (strlen(bufp));
20891 }
20892 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
20893 if (cnt % 8 == 0) {
20894 (void) snprintf(bp, len, "\n");
20895 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20896 return (strlen(bufp));
20897 }
20898 }
20899 (void) snprintf(bp, len, "%08x ", fw->rseq_2_reg[cnt]);
20900 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20901 return (strlen(bufp));
20902 }
20903 }
20904
20905 (void) snprintf(bp, len, "\n\nRSEQ-3 Registers");
20906 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20907 return (strlen(bufp));
20908 }
20909 for (cnt = 0; cnt < sizeof (fw->rseq_3_reg) / 4; cnt++) {
20910 if (cnt % 8 == 0) {
20911 (void) snprintf(bp, len, "\n");
20912 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20913 return (strlen(bufp));
20914 }
20915 }
20916 (void) snprintf(bp, len, "%08x ", fw->rseq_3_reg[cnt]);
20917 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20918 return (strlen(bufp));
20919 }
20920 }
20921
20922 (void) snprintf(bp, len, "\n\nASEQ GP Registers");
20923 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20924 return (strlen(bufp));
20925 }
20926 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
20927 if (cnt % 8 == 0) {
20928 (void) snprintf(bp, len, "\n");
20929 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20930 return (strlen(bufp));
20931 }
20932 }
20933 (void) snprintf(bp, len, "%08x ", fw->aseq_gp_reg[cnt]);
20934 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20935 return (strlen(bufp));
20936 }
20937 }
20938
20939 (void) snprintf(bp, len, "\n\nASEQ-0 Registers");
20940 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20941 return (strlen(bufp));
20942 }
20943 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
20944 if (cnt % 8 == 0) {
20945 (void) snprintf(bp, len, "\n");
20946 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20947 return (strlen(bufp));
20948 }
20949 }
20950 (void) snprintf(bp, len, "%08x ", fw->aseq_0_reg[cnt]);
20951 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20952 return (strlen(bufp));
20953 }
20954 }
20955
20956 (void) snprintf(bp, len, "\n\nASEQ-1 Registers");
20957 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20958 return (strlen(bufp));
20959 }
20960 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
20961 if (cnt % 8 == 0) {
20962 (void) snprintf(bp, len, "\n");
20963 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20964 return (strlen(bufp));
20965 }
20966 }
20967 (void) snprintf(bp, len, "%08x ", fw->aseq_1_reg[cnt]);
20968 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20969 return (strlen(bufp));
20970 }
20971 }
20972
20973 (void) snprintf(bp, len, "\n\nASEQ-2 Registers");
20974 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20975 return (strlen(bufp));
20976 }
20977 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
20978 if (cnt % 8 == 0) {
20979 (void) snprintf(bp, len, "\n");
20980 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20981 return (strlen(bufp));
20982 }
20983 }
20984 (void) snprintf(bp, len, "%08x ", fw->aseq_2_reg[cnt]);
20985 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20986 return (strlen(bufp));
20987 }
20988 }
20989
20990 (void) snprintf(bp, len, "\n\nASEQ-3 Registers");
20991 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20992 return (strlen(bufp));
20993 }
20994 for (cnt = 0; cnt < sizeof (fw->aseq_3_reg) / 4; cnt++) {
20995 if (cnt % 8 == 0) {
20996 (void) snprintf(bp, len, "\n");
20997 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20998 return (strlen(bufp));
20999 }
21000 }
21001 (void) snprintf(bp, len, "%08x ", fw->aseq_3_reg[cnt]);
21002 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21003 return (strlen(bufp));
21004 }
21005 }
21006
21007 (void) snprintf(bp, len, "\n\nCommand DMA Registers");
21008 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21009 return (strlen(bufp));
21010 }
21011 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
21012 if (cnt % 8 == 0) {
21013 (void) snprintf(bp, len, "\n");
21014 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21015 return (strlen(bufp));
21016 }
21017 }
21018 (void) snprintf(bp, len, "%08x ", fw->cmd_dma_reg[cnt]);
21019 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21020 return (strlen(bufp));
21021 }
21022 }
21023
21024 (void) snprintf(bp, len, "\n\nRequest0 Queue DMA Channel Registers");
21025 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21026 return (strlen(bufp));
21027 }
21028 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
21029 if (cnt % 8 == 0) {
21030 (void) snprintf(bp, len, "\n");
21031 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21032 return (strlen(bufp));
21033 }
21034 }
21035 (void) snprintf(bp, len, "%08x ", fw->req0_dma_reg[cnt]);
21036 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21037 return (strlen(bufp));
21038 }
21039 }
21040
21041 (void) snprintf(bp, len, "\n\nResponse0 Queue DMA Channel Registers");
21042 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21043 return (strlen(bufp));
21044 }
21045 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
21046 if (cnt % 8 == 0) {
21047 (void) snprintf(bp, len, "\n");
21048 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21049 return (strlen(bufp));
21050 }
21051 }
21052 (void) snprintf(bp, len, "%08x ", fw->resp0_dma_reg[cnt]);
21053 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21054 return (strlen(bufp));
21055 }
21056 }
21057
21058 (void) snprintf(bp, len, "\n\nRequest1 Queue DMA Channel Registers");
21059 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21060 return (strlen(bufp));
21061 }
21062 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
21063 if (cnt % 8 == 0) {
21064 (void) snprintf(bp, len, "\n");
21065 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21066 return (strlen(bufp));
21067 }
21068 }
21069 (void) snprintf(bp, len, "%08x ", fw->req1_dma_reg[cnt]);
21070 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21071 return (strlen(bufp));
21072 }
21073 }
21074
21075 (void) snprintf(bp, len, "\n\nXMT0 Data DMA Registers");
21076 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21077 return (strlen(bufp));
21078 }
21079 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
21080 if (cnt % 8 == 0) {
21081 (void) snprintf(bp, len, "\n");
21082 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21083 return (strlen(bufp));
21084 }
21085 }
21086 (void) snprintf(bp, len, "%08x ", fw->xmt0_dma_reg[cnt]);
21087 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21088 return (strlen(bufp));
21089 }
21090 }
21091
21092 (void) snprintf(bp, len, "\n\nXMT1 Data DMA Registers");
21093 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21094 return (strlen(bufp));
21095 }
21096 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
21097 if (cnt % 8 == 0) {
21098 (void) snprintf(bp, len, "\n");
21099 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21100 return (strlen(bufp));
21101 }
21102 }
21103 (void) snprintf(bp, len, "%08x ", fw->xmt1_dma_reg[cnt]);
21104 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21105 return (strlen(bufp));
21106 }
21107 }
21108
21109 (void) snprintf(bp, len, "\n\nXMT2 Data DMA Registers");
21110 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21111 return (strlen(bufp));
21112 }
21113 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
21114 if (cnt % 8 == 0) {
21115 (void) snprintf(bp, len, "\n");
21116 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21117 return (strlen(bufp));
21118 }
21119 }
21120 (void) snprintf(bp, len, "%08x ", fw->xmt2_dma_reg[cnt]);
21121 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21122 return (strlen(bufp));
21123 }
21124 }
21125
21126 (void) snprintf(bp, len, "\n\nXMT3 Data DMA Registers");
21127 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21128 return (strlen(bufp));
21129 }
21130 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
21131 if (cnt % 8 == 0) {
21132 (void) snprintf(bp, len, "\n");
21133 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21134 return (strlen(bufp));
21135 }
21136 }
21137 (void) snprintf(bp, len, "%08x ", fw->xmt3_dma_reg[cnt]);
21138 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21139 return (strlen(bufp));
21140 }
21141 }
21142
21143 (void) snprintf(bp, len, "\n\nXMT4 Data DMA Registers");
21144 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21145 return (strlen(bufp));
21146 }
21147 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
21148 if (cnt % 8 == 0) {
21149 (void) snprintf(bp, len, "\n");
21150 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21151 return (strlen(bufp));
21152 }
21153 }
21154 (void) snprintf(bp, len, "%08x ", fw->xmt4_dma_reg[cnt]);
21155 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21156 return (strlen(bufp));
21157 }
21158 }
21159
21160 (void) snprintf(bp, len, "\n\nXMT Data DMA Common Registers");
21161 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21162 return (strlen(bufp));
21163 }
21164 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
21165 if (cnt % 8 == 0) {
21166 (void) snprintf(bp, len, "\n");
21167 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21168 return (strlen(bufp));
21169 }
21170 }
21171 (void) snprintf(bp, len, "%08x ", fw->xmt_data_dma_reg[cnt]);
21172 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21173 return (strlen(bufp));
21174 }
21175 }
21176
21177 (void) snprintf(bp, len, "\n\nRCV Thread 0 Data DMA Registers");
21178 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21179 return (strlen(bufp));
21180 }
21181 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
21182 if (cnt % 8 == 0) {
21183 (void) snprintf(bp, len, "\n");
21184 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21185 return (strlen(bufp));
21186 }
21187 }
21188 (void) snprintf(bp, len, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
21189 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21190 return (strlen(bufp));
21191 }
21192 }
21193
21194 (void) snprintf(bp, len, "\n\nRCV Thread 1 Data DMA Registers");
21195 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21196 return (strlen(bufp));
21197 }
21198 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
21199 if (cnt % 8 == 0) {
21200 (void) snprintf(bp, len, "\n");
21201 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21202 return (strlen(bufp));
21203 }
21204 }
21205 (void) snprintf(bp, len, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
21206 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21207 return (strlen(bufp));
21208 }
21209 }
21210
21211 (void) snprintf(bp, len, "\n\nRISC GP Registers");
21212 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21213 return (strlen(bufp));
21214 }
21215 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
21216 if (cnt % 8 == 0) {
21217 (void) snprintf(bp, len, "\n");
21218 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21219 return (strlen(bufp));
21220 }
21221 }
21222 (void) snprintf(bp, len, "%08x ", fw->risc_gp_reg[cnt]);
21223 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21224 return (strlen(bufp));
21225 }
21226 }
21227
21228 (void) snprintf(bp, len, "\n\nLMC Registers");
21229 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21230 return (strlen(bufp));
21231 }
21232 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
21233 if (cnt % 8 == 0) {
21234 (void) snprintf(bp, len, "\n");
21235 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21236 return (strlen(bufp));
21237 }
21238 }
21239 (void) snprintf(bp, len, "%08x ", fw->lmc_reg[cnt]);
21240 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21241 return (strlen(bufp));
21242 }
21243 }
21244
21245 (void) snprintf(bp, len, "\n\nFPM Hardware Registers");
21246 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21247 return (strlen(bufp));
21248 }
21249 cnt1 = (uint32_t)(sizeof (fw->fpm_hdw_reg));
21250 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21251 if (cnt % 8 == 0) {
21252 (void) snprintf(bp, len, "\n");
21253 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21254 return (strlen(bufp));
21255 }
21256 }
21257 (void) snprintf(bp, len, "%08x ", fw->fpm_hdw_reg[cnt]);
21258 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21259 return (strlen(bufp));
21260 }
21261 }
21262
21263 (void) snprintf(bp, len, "\n\nRQ0 Array Registers");
21264 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21265 return (strlen(bufp));
21266 }
21267 cnt1 = (uint32_t)(sizeof (fw->rq0_array_reg));
21268 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21269 if (cnt % 8 == 0) {
21270 (void) snprintf(bp, len, "\n");
21271 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21272 return (strlen(bufp));
21273 }
21274 }
21275 (void) snprintf(bp, len, "%08x ", fw->rq0_array_reg[cnt]);
21276 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21277 return (strlen(bufp));
21278 }
21279 }
21280
21281 (void) snprintf(bp, len, "\n\nRQ1 Array Registers");
21282 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21283 return (strlen(bufp));
21284 }
21285 cnt1 = (uint32_t)(sizeof (fw->rq1_array_reg));
21286 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21287 if (cnt % 8 == 0) {
21288 (void) snprintf(bp, len, "\n");
21289 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21290 return (strlen(bufp));
21291 }
21292 }
21293 (void) snprintf(bp, len, "%08x ", fw->rq1_array_reg[cnt]);
21294 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21295 return (strlen(bufp));
21296 }
21297 }
21298
21299 (void) snprintf(bp, len, "\n\nRP0 Array Registers");
21300 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21301 return (strlen(bufp));
21302 }
21303 cnt1 = (uint32_t)(sizeof (fw->rp0_array_reg));
21304 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21305 if (cnt % 8 == 0) {
21306 (void) snprintf(bp, len, "\n");
21307 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21308 return (strlen(bufp));
21309 }
21310 }
21311 (void) snprintf(bp, len, "%08x ", fw->rp0_array_reg[cnt]);
21312 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21313 return (strlen(bufp));
21314 }
21315 }
21316
21317 (void) snprintf(bp, len, "\n\nRP1 Array Registers");
21318 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21319 return (strlen(bufp));
21320 }
21321 cnt1 = (uint32_t)(sizeof (fw->rp1_array_reg));
21322 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21323 if (cnt % 8 == 0) {
21324 (void) snprintf(bp, len, "\n");
21325 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21326 return (strlen(bufp));
21327 }
21328 }
21329 (void) snprintf(bp, len, "%08x ", fw->rp1_array_reg[cnt]);
21330 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21331 return (strlen(bufp));
21332 }
21333 }
21334
21335 (void) snprintf(bp, len, "\n\nAT0 Array Registers");
21336 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21337 return (strlen(bufp));
21338 }
21339 cnt1 = (uint32_t)(sizeof (fw->ato_array_reg));
21340 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21341 if (cnt % 8 == 0) {
21342 (void) snprintf(bp, len, "\n");
21343 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21344 return (strlen(bufp));
21345 }
21346 }
21347 (void) snprintf(bp, len, "%08x ", fw->ato_array_reg[cnt]);
21348 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21349 return (strlen(bufp));
21350 }
21351 }
21352
21353 (void) snprintf(bp, len, "\n\nQueue Control Registers");
21354 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21355 return (strlen(bufp));
21356 }
21357 cnt1 = (uint32_t)(sizeof (fw->queue_control_reg));
21358 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21359 if (cnt % 8 == 0) {
21360 (void) snprintf(bp, len, "\n");
21361 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21362 return (strlen(bufp));
21363 }
21364 }
21365 (void) snprintf(bp, len, "%08x ", fw->queue_control_reg[cnt]);
21366 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21367 return (strlen(bufp));
21368 }
21369 }
21370
21371 (void) snprintf(bp, len, "\n\nFB Hardware Registers");
21372 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21373 return (strlen(bufp));
21374 }
21375 cnt1 = (uint32_t)(sizeof (fw->fb_hdw_reg));
21376 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21377 if (cnt % 8 == 0) {
21378 (void) snprintf(bp, len, "\n");
21379 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21380 return (strlen(bufp));
21381 }
21382 }
21383 (void) snprintf(bp, len, "%08x ", fw->fb_hdw_reg[cnt]);
21384 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21385 return (strlen(bufp));
21386 }
21387 }
21388
21389 (void) snprintf(bp, len, "\n\nCode RAM");
21390 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21391 return (strlen(bufp));
21392 }
21393 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
21394 if (cnt % 8 == 0) {
21395 (void) snprintf(bp, len, "\n%08x: ", cnt + 0x20000);
21396 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21397 return (strlen(bufp));
21398 }
21399 }
21400 (void) snprintf(bp, len, "%08x ", fw->code_ram[cnt]);
21401 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21402 return (strlen(bufp));
21403 }
21404 }
21405
21406 (void) snprintf(bp, len, "\n\nExternal Memory");
21407 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21408 return (strlen(bufp));
21409 }
21410 dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
21411 fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
21412 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
21413 if (cnt % 8 == 0) {
21414 (void) snprintf(bp, len, "\n%08x: ", cnt + 0x100000);
21415 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21416 return (strlen(bufp));
21417 }
21418 }
21419 (void) snprintf(bp, len, "%08x ", *dp++);
21420 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21421 return (strlen(bufp));
21422 }
21423 }
21424
21425 (void) snprintf(bp, len, "\n\n[<==END] ISP Debug Dump");
21426 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21427 return (strlen(bufp));
21428 }
21429
21430 dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
21431 for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
21432 dp2 = dp;
21433 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21434 if (*dp2++) {
21435 break;
21436 }
21437 }
21438 if (cnt1 == fw->req_q_size[cnt] / 4) {
21439 dp = dp2;
21440 continue;
21441 }
21442 (void) snprintf(bp, len, "\n\nRequest Queue\nQueue %d:", cnt);
21443 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21444 return (strlen(bufp));
21445 }
21446 for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21447 if (cnt1 % 8 == 0) {
21448 (void) snprintf(bp, len, "\n%08x: ", cnt1);
21449 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21450 return (strlen(bufp));
21451 }
21452 }
21453 (void) snprintf(bp, len, "%08x ", *dp++);
21454 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21455 return (strlen(bufp));
21456 }
21457 }
21458 }
21459
21460 for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
21461 dp2 = dp;
21462 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21463 cnt1++) {
21464 if (*dp2++) {
21465 break;
21466 }
21467 }
21468 if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
21469 dp = dp2;
21470 continue;
21471 }
21472 (void) snprintf(bp, len, "\n\nResponse Queue\nQueue %d:", cnt);
21473 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21474 return (strlen(bufp));
21475 }
21476 for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21477 cnt1++) {
21478 if (cnt1 % 8 == 0) {
21479 (void) snprintf(bp, len, "\n%08x: ", cnt1);
21480 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21481 return (strlen(bufp));
21482 }
21483 }
21484 (void) snprintf(bp, len, "%08x ", *dp++);
21485 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21486 return (strlen(bufp));
21487 }
21488 }
21489 }
21490
21491 if (ha->fwexttracebuf.dma_handle != NULL) {
21492 uint32_t cnt_b;
21493 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
21494
21495 (void) snprintf(bp, len, "\n\nExtended Trace Buffer Memory");
21496 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21497 return (strlen(bufp));
21498 }
21499 /* show data address as a byte address, data as long words */
21500 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
21501 cnt_b = cnt * 4;
21502 if (cnt_b % 32 == 0) {
21503 (void) snprintf(bp, len, "\n%08x: ",
21504 (int)(w64 + cnt_b));
21505 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21506 return (strlen(bufp));
21507 }
21508 }
21509 (void) snprintf(bp, len, "%08x ",
21510 fw->ext_trace_buf[cnt]);
21511 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21512 return (strlen(bufp));
21513 }
21514 }
21515 }
21516
21517 if (ha->fwfcetracebuf.dma_handle != NULL) {
21518 uint32_t cnt_b;
21519 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
21520
21521 (void) snprintf(bp, len, "\n\nFC Event Trace Buffer Memory");
21522 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21523 return (strlen(bufp));
21524 }
21525 /* show data address as a byte address, data as long words */
21526 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
21527 cnt_b = cnt * 4;
21528 if (cnt_b % 32 == 0) {
21529 (void) snprintf(bp, len, "\n%08x: ",
21530 (int)(w64 + cnt_b));
21531 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21532 return (strlen(bufp));
21533 }
21534 }
21535 (void) snprintf(bp, len, "%08x ",
21536 fw->fce_trace_buf[cnt]);
21537 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21538 return (strlen(bufp));
21539 }
21540 }
21541 }
21542
21543 QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21544
21545 return (strlen(bufp));
21546 }
21547
21548
21549 /*
21550 * ql_str_ptr
21551 * Verifies buffer is not full
21552 *
21553 * Input:
21554 * ha: adapter state pointer.
21555 * bp: string buffer pointer
21556 * len: buffer length
21557 *
21558 * Returns:
21559 * NULL = buffer full else adjusted buffer pointer
21560 *
21561 * Context:
21562 * Kernel context.
21563 */
21564 /*ARGSUSED*/
21565 static caddr_t
21566 ql_str_ptr(ql_adapter_state_t *ha, caddr_t bp, uint32_t *len)
21567 {
21568 uint32_t i;
21569
21570 i = strlen(bp);
21571 if (i > *len || !(*len -= i)) {
21572 QL_PRINT_10(ha, "full buffer\n");
21573 return (NULL);
21574 }
21575 return (bp += i);
21576 }
21577
21578 /*
21579 * ql_27xx_binary_fw_dump
21580 *
21581 * Input:
21582 * ha: adapter state pointer.
21583 * dmp: firmware dump pointer.
21584 *
21585 * Returns:
21586 * ql local function return status code.
21587 *
21588 * Context:
21589 * Interrupt or Kernel context, no mailbox commands allowed.
21590 */
21591 static int
21592 ql_27xx_binary_fw_dump(ql_adapter_state_t *ha)
21593 {
21594 ql_dmp_template_t *template_buff;
21595 int rval;
21596 uint32_t cnt, *dp, *bp, tsize;
21597
21598 QL_PRINT_10(ha, "started\n");
21599
21600 if (ha->dmp_template.dma_handle == NULL) {
21601 rval = CFG_IST(ha, CFG_LOAD_FLASH_FW) ?
21602 ql_2700_get_flash_dmp_template(ha) :
21603 ql_2700_get_module_dmp_template(ha);
21604 if (rval != QL_SUCCESS) {
21605 EL(ha, "no dump template, status=%xh\n", rval);
21606 return (QL_FUNCTION_PARAMETER_ERROR);
21607 }
21608 }
21609 template_buff = ha->dmp_template.bp;
21610 tsize = template_buff->hdr.size_of_template;
21611
21612 if (ha->md_capture_size == 0) {
21613 ha->ql_dump_ptr = kmem_zalloc(tsize, KM_NOSLEEP);
21614 if (ha->ql_dump_ptr == NULL) {
21615 QL_PRINT_10(ha, "done, failed alloc\n");
21616 return (QL_MEMORY_ALLOC_FAILED);
21617 }
21618 cnt = (uint32_t)(tsize / sizeof (uint32_t));
21619 dp = (uint32_t *)ha->ql_dump_ptr;
21620 bp = (uint32_t *)&template_buff->hdr;
21621 while (cnt--) {
21622 *dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
21623 }
21624 ha->md_capture_size = ql_2700_dmp_parse_template(ha,
21625 (ql_dt_hdr_t *)ha->ql_dump_ptr, NULL, 0);
21626 kmem_free(ha->ql_dump_ptr, tsize);
21627 ha->ql_dump_ptr = NULL;
21628
21629 if (ha->md_capture_size == 0) {
21630 return (QL_MEMORY_ALLOC_FAILED);
21631 }
21632
21633 /*
21634 * Determine ascii dump file size
21635 * 2 ascii bytes per binary byte + a space and
21636 * a newline every 16 binary bytes
21637 */
21638 ha->risc_dump_size = ha->md_capture_size << 1;
21639 ha->risc_dump_size += ha->md_capture_size;
21640 ha->risc_dump_size += ha->md_capture_size / 16 + 1;
21641 QL_PRINT_10(ha, "md_capture_size=%xh, "
21642 "risc_dump_size=%xh\n", ha->md_capture_size,
21643 ha->risc_dump_size);
21644 }
21645
21646 ha->ql_dump_ptr = kmem_zalloc(ha->md_capture_size, KM_NOSLEEP);
21647 if (ha->ql_dump_ptr == NULL) {
21648 QL_PRINT_10(ha, "done, failed alloc\n");
21649 return (QL_MEMORY_ALLOC_FAILED);
21650 }
21651 ha->ql_dump_size = ha->md_capture_size;
21652
21653 /* Disable ISP interrupts. */
21654 ql_disable_intr(ha);
21655
21656 cnt = (uint32_t)(tsize / sizeof (uint32_t));
21657 dp = (uint32_t *)ha->ql_dump_ptr;
21658 bp = (uint32_t *)&template_buff->hdr;
21659 while (cnt--) {
21660 *dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
21661 }
21662
21663 (void) ql_2700_dmp_parse_template(ha,
21664 (ql_dt_hdr_t *)ha->ql_dump_ptr,
21665 (uint8_t *)dp, ha->ql_dump_size);
21666
21667 #ifdef _BIG_ENDIAN
21668 cnt = (uint32_t)(tsize / sizeof (uint32_t));
21669 dp = (uint32_t *)ha->ql_dump_ptr;
21670 while (cnt--) {
21671 ql_chg_endian((uint8_t *)dp, 4);
21672 dp++;
21673 }
21674 #endif
21675 QL_PRINT_10(ha, "done\n");
21676 return (QL_SUCCESS);
21677 }
21678
21679 /*
21680 * ql_27xx_ascii_fw_dump
21681 * Converts ISP27xx firmware binary dump to ascii.
21682 *
21683 * Input:
21684 * ha: port info pointer.
21685 * bptr: buffer pointer.
21686 *
21687 * Returns:
21688 * Amount of data buffer used.
21689 *
21690 * Context:
21691 * Kernel context.
21692 */
21693 static size_t
21694 ql_27xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
21695 {
21696 uint32_t cnt, len, dsize;
21697 uint8_t *fw;
21698 caddr_t bp;
21699
21700 QL_PRINT_10(ha, "started\n");
21701
21702 if ((len = ha->risc_dump_size) == 0) {
21703 QL_PRINT_10(ha, "no buffer\n");
21704 return (0);
21705 }
21706
21707 dsize = ha->ql_dump_size;
21708 fw = (uint8_t *)ha->ql_dump_ptr;
21709 bp = bufp;
21710
21711 QL_PRINT_10(ha, "fw_dump_buffer=%ph, fw_bin_dump_size=%xh\n",
21712 (void *)ha->ql_dump_ptr, ha->ql_dump_size);
21713
21714 /*
21715 * 2 ascii bytes per binary byte + a space and
21716 * a newline every 16 binary bytes
21717 */
21718 cnt = 0;
21719 while (cnt < dsize) {
21720 (void) snprintf(bp, len, "%02x ", *fw++);
21721 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21722 return (strlen(bufp));
21723 }
21724 if (++cnt % 16 == 0) {
21725 (void) snprintf(bp, len, "\n");
21726 if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21727 return (strlen(bufp));
21728 }
21729 }
21730 }
21731 if (cnt % 16 != 0) {
21732 (void) snprintf(bp, len, "\n");
21733 bp = ql_str_ptr(ha, bp, &len);
21734 if (bp == NULL) {
21735 return (strlen(bufp));
21736 }
21737 }
21738
21739 QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21740
21741 return (strlen(bufp));
21742 }
21743
21744 /* ******************************************************************* */
21745 /* ********************* Dump Template Functions ********************* */
21746 /* ******************************************************************* */
21747
21748 /*
21749 * ql_2700_get_module_dmp_template
21750 * Get dump template from firmware module
21751 *
21752 * Input:
21753 * ha: adapter state pointer.
21754 *
21755 * Returns:
21756 * ql local function return status code.
21757 *
21758 * Context:
21759 * Kernel context.
21760 */
21761 int
21762 ql_2700_get_module_dmp_template(ql_adapter_state_t *ha)
21763 {
21764 int rval;
21765 uint32_t word_count, cnt, *bp, *dp;
21766
21767 QL_PRINT_10(ha, "started\n");
21768
21769 if (ha->dmp_template.dma_handle != NULL) {
21770 return (QL_SUCCESS);
21771 }
21772
21773 if ((word_count = ha->risc_fw[2].length) == 0) {
21774 EL(ha, "no dump template, length=0\n");
21775 return (QL_FUNCTION_PARAMETER_ERROR);
21776 }
21777
21778 /* Allocate template buffer. */
21779 ha->dmp_template.size = word_count << 2;
21780 ha->dmp_template.type = LITTLE_ENDIAN_DMA;
21781 ha->dmp_template.max_cookie_count = 1;
21782 ha->dmp_template.alignment = 8;
21783 rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
21784 if (rval != QL_SUCCESS) {
21785 EL(ha, "unable to allocate template buffer, "
21786 "status=%xh\n", rval);
21787 return (rval);
21788 }
21789
21790 /* Get big endian template. */
21791 bp = ha->dmp_template.bp;
21792 dp = (uint32_t *)ha->risc_fw[2].code;
21793 for (cnt = 0; cnt < word_count; cnt++) {
21794 ddi_put32(ha->dmp_template.acc_handle, bp, *dp++);
21795 if (cnt > 6) {
21796 ql_chg_endian((uint8_t *)bp, 4);
21797 }
21798 bp++;
21799 }
21800
21801 QL_PRINT_10(ha, "done\n");
21802 return (rval);
21803 }
21804
21805 /*
21806 * ql_2700_get_flash_dmp_template
21807 * Get dump template from flash
21808 *
21809 * Input:
21810 * pi: port info pointer.
21811 *
21812 * Returns:
21813 * ql local function return status code.
21814 *
21815 * Context:
21816 * Kernel context.
21817 */
21818 int
21819 ql_2700_get_flash_dmp_template(ql_adapter_state_t *ha)
21820 {
21821 int rval;
21822 uint32_t word_count, cnt, *bp;
21823 uint32_t faddr = ha->flash_data_addr | ha->flash_fw_addr;
21824 uint32_t fdata = 0;
21825
21826 QL_PRINT_10(ha, "started, fw_addr=%xh\n", ha->flash_fw_addr);
21827
21828 if (ha->dmp_template.dma_handle != NULL) {
21829 ql_free_phys(ha, &ha->dmp_template);
21830 }
21831
21832 /* First array length */
21833 rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
21834 QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21835 faddr + 3, fdata);
21836 if (rval != QL_SUCCESS) {
21837 EL(ha, "2700_read_flash status=%xh\n", rval);
21838 return (rval);
21839 }
21840 if (fdata == 0 || fdata == 0xffffffff) {
21841 EL(ha, "Invalid first array length = %xh\n", fdata);
21842 return (QL_FUNCTION_PARAMETER_ERROR);
21843 }
21844 ql_chg_endian((uint8_t *)&fdata, 4);
21845 QL_PRINT_7(ha, "First array length = %xh\n", fdata);
21846 faddr += fdata;
21847
21848 /* Second array length */
21849 rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
21850 QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21851 faddr + 3, fdata);
21852 if (rval != QL_SUCCESS) {
21853 EL(ha, "2700_read_flash status=%xh\n", rval);
21854 return (rval);
21855 }
21856 if (fdata == 0 || fdata == 0xffffffff) {
21857 EL(ha, "Invalid second array length = %xh\n", fdata);
21858 return (QL_FUNCTION_PARAMETER_ERROR);
21859 }
21860 ql_chg_endian((uint8_t *)&fdata, 4);
21861 QL_PRINT_7(ha, "Second array length = %xh\n", fdata);
21862 faddr += fdata;
21863
21864 /* Third array length (dump template) */
21865 rval = ql_24xx_read_flash(ha, faddr + 2, &fdata);
21866 QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21867 faddr + 2, fdata);
21868 if (rval != QL_SUCCESS) {
21869 EL(ha, "2700_read_flash status=%xh\n", rval);
21870 return (rval);
21871 }
21872 if (fdata == 0 || fdata == 0xffffffff) {
21873 EL(ha, "Invalid third array length = %xh\n", fdata);
21874 return (QL_FUNCTION_PARAMETER_ERROR);
21875 }
21876 ql_chg_endian((uint8_t *)&fdata, 4);
21877 QL_PRINT_7(ha, "Third array length = %xh\n", fdata);
21878 word_count = fdata;
21879
21880 /* Allocate template buffer. */
21881 ha->dmp_template.size = word_count << 2;
21882 ha->dmp_template.type = LITTLE_ENDIAN_DMA;
21883 ha->dmp_template.max_cookie_count = 1;
21884 ha->dmp_template.alignment = 8;
21885 rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
21886 if (rval != QL_SUCCESS) {
21887 EL(ha, "unable to allocate template buffer, "
21888 "status=%xh\n", rval);
21889 return (rval);
21890 }
21891
21892 /* Get big endian template. */
21893 bp = ha->dmp_template.bp;
21894 for (cnt = 0; cnt < word_count; cnt++) {
21895 rval = ql_24xx_read_flash(ha, faddr++, &fdata);
21896 if (rval != QL_SUCCESS) {
21897 EL(ha, "2700_read_flash status=%xh\n", rval);
21898 ql_free_phys(ha, &ha->dmp_template);
21899 return (rval);
21900 }
21901 ddi_put32(ha->dmp_template.acc_handle, bp, fdata);
21902 bp++;
21903 }
21904
21905 QL_PRINT_10(ha, "done\n");
21906 return (rval);
21907 }
21908
/*
 * ql_2700_dmp_parse_template
 *	Walks an ISP27xx firmware dump template and processes each entry.
 *	Called twice: once with dump_buff == NULL to compute the required
 *	dump buffer size, and once with a real buffer to capture the dump.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	template_hdr:	dump template header pointer.
 *	dump_buff:	dump buffer pointer, or NULL for size determination.
 *	buff_size:	dump buffer size in bytes (ignored when NULL buffer).
 *
 * Returns:
 *	Total dump size in bytes (template size plus captured entry data),
 *	or 0 when the template header is invalid.
 */
static uint32_t
ql_2700_dmp_parse_template(ql_adapter_state_t *ha, ql_dt_hdr_t *template_hdr,
    uint8_t *dump_buff, uint32_t buff_size)
{
	int		e_cnt, esize, num_of_entries;
	uint32_t	bsize;
	time_t		time;
	uint8_t		*dbuff, *dbuff_end;
	ql_dt_entry_t	*entry;
	int		sane_end = 0;

	dbuff = dump_buff;	/* dbuff = NULL size determination. */
	dbuff_end = dump_buff + buff_size;

	/* Record running firmware version in the template header. */
	template_hdr->ver_attr[0] = ha->fw_major_version;
	template_hdr->ver_attr[1] = ha->fw_minor_version;
	template_hdr->ver_attr[2] = ha->fw_subminor_version;
	template_hdr->ver_attr[3] = ha->fw_attributes;
	template_hdr->ver_attr[4] = ha->fw_ext_attributes;

	QL_PRINT_7(ha, "started, template_hdr=%ph, dump_buff=%ph, "
	    "buff_size=%xh, buff_end=%ph\n", (void *)template_hdr,
	    (void *)dbuff, buff_size, (void *)dbuff_end);

	/* Setup parameters */
	QL_PRINT_7(ha, "type=%d, first_entry_offset=%xh, "
	    "num_of_entries=%xh ver_attr=%xh,%xh,%xh,%xh,%xh\n",
	    template_hdr->type, template_hdr->first_entry_offset,
	    template_hdr->num_of_entries, template_hdr->ver_attr[0],
	    template_hdr->ver_attr[1], template_hdr->ver_attr[2],
	    template_hdr->ver_attr[3], template_hdr->ver_attr[4]);

	if (template_hdr->type != DT_THDR) {
		EL(ha, "Template header not found\n");
		return (0);
	}
	/* Timestamp the dump on the capture pass only. */
	if (dbuff != NULL) {
		(void) drv_getparm(TIME, &time);
		template_hdr->driver_timestamp = LSD(time);
	}

	num_of_entries = template_hdr->num_of_entries;
	entry = (ql_dt_entry_t *)((caddr_t)template_hdr +
	    template_hdr->first_entry_offset);

	/* The template itself is part of the dump. */
	bsize = template_hdr->size_of_template;
	for (e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
		QL_PRINT_7(ha, "e_cnt=%xh, entry=%ph, type=%d, size=%xh, "
		    "capture_flags=%xh, driver_flags=%xh, bofst=%xh\n",
		    e_cnt, (void *)entry, entry->h.type, entry->h.size,
		    entry->h.capture_flags, entry->h.driver_flags,
		    dbuff != NULL ? (uintptr_t)dbuff - (uintptr_t)template_hdr :
		    bsize);
		/*
		 * Decode the entry type and process it accordingly
		 */
		esize = 0;
		switch (entry->h.type) {
		case DT_NOP:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
			    e_cnt, entry->h.type);
			break;
		case DT_TEND:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
			    e_cnt, entry->h.type);
			/* There should be exactly one end entry. */
			sane_end++;
			break;
		case DT_RIOB1:
			esize = ql_2700_dt_riob1(ha, (ql_dt_riob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB1:
			ql_2700_dt_wiob1(ha, (ql_dt_wiob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RIOB2:
			esize = ql_2700_dt_riob2(ha, (ql_dt_riob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB2:
			ql_2700_dt_wiob2(ha, (ql_dt_wiob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCI:
			esize = ql_2700_dt_rpci(ha, (ql_dt_rpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WPCI:
			ql_2700_dt_wpci(ha, (ql_dt_wpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRAM:
			esize = ql_2700_dt_rram(ha, (ql_dt_rram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUE:
			esize = ql_2700_dt_gque(ha, (ql_dt_gque_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GFCE:
			esize = ql_2700_dt_gfce(ha, (ql_dt_gfce_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_PRISC:
			ql_2700_dt_prisc(ha, (ql_dt_prisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRISC:
			ql_2700_dt_rrisc(ha, (ql_dt_rrisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_DINT:
			ql_2700_dt_dint(ha, (ql_dt_dint_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GHBD:
			esize = ql_2700_dt_ghbd(ha, (ql_dt_ghbd_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_SCRA:
			esize = ql_2700_dt_scra(ha, (ql_dt_scra_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRREG:
			esize = ql_2700_dt_rrreg(ha, (ql_dt_rrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WRREG:
			ql_2700_dt_wrreg(ha, (ql_dt_wrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRRAM:
			esize = ql_2700_dt_rrram(ha, (ql_dt_rrram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCIC:
			esize = ql_2700_dt_rpcic(ha, (ql_dt_rpcic_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUES:
			esize = ql_2700_dt_gques(ha, (ql_dt_gques_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WDMP:
			esize = ql_2700_dt_wdmp(ha, (ql_dt_wdmp_t *)entry,
			    dbuff, dbuff_end);
			break;
		default:
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			EL(ha, "Entry ID=%d, type=%d unknown\n", e_cnt,
			    entry->h.type);
			break;
		}
		/* Advance the output pointer past any captured data. */
		if (dbuff != NULL && esize) {
			QL_PRINT_7(ha, "entry=%d, esize=%xh, capture data\n",
			    entry->h.type, esize);
			QL_DUMP_3(dbuff, 8, esize);
			dbuff += esize;
		}
		bsize += esize;
		/* next entry in the template */
		entry = (ql_dt_entry_t *)((caddr_t)entry + entry->h.size);
	}
	if (sane_end > 1) {
		EL(ha, "Template configuration error. Check Template\n");
	}

	QL_PRINT_7(ha, "done, num of entries=%xh, size=%xh\n",
	    template_hdr->num_of_entries, bsize);
	return (bsize);
}
22089
/*
 * ql_2700_dt_riob1
 *	Capture a "read I/O base" (type 1) template entry: dumps reg_count
 *	registers of reg_size bytes each through the windowed register
 *	interface at pci_offset, preceding each register's data with its
 *	32-bit address in little-endian byte order.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped (no buffer space).
 */
static int
ql_2700_dt_riob1(ql_adapter_state_t *ha, ql_dt_riob1_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i, cnt;
	uint8_t		*bp = dbuff;
	uint32_t	addr = entry->addr;
	uint8_t		*reg = (uint8_t *)ha->iobase + entry->pci_offset;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
	    "reg_count=%x%02xh, pci_offset=%xh\n", (void *)dbuff, entry->addr,
	    entry->reg_size, entry->reg_count_h, entry->reg_count_l,
	    entry->pci_offset);

	/* The 16-bit register count is stored as two bytes in the entry. */
	cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
	esize = cnt * 4;		/* addr */
	esize += cnt * entry->reg_size;	/* data */

	/* Size-determination pass: report space needed, touch nothing. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select the register window, then emit <addr><data> per register. */
	WRT32_IO_REG(ha, io_base_addr, addr);
	while (cnt--) {
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		/*
		 * reg is not reset per register: successive registers are
		 * read from successive PCI offsets within the window.
		 */
		for (i = 0; i < entry->reg_size; i++) {
			*bp++ = RD_REG_BYTE(ha, reg++);
		}
		addr++;
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22135
22136 static void
22137 ql_2700_dt_wiob1(ql_adapter_state_t *ha, ql_dt_wiob1_t *entry,
22138 uint8_t *dbuff, uint8_t *dbuff_end)
22139 {
22140 uint8_t *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22141
22142 QL_PRINT_7(ha, "started, addr=%xh, data=%xh, pci_offset=%xh\n",
22143 entry->addr, entry->data, entry->pci_offset);
22144
22145 if (dbuff == NULL) {
22146 QL_PRINT_7(ha, "null buf done\n");
22147 return;
22148 }
22149 if (dbuff >= dbuff_end) {
22150 EL(ha, "skipped, no buffer space, needed=0\n");
22151 entry->h.driver_flags = (uint8_t)
22152 (entry->h.driver_flags | SKIPPED_FLAG);
22153 return;
22154 }
22155
22156 WRT32_IO_REG(ha, io_base_addr, entry->addr);
22157 WRT_REG_DWORD(ha, reg, entry->data);
22158
22159 QL_PRINT_7(ha, "done\n");
22160 }
22161
/*
 * ql_2700_dt_riob2
 *	Capture a "read I/O base" (type 2) template entry.  Like riob1 but
 *	a register bank is first selected by writing reg_bank through the
 *	register at bank_sel_offset.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped (no buffer space).
 */
static int
ql_2700_dt_riob2(ql_adapter_state_t *ha, ql_dt_riob2_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i, cnt;
	uint8_t		*bp = dbuff;
	uint8_t		*reg = (uint8_t *)ha->iobase + entry->pci_offset;
	uint32_t	addr = entry->addr;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
	    "reg_count=%x%02xh, pci_offset=%xh, bank_sel_offset=%xh, "
	    "reg_bank=%xh\n", (void *)dbuff, entry->addr,
	    entry->reg_size, entry->reg_count_h, entry->reg_count_l,
	    entry->pci_offset, entry->bank_sel_offset, entry->reg_bank);

	/* The 16-bit register count is stored as two bytes in the entry. */
	cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
	esize = cnt * 4;		/* addr */
	esize += cnt * entry->reg_size;	/* data */

	/* Size-determination pass: report space needed, touch nothing. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select window and bank, then emit <addr><data> per register. */
	WRT32_IO_REG(ha, io_base_addr, addr);
	WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
	while (cnt--) {
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < entry->reg_size; i++) {
			*bp++ = RD_REG_BYTE(ha, reg++);
		}
		addr++;
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22209
22210 static void
22211 ql_2700_dt_wiob2(ql_adapter_state_t *ha, ql_dt_wiob2_t *entry,
22212 uint8_t *dbuff, uint8_t *dbuff_end)
22213 {
22214 uint16_t data;
22215 uint8_t *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22216
22217 QL_PRINT_7(ha, "started, addr=%xh, data=%x%02xh, pci_offset=%xhh, "
22218 "bank_sel_offset=%xh, reg_bank=%xh\n", entry->addr, entry->data_h,
22219 entry->data_l, entry->pci_offset, entry->bank_sel_offset,
22220 entry->reg_bank);
22221
22222 if (dbuff == NULL) {
22223 QL_PRINT_7(ha, "null buf done\n");
22224 return;
22225 }
22226 if (dbuff >= dbuff_end) {
22227 EL(ha, "skipped, no buffer space, needed=0\n");
22228 entry->h.driver_flags = (uint8_t)
22229 (entry->h.driver_flags | SKIPPED_FLAG);
22230 return;
22231 }
22232
22233 data = CHAR_TO_SHORT(entry->data_l, entry->data_h);
22234
22235 WRT32_IO_REG(ha, io_base_addr, entry->addr);
22236 WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
22237 WRT_REG_WORD(ha, reg, data);
22238
22239 QL_PRINT_7(ha, "done\n");
22240 }
22241
/*
 * ql_2700_dt_rpci
 *	Capture a "read PCI register" template entry: emits the 32-bit
 *	register address (little-endian) followed by 4 bytes read from
 *	the mapped register space at entry->addr.
 *
 * Returns:
 *	Number of dump bytes produced (always 8 unless skipped), or 0
 *	when skipped (no buffer space).
 */
static int
ql_2700_dt_rpci(ql_adapter_state_t *ha, ql_dt_rpci_t *entry, uint8_t *dbuff,
    uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i;
	uint8_t		*bp = dbuff;
	uint8_t		*reg = (uint8_t *)ha->iobase + entry->addr;

	QL_PRINT_7(ha, "started, addr=%xh, reg=%ph\n", entry->addr,
	    (void *)reg);

	esize = 4;	/* addr */
	esize += 4;	/* data */

	/* Size-determination pass: report space needed, touch nothing. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Emit address (LE byte order), then the register data. */
	*bp++ = LSB(LSW(entry->addr));
	*bp++ = MSB(LSW(entry->addr));
	*bp++ = LSB(MSW(entry->addr));
	*bp++ = MSB(MSW(entry->addr));
	for (i = 0; i < 4; i++) {
		*bp++ = RD_REG_BYTE(ha, reg++);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22279
22280 static void
22281 ql_2700_dt_wpci(ql_adapter_state_t *ha, ql_dt_wpci_t *entry,
22282 uint8_t *dbuff, uint8_t *dbuff_end)
22283 {
22284 uint8_t *reg = (uint8_t *)ha->iobase + entry->addr;
22285
22286 QL_PRINT_7(ha, "started, addr=%xh, data=%xh, reg=%ph\n",
22287 entry->addr, entry->data, (void *)reg);
22288
22289 if (dbuff == NULL) {
22290 QL_PRINT_7(ha, "null buf done\n");
22291 return;
22292 }
22293 if (dbuff >= dbuff_end) {
22294 EL(ha, "skipped, no buffer space, needed=0\n");
22295 entry->h.driver_flags = (uint8_t)
22296 (entry->h.driver_flags | SKIPPED_FLAG);
22297 return;
22298 }
22299
22300 WRT_REG_DWORD(ha, reg, entry->data);
22301
22302 QL_PRINT_7(ha, "done\n");
22303 }
22304
/*
 * ql_2700_dt_rram
 *	Capture a RISC RAM area via the MBC_DUMP_RAM_EXTENDED mailbox
 *	command.  ram_area selects the address range: 1 = use the entry's
 *	own start/end, 2 = external memory (end from adapter state),
 *	3 = shared RAM, 4 = DDR RAM.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped (unknown area,
 *	empty range, no buffer space, or dump failure).
 */
static int
ql_2700_dt_rram(ql_adapter_state_t *ha, ql_dt_rram_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize, rval;
	uint32_t	start = entry->start_addr;
	uint32_t	end = entry->end_addr;

	QL_PRINT_7(ha, "started, buf=%ph, ram_area=%xh, start_addr=%xh, "
	    "end_addr=%xh\n", (void *)dbuff, entry->ram_area,
	    entry->start_addr, entry->end_addr);

	/* Override the template's range with adapter-known bounds. */
	if (entry->ram_area == 2) {
		end = ha->fw_ext_memory_end;
	} else if (entry->ram_area == 3) {
		start = ha->fw_shared_ram_start;
		end = ha->fw_shared_ram_end;
	} else if (entry->ram_area == 4) {
		start = ha->fw_ddr_ram_start;
		end = ha->fw_ddr_ram_end;
	} else if (entry->ram_area != 1) {
		EL(ha, "skipped, unknown RAM_AREA %d\n", entry->ram_area);
		start = 0;
		end = 0;
	}
	/* Range is inclusive: (end - start + 1) words, 4 bytes each. */
	esize = end > start ? end - start : 0;
	if (esize) {
		esize = (esize + 1) * 4;
	}

	/* Size-determination pass: report space needed, touch nothing. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize == 0 || esize + dbuff >= dbuff_end) {
		if (esize != 0) {
			EL(ha, "skipped, no buffer space, needed=%xh\n",
			    esize);
		} else {
			/*EMPTY*/
			QL_PRINT_7(ha, "skipped, no ram_area=%xh, start=%xh, "
			    "end=%xh\n", entry->ram_area, start, end);
		}
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}
	/* Record the range actually captured back into the template. */
	entry->end_addr = end;
	entry->start_addr = start;

	if ((rval = ql_2700_dump_ram(ha, MBC_DUMP_RAM_EXTENDED,
	    start, esize / 4, dbuff)) != QL_SUCCESS) {
		EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
		    "esize=0\n", rval, start, esize / 4);
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22365
/*
 * ql_2700_dt_gque
 *	Capture driver queue contents.  queue_type 1 = request queue(s),
 *	2 = response queue(s), 3 = ATIO queue (not supported).  Each
 *	captured queue is emitted as <queue#><entry count> (16 bits each,
 *	little-endian) followed by the raw ring contents.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped.
 */
static int
ql_2700_dt_gque(ql_adapter_state_t *ha, ql_dt_gque_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	cnt, q_cnt, e_cnt, i;
	uint8_t		*bp = dbuff, *dp;

	QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
	    (void *)dbuff, entry->num_queues, entry->queue_type);

	if (entry->queue_type == 1) {
		ql_request_q_t	*req_q;

		/* Second request queue exists only with multiple rsp queues. */
		e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* queue entries */

		/* queue size */
		esize += ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
		if (e_cnt > 1) {
			esize += ha->req_q[1]->req_entry_cnt *
			    REQUEST_ENTRY_SIZE;
		}

		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		/* Record how many queues were actually captured. */
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
			e_cnt = req_q->req_entry_cnt;
			dp = req_q->req_ring.bp;
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			/* Raw byte copy of the whole ring. */
			for (cnt = 0; cnt < e_cnt; cnt++) {
				for (i = 0; i < REQUEST_ENTRY_SIZE; i++) {
					*bp++ = *dp++;
				}
			}
		}
	} else if (entry->queue_type == 2) {
		ql_response_q_t	*rsp_q;

		e_cnt = ha->rsp_queues_cnt;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* queue entries */

		/* queue size */
		for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			esize += rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
		}

		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped2, no buffer space, needed=%xh\n",
			    esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			e_cnt = rsp_q->rsp_entry_cnt;
			dp = rsp_q->rsp_ring.bp;
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			for (cnt = 0; cnt < e_cnt; cnt++) {
				for (i = 0; i < RESPONSE_ENTRY_SIZE; i++) {
					*bp++ = *dp++;
				}
			}
		}
	} else if (entry->queue_type == 3) {
		/* This driver has no ATIO queue. */
		QL_PRINT_7(ha, "skipped, no ATIO queue, esize=0\n");
		if (dbuff != NULL) {
			entry->num_queues = 0;
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	} else {
		EL(ha, "skipped, unknown queue_type %d, esize=0\n",
		    entry->queue_type);
		if (dbuff != NULL) {
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22478
22479 /*ARGSUSED*/
22480 static int
22481 ql_2700_dt_gfce(ql_adapter_state_t *ha, ql_dt_gfce_t *entry,
22482 uint8_t *dbuff, uint8_t *dbuff_end)
22483 {
22484 QL_PRINT_7(ha, "started\n");
22485
22486 QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22487 if (dbuff != NULL) {
22488 entry->h.driver_flags = (uint8_t)
22489 (entry->h.driver_flags | SKIPPED_FLAG);
22490 }
22491
22492 return (0);
22493 }
22494
/*
 * ql_2700_dt_prisc
 *	Pause the RISC processor so subsequent entries capture a stable
 *	state.  Polls risc2host for up to ~3 seconds (30000 x 100us).
 *	Produces no dump data.
 */
static void
ql_2700_dt_prisc(ql_adapter_state_t *ha, ql_dt_prisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t	timer;

	QL_PRINT_7(ha, "started\n");

	/* Size-determination pass: don't touch the hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0;
		    timer--) {
			if (timer) {
				drv_usecwait(100);
				/* Log progress once per second of waiting. */
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				break;
			}
		}
	}

	QL_PRINT_7(ha, "done\n");
}
22534
/*
 * ql_2700_dt_rrisc
 *	Reset the RISC processor: shut down DMA, reset the ISP, and wait
 *	for the ROM firmware to come back up.  Marks firmware down in the
 *	adapter state.  Produces no dump data.
 */
static void
ql_2700_dt_rrisc(ql_adapter_state_t *ha, ql_dt_rrisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t	timer;

	QL_PRINT_7(ha, "started\n");

	/* Size-determination pass: don't touch the hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Shutdown DMA. */
	WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN);

	/* Wait for DMA to stop. */
	for (timer = 0; timer < 30000; timer++) {
		if (!(RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Reset the chip. */
	WRT32_IO_REG(ha, ctrl_status, ISP_RESET);
	drv_usecwait(200);

	/* Wait for RISC to recover from reset. */
	for (timer = 30000; timer; timer--) {
		ha->rom_status = RD16_IO_REG(ha, mailbox_out[0]);
		if ((ha->rom_status & MBS_ROM_STATUS_MASK) != MBS_ROM_BUSY) {
			break;
		}
		drv_usecwait(100);
	}

	/* Wait for reset to finish. */
	for (timer = 30000; timer; timer--) {
		if (!(RD32_IO_REG(ha, ctrl_status) & ISP_RESET)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Firmware is no longer running after the reset. */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~FIRMWARE_UP;
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_7(ha, "done\n");
}
22592
22593 static void
22594 ql_2700_dt_dint(ql_adapter_state_t *ha, ql_dt_dint_t *entry,
22595 uint8_t *dbuff, uint8_t *dbuff_end)
22596 {
22597 QL_PRINT_7(ha, "started, pci_offset=%xh, data=%xh\n",
22598 entry->pci_offset, entry->data);
22599
22600 if (dbuff == NULL) {
22601 QL_PRINT_7(ha, "null buf done\n");
22602 return;
22603 }
22604 if (dbuff >= dbuff_end) {
22605 EL(ha, "skipped, no buffer space, needed=0\n");
22606 entry->h.driver_flags = (uint8_t)
22607 (entry->h.driver_flags | SKIPPED_FLAG);
22608 return;
22609 }
22610
22611 ql_pci_config_put32(ha, entry->pci_offset, entry->data);
22612
22613 QL_PRINT_7(ha, "done\n");
22614 }
22615
22616 /*ARGSUSED*/
22617 static int
22618 ql_2700_dt_ghbd(ql_adapter_state_t *ha, ql_dt_ghbd_t *entry,
22619 uint8_t *dbuff, uint8_t *dbuff_end)
22620 {
22621 QL_PRINT_7(ha, "started\n");
22622
22623 QL_PRINT_7(ha, "skipped, not supported\n");
22624 if (dbuff != NULL) {
22625 entry->h.driver_flags = (uint8_t)
22626 (entry->h.driver_flags | SKIPPED_FLAG);
22627 }
22628
22629 return (0);
22630 }
22631
22632 /*ARGSUSED*/
22633 static int
22634 ql_2700_dt_scra(ql_adapter_state_t *ha, ql_dt_scra_t *entry,
22635 uint8_t *dbuff, uint8_t *dbuff_end)
22636 {
22637 QL_PRINT_7(ha, "started\n");
22638
22639 QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22640 if (dbuff != NULL) {
22641 entry->h.driver_flags = (uint8_t)
22642 (entry->h.driver_flags | SKIPPED_FLAG);
22643 }
22644
22645 return (0);
22646 }
22647
/*
 * ql_2700_dt_rrreg
 *	Capture "read remote register" entries via the indirect access
 *	registers in window 0x40: the target address (with bit 31 set to
 *	trigger a read) is written to iobase+0xc0 and the data is read
 *	back from iobase+0xc4.  Each value is preceded by its address.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped (no buffer space).
 */
static int
ql_2700_dt_rrreg(ql_adapter_state_t *ha, ql_dt_rrreg_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i;
	uint8_t		*bp = dbuff;
	uint8_t		*reg = (uint8_t *)ha->iobase + 0xc4;
	uint32_t	addr = entry->addr;
	uint32_t	cnt = entry->count;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
	    (void *)dbuff, entry->addr, entry->count);

	esize = cnt * 4;	/* addr */
	esize += cnt * 4;	/* data */

	/* Size-determination pass: report space needed, touch nothing. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select the indirect-access register window. */
	WRT32_IO_REG(ha, io_base_addr, 0x40);
	while (cnt--) {
		/* Bit 31 requests a read of 'addr' into the data register. */
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, addr | 0x80000000);
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < 4; i++) {
			*bp++ = RD_REG_BYTE(ha, reg + i);
		}
		/* Remote addresses are byte addresses; step one dword. */
		addr += 4;
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22692
22693 static void
22694 ql_2700_dt_wrreg(ql_adapter_state_t *ha, ql_dt_wrreg_t *entry,
22695 uint8_t *dbuff, uint8_t *dbuff_end)
22696 {
22697 QL_PRINT_7(ha, "started, addr=%xh, data=%xh\n", entry->addr,
22698 entry->data);
22699
22700 if (dbuff == NULL) {
22701 QL_PRINT_7(ha, "null buf done\n");
22702 return;
22703 }
22704 if (dbuff >= dbuff_end) {
22705 EL(ha, "skipped, no buffer space, needed=0\n");
22706 entry->h.driver_flags = (uint8_t)
22707 (entry->h.driver_flags | SKIPPED_FLAG);
22708 return;
22709 }
22710
22711 WRT32_IO_REG(ha, io_base_addr, 0x40);
22712 WRT_REG_DWORD(ha, ha->iobase + 0xc4, entry->data);
22713 WRT_REG_DWORD(ha, ha->iobase + 0xc0, entry->addr);
22714
22715 QL_PRINT_7(ha, "done\n");
22716 }
22717
/*
 * ql_2700_dt_rrram
 *	Capture remote (MPI) RAM via the MBC_MPI_RAM mailbox command:
 *	entry->count 32-bit words starting at entry->addr.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped (no buffer
 *	space) or the RAM dump fails.
 */
static int
ql_2700_dt_rrram(ql_adapter_state_t *ha, ql_dt_rrram_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int	rval, esize;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
	    (void *)dbuff, entry->addr, entry->count);

	esize = entry->count * 4;	/* data */

	/* Size-determination pass: report space needed, touch nothing. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	if ((rval = ql_2700_dump_ram(ha, MBC_MPI_RAM, entry->addr,
	    entry->count, dbuff)) != QL_SUCCESS) {
		EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
		    "esize=0\n", rval, entry->addr, entry->count);
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22750
/*
 * ql_2700_dt_rpcic
 *	Capture PCI configuration space: entry->count groups of 4 bytes
 *	starting at config offset entry->addr, each preceded by its
 *	32-bit offset in little-endian byte order.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped (no buffer space).
 */
static int
ql_2700_dt_rpcic(ql_adapter_state_t *ha, ql_dt_rpcic_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i;
	uint8_t		*bp = dbuff;
	uint32_t	addr = entry->addr;
	uint32_t	cnt = entry->count;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
	    (void *)dbuff, entry->addr, entry->count);

	esize = cnt * 4;	/* addr */
	esize += cnt * 4;	/* data */

	/* Size-determination pass: report space needed, touch nothing. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	while (cnt--) {
		/* Emit the config offset, then 4 config bytes from it. */
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < 4; i++) {
			*bp++ = ql_pci_config_get8(ha, addr++);
		}
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22791
/*
 * ql_2700_dt_gques
 *	Capture driver queue shadow (in/out index) values.  queue_type
 *	1 = request queue out-shadow, 2 = response queue in-shadow,
 *	3 = ATIO (not supported).  Each queue is emitted as
 *	<queue#><shadow count> (16 bits each, little-endian) followed by
 *	the 32-bit shadow value.
 *
 * Returns:
 *	Number of dump bytes produced, or 0 when skipped.
 */
static int
ql_2700_dt_gques(ql_adapter_state_t *ha, ql_dt_gques_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	q_cnt, e_cnt, data;
	uint8_t		*bp = dbuff;

	QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
	    (void *)dbuff, entry->num_queues, entry->queue_type);

	if (entry->queue_type == 1) {
		ql_request_q_t	*req_q;

		/* Second request queue exists only with multiple rsp queues. */
		e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* shadow entries */

		/* shadow size */
		esize += SHADOW_ENTRY_SIZE;
		if (e_cnt > 1) {
			esize += SHADOW_ENTRY_SIZE;
		}
		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		/* Record how many queues were actually captured. */
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
			e_cnt = 1;
			/* DMA-coherent read of the out-index shadow. */
			data = ddi_get32(req_q->req_ring.acc_handle,
			    req_q->req_out_shadow_ptr);
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			*bp++ = LSB(LSW(data));
			*bp++ = MSB(LSW(data));
			*bp++ = LSB(MSW(data));
			*bp++ = MSB(MSW(data));
		}
	} else if (entry->queue_type == 2) {
		ql_response_q_t	*rsp_q;

		e_cnt = ha->rsp_queues_cnt;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* shadow entries */

		/* shadow size */
		for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
			esize += SHADOW_ENTRY_SIZE;
		}

		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped2, no buffer space, needed=%xh\n",
			    esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			e_cnt = 1;
			data = ddi_get32(rsp_q->rsp_ring.acc_handle,
			    rsp_q->rsp_in_shadow_ptr);
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			*bp++ = LSB(LSW(data));
			*bp++ = MSB(LSW(data));
			*bp++ = LSB(MSW(data));
			*bp++ = MSB(MSW(data));
		}
	} else if (entry->queue_type == 3) {
		/* This driver has no ATIO queue. */
		EL(ha, "skipped, no ATIO queue, esize=0\n");
		if (dbuff != NULL) {
			entry->num_queues = 0;
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	} else {
		EL(ha, "skipped, unknown queue_type %d, esize=0\n",
		    entry->queue_type);
		if (dbuff != NULL) {
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22901
22902 static int
22903 ql_2700_dt_wdmp(ql_adapter_state_t *ha, ql_dt_wdmp_t *entry,
22904 uint8_t *dbuff, uint8_t *dbuff_end)
22905 {
22906 int esize;
22907 uint8_t *bp = dbuff;
22908 uint32_t data, cnt = entry->length, *dp = entry->data;
22909
22910 QL_PRINT_7(ha, "started, buf=%ph, length=%xh\n",
22911 (void *)dbuff, entry->length);
22912
22913 esize = cnt;
22914 if (dbuff == NULL) {
22915 QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22916 return (esize);
22917 }
22918 if (esize + dbuff >= dbuff_end) {
22919 EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22920 entry->h.driver_flags = (uint8_t)
22921 (entry->h.driver_flags | SKIPPED_FLAG);
22922 return (0);
22923 }
22924
22925 while (cnt--) {
22926 data = *dp++;
22927 *bp++ = LSB(LSW(data));
22928 *bp++ = MSB(LSW(data));
22929 *bp++ = LSB(MSW(data));
22930 *bp++ = MSB(MSW(data));
22931 }
22932 QL_PRINT_7(ha, "%s\n", dbuff);
22933
22934 QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22935 return (esize);
22936 }
22937
22938 /*
22939 * ql_2700_dump_ram
22940 * Dumps RAM.
22941 * Risc interrupts must be disabled when this routine is called.
22942 *
22943 * Input:
22944 * ha: adapter state pointer.
22945 * cmd: MBC_DUMP_RAM_EXTENDED/MBC_MPI_RAM.
22946 * risc_address: RISC code start address.
 * len:	Number of 32-bit words to dump (buffer at bp must
 *		hold len * 4 bytes).
22948 * bp: buffer pointer.
22949 *
22950 * Returns:
22951 * ql local function return status code.
22952 *
22953 * Context:
22954 * Interrupt or Kernel context, no mailbox commands allowed.
22955 */
static int
ql_2700_dump_ram(ql_adapter_state_t *ha, uint16_t cmd, uint32_t risc_address,
    uint32_t len, uint8_t *bp)
{
	dma_mem_t	mem;
	uint32_t	i, stat, timer;
	uint8_t		*dp;
	int		rval = QL_SUCCESS;

	QL_PRINT_7(ha, "started, cmd=%xh, risc_address=%xh, len=%xh, "
	    "bp=%ph\n", cmd, risc_address, len, (void *)bp);

	/*
	 * Staging buffer the firmware DMAs the RAM contents into:
	 * len 32-bit words = len * 4 bytes.
	 */
	mem.size = len * 4;
	mem.type = LITTLE_ENDIAN_DMA;
	mem.max_cookie_count = 1;
	mem.alignment = 8;
	if ((rval = ql_alloc_phys(ha, &mem, KM_SLEEP)) != QL_SUCCESS) {
		EL(ha, "alloc status=%xh\n", rval);
		return (rval);
	}

	/*
	 * Build the dump-RAM mailbox command directly in the mailbox-in
	 * registers: the normal mailbox path is off limits here (see the
	 * header comment -- no mailbox commands allowed in this context).
	 * mb1/mb8 = RISC address low/high, mb2/mb3 and mb6/mb7 = DMA
	 * address, mb4/mb5 = word count.
	 */
	WRT16_IO_REG(ha, mailbox_in[0], cmd);
	WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
	WRT16_IO_REG(ha, mailbox_in[2], MSW(LSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[3], LSW(LSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[4], MSW(len));
	WRT16_IO_REG(ha, mailbox_in[5], LSW(len));
	WRT16_IO_REG(ha, mailbox_in[6], MSW(MSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[7], LSW(MSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
	if (cmd == MBC_MPI_RAM) {
		/* mb9 flag for MPI RAM dumps -- presumably selects the
		 * MPI region; confirm against the MBC_MPI_RAM spec. */
		WRT16_IO_REG(ha, mailbox_in[9], BIT_0);
	}

	/*
	 * Ring the doorbell, then poll risc2host for completion since
	 * RISC interrupts are disabled.  Up to 6,000,000 * 5us (~30s).
	 */
	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
	for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
		stat = RD32_IO_REG(ha, risc2host);
		if (stat & RH_RISC_INT) {
			stat &= 0xff;
			if ((stat == 1) || (stat == 0x10)) {
				/* Command completed successfully. */
				break;
			} else if ((stat == 2) || (stat == 0x11)) {
				/* Command failed; mb0 holds the status. */
				rval = RD16_IO_REG(ha, mailbox_out[0]);
				break;
			}
			/* Unrelated interrupt status: clear and keep polling. */
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		}
		drv_usecwait(5);
	}
	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);

	if (timer == 0) {
		QL_PRINT_7(ha, "timeout addr=%xh\n", risc_address);
		rval = QL_FUNCTION_TIMEOUT;
	} else {
		/* Make the DMAed data visible to the CPU, then copy out. */
		(void) ddi_dma_sync(mem.dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
		dp = mem.bp;
		for (i = 0; i < mem.size; i++) {
			*bp++ = *dp++;
		}
	}

	ql_free_phys(ha, &mem);

	QL_PRINT_7(ha, "done\n");
	return (rval);
}