Print this page
NEX-5733 cleanup qlt/qlc
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
NEX-5717 import QLogic 16G FC drivers
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-4957 Panic in qlc with QLE2460 in PCI passthrough mode on ESXi
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 -/* Copyright 2010 QLogic Corporation */
22 +/* Copyright 2015 QLogic Corporation */
23 23
24 24 /*
25 - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
25 + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 26 */
27 27 /*
28 28 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 29 * Copyright (c) 2016 by Delphix. All rights reserved.
30 30 */
31 31
32 -#pragma ident "Copyright 2010 QLogic Corporation; ql_api.c"
32 +#pragma ident "Copyright 2015 QLogic Corporation; ql_api.c"
33 33
34 34 /*
35 35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36 36 *
37 37 * ***********************************************************************
38 38 * * **
39 39 * * NOTICE **
40 - * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
40 + * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
41 41 * * ALL RIGHTS RESERVED **
42 42 * * **
43 43 * ***********************************************************************
44 44 *
45 45 */
46 46
47 47 #include <ql_apps.h>
48 48 #include <ql_api.h>
49 49 #include <ql_debug.h>
50 50 #include <ql_init.h>
51 51 #include <ql_iocb.h>
52 52 #include <ql_ioctl.h>
53 53 #include <ql_isr.h>
54 54 #include <ql_mbx.h>
55 55 #include <ql_nx.h>
56 56 #include <ql_xioctl.h>
57 +#include <ql_fm.h>
57 58
58 59 /*
59 60 * Solaris external defines.
60 61 */
61 62 extern pri_t minclsyspri;
62 63 extern pri_t maxclsyspri;
63 64
64 65 /*
65 66 * dev_ops functions prototypes
66 67 */
67 68 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
68 69 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
69 70 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
70 71 static int ql_power(dev_info_t *, int, int);
71 72 static int ql_quiesce(dev_info_t *);
72 73
73 74 /*
74 75 * FCA functions prototypes exported by means of the transport table
75 76 */
76 77 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
77 78 fc_fca_bind_info_t *);
78 79 static void ql_unbind_port(opaque_t);
79 80 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
80 81 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
81 82 static int ql_els_send(opaque_t, fc_packet_t *);
82 83 static int ql_get_cap(opaque_t, char *, void *);
83 84 static int ql_set_cap(opaque_t, char *, void *);
84 85 static int ql_getmap(opaque_t, fc_lilpmap_t *);
85 86 static int ql_transport(opaque_t, fc_packet_t *);
86 87 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
87 88 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
88 89 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
89 90 static int ql_abort(opaque_t, fc_packet_t *, int);
90 91 static int ql_reset(opaque_t, uint32_t);
91 92 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
92 93 static opaque_t ql_get_device(opaque_t, fc_portid_t);
93 94
94 95 /*
95 96 * FCA Driver Support Function Prototypes.
96 97 */
97 -static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
98 -static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
99 - ql_srb_t *);
100 -static void ql_task_daemon(void *);
101 -static void ql_task_thread(ql_adapter_state_t *);
102 -static void ql_unsol_callback(ql_srb_t *);
103 -static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
104 - fc_unsol_buf_t *);
105 -static void ql_timer(void *);
106 -static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
107 -static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
108 - uint32_t *, uint32_t *);
109 -static void ql_halt(ql_adapter_state_t *, int);
98 +static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
110 99 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
100 +static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
111 101 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
112 102 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
113 103 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
114 104 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
115 105 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
116 106 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
117 107 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
118 108 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
119 109 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
120 110 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
121 111 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
122 112 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
123 -static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
124 113 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
114 +static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
115 +static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
125 116 static int ql_login_port(ql_adapter_state_t *, port_id_t);
126 117 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
127 118 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
128 -static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
119 +static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint64_t);
129 120 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
121 +static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
122 + ql_srb_t *);
130 123 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
131 124 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
132 -static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
133 125 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
134 126 ql_srb_t *);
135 -static int ql_kstat_update(kstat_t *, int);
136 -static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
137 -static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
138 -static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
139 -static void ql_rst_aen(ql_adapter_state_t *);
140 -static void ql_restart_queues(ql_adapter_state_t *);
141 -static void ql_abort_queues(ql_adapter_state_t *);
142 -static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
127 +static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
128 +static void ql_task_daemon(void *);
129 +static void ql_task_thread(ql_adapter_state_t *);
143 130 static void ql_idle_check(ql_adapter_state_t *);
144 -static int ql_loop_resync(ql_adapter_state_t *);
145 -static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
146 -static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
147 -static int ql_save_config_regs(dev_info_t *);
148 -static int ql_restore_config_regs(dev_info_t *);
149 -static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
150 -static int ql_handle_rscn_update(ql_adapter_state_t *);
131 +static void ql_unsol_callback(ql_srb_t *);
132 +static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 133 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
134 +static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
135 +static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
152 136 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
153 -static int ql_dump_firmware(ql_adapter_state_t *);
154 -static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
137 +static int ql_handle_rscn_update(ql_adapter_state_t *);
138 +static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
139 + fc_unsol_buf_t *);
140 +static void ql_timer(void *);
141 +static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
142 +static void ql_watchdog(ql_adapter_state_t *);
143 +static void ql_wdg_tq_list(ql_adapter_state_t *, ql_tgt_t *);
144 +static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *);
145 +static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
146 +static void ql_iidma(ql_adapter_state_t *);
147 +static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
148 +static void ql_loop_resync(ql_adapter_state_t *);
149 +static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
150 +static int ql_kstat_update(kstat_t *, int);
151 +static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
152 +static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
153 +static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
154 +static size_t ql_81xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
155 +static size_t ql_8021_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
155 156 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
156 157 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
157 158 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
158 159 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
159 160 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
160 161 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
161 162 void *);
162 163 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
163 164 uint8_t);
164 -static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
165 -static int ql_suspend_adapter(ql_adapter_state_t *);
166 -static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
167 -static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
168 -int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
165 +static int ql_save_config_regs(dev_info_t *);
166 +static int ql_restore_config_regs(dev_info_t *);
167 +static void ql_halt(ql_adapter_state_t *, int);
169 168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
170 169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
171 -static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
170 +static int ql_suspend_adapter(ql_adapter_state_t *);
171 +static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
172 172 static int ql_setup_interrupts(ql_adapter_state_t *);
173 173 static int ql_setup_msi(ql_adapter_state_t *);
174 174 static int ql_setup_msix(ql_adapter_state_t *);
175 175 static int ql_setup_fixed(ql_adapter_state_t *);
176 176 static void ql_release_intr(ql_adapter_state_t *);
177 -static void ql_disable_intr(ql_adapter_state_t *);
178 177 static int ql_legacy_intr(ql_adapter_state_t *);
179 178 static int ql_init_mutex(ql_adapter_state_t *);
180 179 static void ql_destroy_mutex(ql_adapter_state_t *);
181 -static void ql_iidma(ql_adapter_state_t *);
182 -
183 -static int ql_n_port_plogi(ql_adapter_state_t *);
184 -static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
185 - els_descriptor_t *);
180 +static void ql_fca_isp_els_request(ql_adapter_state_t *, ql_request_q_t *,
181 + fc_packet_t *, els_descriptor_t *);
186 182 static void ql_isp_els_request_ctor(els_descriptor_t *,
187 183 els_passthru_entry_t *);
188 -static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
189 -static int ql_wait_for_td_stop(ql_adapter_state_t *);
190 -static void ql_process_idc_event(ql_adapter_state_t *);
184 +static int ql_n_port_plogi(ql_adapter_state_t *);
185 +static int ql_create_queues(ql_adapter_state_t *);
186 +static int ql_create_rsp_queue(ql_adapter_state_t *, uint16_t);
187 +static void ql_delete_queues(ql_adapter_state_t *);
188 +static int ql_multi_queue_support(ql_adapter_state_t *);
189 +static int ql_map_mem_bar(ql_adapter_state_t *, ddi_acc_handle_t *, caddr_t *,
190 + uint32_t, uint32_t);
191 +static void ql_completion_thread(void *);
192 +static void ql_process_comp_queue(void *);
193 +static int ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *);
194 +static void ql_idc(ql_adapter_state_t *);
195 +static int ql_83xx_binary_fw_dump(ql_adapter_state_t *, ql_83xx_fw_dump_t *);
196 +static size_t ql_83xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
197 +static caddr_t ql_str_ptr(ql_adapter_state_t *, caddr_t, uint32_t *);
198 +static int ql_27xx_binary_fw_dump(ql_adapter_state_t *);
199 +static size_t ql_27xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
200 +static uint32_t ql_2700_dmp_parse_template(ql_adapter_state_t *, ql_dt_hdr_t *,
201 + uint8_t *, uint32_t);
202 +static int ql_2700_dt_riob1(ql_adapter_state_t *, ql_dt_riob1_t *, uint8_t *,
203 + uint8_t *);
204 +static void ql_2700_dt_wiob1(ql_adapter_state_t *, ql_dt_wiob1_t *, uint8_t *,
205 + uint8_t *);
206 +static int ql_2700_dt_riob2(ql_adapter_state_t *, ql_dt_riob2_t *, uint8_t *,
207 + uint8_t *);
208 +static void ql_2700_dt_wiob2(ql_adapter_state_t *, ql_dt_wiob2_t *, uint8_t *,
209 + uint8_t *);
210 +static int ql_2700_dt_rpci(ql_adapter_state_t *, ql_dt_rpci_t *, uint8_t *,
211 + uint8_t *);
212 +static void ql_2700_dt_wpci(ql_adapter_state_t *, ql_dt_wpci_t *, uint8_t *,
213 + uint8_t *);
214 +static int ql_2700_dt_rram(ql_adapter_state_t *, ql_dt_rram_t *, uint8_t *,
215 + uint8_t *);
216 +static int ql_2700_dt_gque(ql_adapter_state_t *, ql_dt_gque_t *, uint8_t *,
217 + uint8_t *);
218 +static int ql_2700_dt_gfce(ql_adapter_state_t *, ql_dt_gfce_t *, uint8_t *,
219 + uint8_t *);
220 +static void ql_2700_dt_prisc(ql_adapter_state_t *, ql_dt_prisc_t *, uint8_t *,
221 + uint8_t *);
222 +static void ql_2700_dt_rrisc(ql_adapter_state_t *, ql_dt_rrisc_t *, uint8_t *,
223 + uint8_t *);
224 +static void ql_2700_dt_dint(ql_adapter_state_t *, ql_dt_dint_t *, uint8_t *,
225 + uint8_t *);
226 +static int ql_2700_dt_ghbd(ql_adapter_state_t *, ql_dt_ghbd_t *, uint8_t *,
227 + uint8_t *);
228 +static int ql_2700_dt_scra(ql_adapter_state_t *, ql_dt_scra_t *, uint8_t *,
229 + uint8_t *);
230 +static int ql_2700_dt_rrreg(ql_adapter_state_t *, ql_dt_rrreg_t *, uint8_t *,
231 + uint8_t *);
232 +static void ql_2700_dt_wrreg(ql_adapter_state_t *, ql_dt_wrreg_t *, uint8_t *,
233 + uint8_t *);
234 +static int ql_2700_dt_rrram(ql_adapter_state_t *, ql_dt_rrram_t *, uint8_t *,
235 + uint8_t *);
236 +static int ql_2700_dt_rpcic(ql_adapter_state_t *, ql_dt_rpcic_t *, uint8_t *,
237 + uint8_t *);
238 +static int ql_2700_dt_gques(ql_adapter_state_t *, ql_dt_gques_t *, uint8_t *,
239 + uint8_t *);
240 +static int ql_2700_dt_wdmp(ql_adapter_state_t *, ql_dt_wdmp_t *, uint8_t *,
241 + uint8_t *);
242 +static int ql_2700_dump_ram(ql_adapter_state_t *, uint16_t, uint32_t, uint32_t,
243 + uint8_t *);
191 244
192 245 /*
193 246 * Global data
194 247 */
195 248 static uint8_t ql_enable_pm = 1;
196 249 static int ql_flash_sbus_fpga = 0;
197 250 uint32_t ql_os_release_level;
198 251 uint32_t ql_disable_aif = 0;
252 +uint32_t ql_disable_intx = 0;
199 253 uint32_t ql_disable_msi = 0;
200 254 uint32_t ql_disable_msix = 0;
201 255 uint32_t ql_enable_ets = 0;
202 256 uint16_t ql_osc_wait_count = 1000;
257 +uint32_t ql_task_cb_dly = 64;
258 +uint32_t qlc_disable_load = 0;
203 259
204 260 /* Timer routine variables. */
205 261 static timeout_id_t ql_timer_timeout_id = NULL;
206 262 static clock_t ql_timer_ticks;
207 263
208 264 /* Soft state head pointer. */
209 265 void *ql_state = NULL;
210 266
211 267 /* Head adapter link. */
212 268 ql_head_t ql_hba = {
213 269 NULL,
214 270 NULL
215 271 };
216 272
217 273 /* Global hba index */
218 274 uint32_t ql_gfru_hba_index = 1;
219 275
220 276 /*
221 277 * Some IP defines and globals
222 278 */
223 279 uint32_t ql_ip_buffer_count = 128;
224 280 uint32_t ql_ip_low_water = 10;
225 281 uint8_t ql_ip_fast_post_count = 5;
226 282 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */
227 283
228 284 /* Device AL_PA to Device Head Queue index array. */
229 285 uint8_t ql_alpa_to_index[] = {
230 286 0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
231 287 0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
232 288 0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
233 289 0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
234 290 0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
235 291 0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
236 292 0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
237 293 0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
238 294 0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
239 295 0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
240 296 0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
241 297 0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
242 298 0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
243 299 0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
244 300 0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
245 301 0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
246 302 0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
247 303 0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
248 304 0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
249 305 0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
250 306 0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
251 307 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
252 308 0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
253 309 0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
254 310 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
255 311 0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
256 312 };
257 313
258 314 /* Device loop_id to ALPA array. */
259 315 static uint8_t ql_index_to_alpa[] = {
260 316 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
261 317 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
262 318 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
263 319 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
264 320 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
265 321 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
266 322 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
267 323 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
268 324 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
269 325 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
270 326 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
271 327 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
272 328 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
273 329 };
274 330
275 331 /* 2200 register offsets */
276 332 static reg_off_t reg_off_2200 = {
277 333 0x00, /* flash_address */
278 334 0x02, /* flash_data */
279 335 0x06, /* ctrl_status */
280 336 0x08, /* ictrl */
281 337 0x0a, /* istatus */
282 338 0x0c, /* semaphore */
283 339 0x0e, /* nvram */
284 340 0x18, /* req_in */
285 341 0x18, /* req_out */
286 342 0x1a, /* resp_in */
287 343 0x1a, /* resp_out */
288 344 0xff, /* risc2host - n/a */
289 345 24, /* Number of mailboxes */
290 346
291 347 /* Mailbox in register offsets 0 - 23 */
292 348 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
293 349 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
294 350 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
295 351 /* 2200 does not have mailbox 24-31 - n/a */
296 352 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
297 353
298 354 /* Mailbox out register offsets 0 - 23 */
299 355 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
300 356 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
301 357 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
302 358 /* 2200 does not have mailbox 24-31 - n/a */
303 359 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
304 360
305 361 0x96, /* fpm_diag_config */
306 362 0xa4, /* pcr */
307 363 0xb0, /* mctr */
308 364 0xb8, /* fb_cmd */
309 365 0xc0, /* hccr */
310 366 0xcc, /* gpiod */
311 367 0xce, /* gpioe */
312 368 0xff, /* host_to_host_sema - n/a */
313 369 0xff, /* pri_req_in - n/a */
314 370 0xff, /* pri_req_out - n/a */
315 371 0xff, /* atio_req_in - n/a */
316 372 0xff, /* atio_req_out - n/a */
317 373 0xff, /* io_base_addr - n/a */
318 374 0xff, /* nx_host_int - n/a */
319 375 0xff /* nx_risc_int - n/a */
320 376 };
321 377
322 378 /* 2300 register offsets */
323 379 static reg_off_t reg_off_2300 = {
324 380 0x00, /* flash_address */
325 381 0x02, /* flash_data */
326 382 0x06, /* ctrl_status */
327 383 0x08, /* ictrl */
328 384 0x0a, /* istatus */
329 385 0x0c, /* semaphore */
330 386 0x0e, /* nvram */
331 387 0x10, /* req_in */
332 388 0x12, /* req_out */
333 389 0x14, /* resp_in */
334 390 0x16, /* resp_out */
335 391 0x18, /* risc2host */
336 392 32, /* Number of mailboxes */
337 393
338 394 /* Mailbox in register offsets 0 - 31 */
339 395 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
340 396 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
341 397 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
342 398 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
343 399
344 400 /* Mailbox out register offsets 0 - 31 */
345 401 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
346 402 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
347 403 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
348 404 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
349 405
350 406 0x96, /* fpm_diag_config */
351 407 0xa4, /* pcr */
352 408 0xb0, /* mctr */
353 409 0x80, /* fb_cmd */
354 410 0xc0, /* hccr */
355 411 0xcc, /* gpiod */
356 412 0xce, /* gpioe */
357 413 0x1c, /* host_to_host_sema */
358 414 0xff, /* pri_req_in - n/a */
359 415 0xff, /* pri_req_out - n/a */
360 416 0xff, /* atio_req_in - n/a */
361 417 0xff, /* atio_req_out - n/a */
362 418 0xff, /* io_base_addr - n/a */
363 419 0xff, /* nx_host_int - n/a */
364 420 0xff /* nx_risc_int - n/a */
365 421 };
366 422
367 423 /* 2400/2500 register offsets */
368 424 reg_off_t reg_off_2400_2500 = {
369 425 0x00, /* flash_address */
370 426 0x04, /* flash_data */
371 427 0x08, /* ctrl_status */
372 428 0x0c, /* ictrl */
373 429 0x10, /* istatus */
374 430 0xff, /* semaphore - n/a */
375 431 0xff, /* nvram - n/a */
376 432 0x1c, /* req_in */
377 433 0x20, /* req_out */
378 434 0x24, /* resp_in */
379 435 0x28, /* resp_out */
380 436 0x44, /* risc2host */
381 437 32, /* Number of mailboxes */
382 438
383 439 /* Mailbox in register offsets 0 - 31 */
384 440 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
385 441 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
386 442 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
387 443 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
388 444
389 445 /* Mailbox out register offsets 0 - 31 */
390 446 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
391 447 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
392 448 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
393 449 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
394 450
395 451 0xff, /* fpm_diag_config - n/a */
396 452 0xff, /* pcr - n/a */
397 453 0xff, /* mctr - n/a */
398 454 0xff, /* fb_cmd - n/a */
399 455 0x48, /* hccr */
400 456 0x4c, /* gpiod */
401 457 0x50, /* gpioe */
402 458 0xff, /* host_to_host_sema - n/a */
403 459 0x2c, /* pri_req_in */
404 460 0x30, /* pri_req_out */
405 461 0x3c, /* atio_req_in */
406 462 0x40, /* atio_req_out */
407 463 0x54, /* io_base_addr */
408 464 0xff, /* nx_host_int - n/a */
409 465 0xff /* nx_risc_int - n/a */
410 466 };
411 467
412 468 /* P3 register offsets */
413 469 static reg_off_t reg_off_8021 = {
414 470 0x00, /* flash_address */
415 471 0x04, /* flash_data */
416 472 0x08, /* ctrl_status */
417 473 0x0c, /* ictrl */
418 474 0x10, /* istatus */
419 475 0xff, /* semaphore - n/a */
420 476 0xff, /* nvram - n/a */
421 477 0xff, /* req_in - n/a */
422 478 0x0, /* req_out */
423 479 0x100, /* resp_in */
424 480 0x200, /* resp_out */
425 481 0x500, /* risc2host */
426 482 32, /* Number of mailboxes */
427 483
428 484 /* Mailbox in register offsets 0 - 31 */
429 485 0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
430 486 0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
431 487 0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
432 488 0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,
433 489
434 490 /* Mailbox out register offsets 0 - 31 */
435 491 0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
436 492 0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
437 493 0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
438 494 0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
439 495
440 496 0xff, /* fpm_diag_config - n/a */
441 497 0xff, /* pcr - n/a */
442 498 0xff, /* mctr - n/a */
443 499 0xff, /* fb_cmd - n/a */
444 500 0x48, /* hccr */
445 501 0x4c, /* gpiod */
446 502 0x50, /* gpioe */
|
↓ open down ↓ |
234 lines elided |
↑ open up ↑ |
447 503 0xff, /* host_to_host_sema - n/a */
448 504 0x2c, /* pri_req_in */
449 505 0x30, /* pri_req_out */
450 506 0x3c, /* atio_req_in */
451 507 0x40, /* atio_req_out */
452 508 0x54, /* io_base_addr */
453 509 0x380, /* nx_host_int */
454 510 0x504 /* nx_risc_int */
455 511 };
456 512
513 +/* 2700/8300 register offsets */
514 +static reg_off_t reg_off_2700_8300 = {
515 + 0x00, /* flash_address */
516 + 0x04, /* flash_data */
517 + 0x08, /* ctrl_status */
518 + 0x0c, /* ictrl */
519 + 0x10, /* istatus */
520 + 0xff, /* semaphore - n/a */
521 + 0xff, /* nvram - n/a */
522 + 0xff, /* req_in - n/a */
523 + 0xff, /* req_out - n/a */
524 + 0xff, /* resp_in - n/a */
525 + 0xff, /* resp_out - n/a */
526 + 0x44, /* risc2host */
527 + 32, /* Number of mailboxes */
528 +
529 + /* Mailbox in register offsets 0 - 31 */
530 + 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
531 + 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
532 + 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
533 + 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
534 +
535 + /* Mailbox out register offsets 0 - 31 */
536 + 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
537 + 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
538 + 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
539 + 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
540 +
541 + 0xff, /* fpm_diag_config - n/a */
542 + 0xff, /* pcr - n/a */
543 + 0xff, /* mctr - n/a */
544 + 0xff, /* fb_cmd - n/a */
545 + 0x48, /* hccr */
546 + 0x4c, /* gpiod */
547 + 0x50, /* gpioe */
548 + 0x58, /* host_to_host_sema - n/a */
549 + 0xff, /* pri_req_in - n/a */
550 + 0xff, /* pri_req_out - n/a */
551 + 0xff, /* atio_req_in - n/a */
552 + 0xff, /* atio_req_out - n/a */
553 + 0x54, /* io_base_addr */
554 + 0xff, /* nx_host_int - n/a */
555 + 0xff /* nx_risc_int - n/a */
556 +};
557 +
457 558 /* mutex for protecting variables shared by all instances of the driver */
458 559 kmutex_t ql_global_mutex;
459 560 kmutex_t ql_global_hw_mutex;
460 561 kmutex_t ql_global_el_mutex;
562 +kmutex_t ql_global_timer_mutex;
461 563
462 564 /* DMA access attribute structure. */
463 -static ddi_device_acc_attr_t ql_dev_acc_attr = {
565 +ddi_device_acc_attr_t ql_dev_acc_attr = {
464 566 DDI_DEVICE_ATTR_V0,
465 567 DDI_STRUCTURE_LE_ACC,
466 568 DDI_STRICTORDER_ACC
467 569 };
468 570
469 571 /* I/O DMA attributes structures. */
470 -static ddi_dma_attr_t ql_64bit_io_dma_attr = {
572 +ddi_dma_attr_t ql_64bit_io_dma_attr = {
471 573 DMA_ATTR_V0, /* dma_attr_version */
472 574 QL_DMA_LOW_ADDRESS, /* low DMA address range */
473 575 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
474 576 QL_DMA_XFER_COUNTER, /* DMA counter register */
475 577 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
476 578 QL_DMA_BURSTSIZES, /* DMA burstsizes */
477 579 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
478 580 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
479 581 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
480 582 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
481 583 QL_DMA_GRANULARITY, /* granularity of device */
482 584 QL_DMA_XFER_FLAGS /* DMA transfer flags */
483 585 };
484 586
485 -static ddi_dma_attr_t ql_32bit_io_dma_attr = {
587 +ddi_dma_attr_t ql_32bit_io_dma_attr = {
486 588 DMA_ATTR_V0, /* dma_attr_version */
487 589 QL_DMA_LOW_ADDRESS, /* low DMA address range */
488 590 QL_DMA_HIGH_32BIT_ADDRESS, /* high DMA address range */
489 591 QL_DMA_XFER_COUNTER, /* DMA counter register */
490 592 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
491 593 QL_DMA_BURSTSIZES, /* DMA burstsizes */
492 594 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
493 595 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
494 596 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
495 597 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
496 598 QL_DMA_GRANULARITY, /* granularity of device */
497 599 QL_DMA_XFER_FLAGS /* DMA transfer flags */
498 600 };
499 601
500 -/* Load the default dma attributes */
501 -static ddi_dma_attr_t ql_32fcsm_cmd_dma_attr;
502 -static ddi_dma_attr_t ql_64fcsm_cmd_dma_attr;
503 -static ddi_dma_attr_t ql_32fcsm_rsp_dma_attr;
504 -static ddi_dma_attr_t ql_64fcsm_rsp_dma_attr;
505 -static ddi_dma_attr_t ql_32fcip_cmd_dma_attr;
506 -static ddi_dma_attr_t ql_64fcip_cmd_dma_attr;
507 -static ddi_dma_attr_t ql_32fcip_rsp_dma_attr;
508 -static ddi_dma_attr_t ql_64fcip_rsp_dma_attr;
509 -static ddi_dma_attr_t ql_32fcp_cmd_dma_attr;
510 -static ddi_dma_attr_t ql_64fcp_cmd_dma_attr;
511 -static ddi_dma_attr_t ql_32fcp_rsp_dma_attr;
512 -static ddi_dma_attr_t ql_64fcp_rsp_dma_attr;
513 -static ddi_dma_attr_t ql_32fcp_data_dma_attr;
514 -static ddi_dma_attr_t ql_64fcp_data_dma_attr;
515 -
516 602 /* Static declarations of cb_ops entry point functions... */
517 603 static struct cb_ops ql_cb_ops = {
518 604 ql_open, /* b/c open */
519 605 ql_close, /* b/c close */
520 606 nodev, /* b strategy */
521 607 nodev, /* b print */
522 608 nodev, /* b dump */
523 609 nodev, /* c read */
524 610 nodev, /* c write */
525 611 ql_ioctl, /* c ioctl */
526 612 nodev, /* c devmap */
527 613 nodev, /* c mmap */
528 614 nodev, /* c segmap */
529 615 nochpoll, /* c poll */
530 616 nodev, /* cb_prop_op */
531 617 NULL, /* streamtab */
532 618 D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flag */
533 619 CB_REV, /* cb_ops revision */
534 620 nodev, /* c aread */
535 621 nodev /* c awrite */
536 622 };
537 623
538 624 /* Static declarations of dev_ops entry point functions... */
539 625 static struct dev_ops ql_devops = {
540 626 DEVO_REV, /* devo_rev */
541 627 0, /* refcnt */
542 628 ql_getinfo, /* getinfo */
543 629 nulldev, /* identify */
544 630 nulldev, /* probe */
545 631 ql_attach, /* attach */
546 632 ql_detach, /* detach */
547 633 nodev, /* reset */
548 634 &ql_cb_ops, /* char/block ops */
|
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
549 635 NULL, /* bus operations */
550 636 ql_power, /* power management */
551 637 ql_quiesce /* quiesce device */
552 638 };
553 639
554 640 /* ELS command code to text converter */
555 641 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
556 642 /* Mailbox command code to text converter */
557 643 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
558 644
559 -char qlc_driver_version[] = QL_VERSION;
645 +char ql_driver_version[] = QL_VERSION;
560 646
647 +uint32_t ql_log_entries = QL_LOG_ENTRIES;
648 +
561 649 /*
562 650 * Loadable Driver Interface Structures.
563 651 * Declare and initialize the module configuration section...
564 652 */
565 653 static struct modldrv modldrv = {
566 654 &mod_driverops, /* type of module: driver */
567 655 "SunFC Qlogic FCA v" QL_VERSION, /* name of module */
568 656 &ql_devops /* driver dev_ops */
569 657 };
570 658
571 659 static struct modlinkage modlinkage = {
572 660 MODREV_1,
573 661 &modldrv,
574 662 NULL
575 663 };
576 664
577 665 /* ************************************************************************ */
578 666 /* Loadable Module Routines. */
579 667 /* ************************************************************************ */
580 668
581 669 /*
582 670 * _init
583 671 * Initializes a loadable module. It is called before any other
584 672 * routine in a loadable module.
585 673 *
586 674 * Returns:
587 675 * 0 = success
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
588 676 *
589 677 * Context:
590 678 * Kernel context.
591 679 */
592 680 int
593 681 _init(void)
594 682 {
595 683 uint16_t w16;
596 684 int rval = 0;
597 685
686 + if (qlc_disable_load) {
687 + cmn_err(CE_WARN, "%s load disabled", QL_NAME);
688 + return (EINVAL);
689 + }
690 +
598 691 /* Get OS major release level. */
599 692 for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
600 693 if (utsname.release[w16] == '.') {
601 694 w16++;
602 695 break;
603 696 }
604 697 }
605 698 if (w16 < sizeof (utsname.release)) {
606 699 (void) ql_bstr_to_dec(&utsname.release[w16],
607 700 &ql_os_release_level, 0);
608 701 } else {
609 702 ql_os_release_level = 0;
610 703 }
611 704 if (ql_os_release_level < 6) {
612 705 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
613 706 QL_NAME, ql_os_release_level);
614 707 rval = EINVAL;
615 708 }
616 709 if (ql_os_release_level == 6) {
617 710 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
618 711 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
619 712 }
620 713
621 714 if (rval == 0) {
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
622 715 rval = ddi_soft_state_init(&ql_state,
623 716 sizeof (ql_adapter_state_t), 0);
624 717 }
625 718 if (rval == 0) {
626 719 /* allow the FC Transport to tweak the dev_ops */
627 720 fc_fca_init(&ql_devops);
628 721
629 722 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
630 723 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
631 724 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
725 + mutex_init(&ql_global_timer_mutex, NULL, MUTEX_DRIVER, NULL);
632 726 rval = mod_install(&modlinkage);
633 727 if (rval != 0) {
728 + mutex_destroy(&ql_global_timer_mutex);
729 + mutex_destroy(&ql_global_el_mutex);
634 730 mutex_destroy(&ql_global_hw_mutex);
635 731 mutex_destroy(&ql_global_mutex);
636 - mutex_destroy(&ql_global_el_mutex);
637 732 ddi_soft_state_fini(&ql_state);
638 - } else {
639 - /*EMPTY*/
640 - ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
641 - ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
642 - ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
643 - ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
644 - ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
645 - ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
646 - ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
647 - ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
648 - ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
649 - ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
650 - ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
651 - ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
652 - ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
653 - ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
654 - ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
655 - ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
656 - QL_FCSM_CMD_SGLLEN;
657 - ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
658 - ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
659 - QL_FCSM_RSP_SGLLEN;
660 - ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
661 - ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
662 - QL_FCIP_CMD_SGLLEN;
663 - ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
664 - ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
665 - QL_FCIP_RSP_SGLLEN;
666 - ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
667 - ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
668 - QL_FCP_CMD_SGLLEN;
669 - ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
670 - ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
671 - QL_FCP_RSP_SGLLEN;
672 733 }
673 734 }
674 735
675 736 if (rval != 0) {
676 737 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
677 738 QL_NAME);
678 739 }
679 740
680 741 return (rval);
681 742 }
682 743
683 744 /*
684 745 * _fini
685 746 * Prepares a module for unloading. It is called when the system
686 747 * wants to unload a module. If the module determines that it can
687 748 * be unloaded, then _fini() returns the value returned by
688 749 * mod_remove(). Upon successful return from _fini() no other
689 750 * routine in the module will be called before _init() is called.
690 751 *
691 752 * Returns:
692 753 * 0 = success
693 754 *
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
694 755 * Context:
695 756 * Kernel context.
696 757 */
697 758 int
698 759 _fini(void)
699 760 {
700 761 int rval;
701 762
702 763 rval = mod_remove(&modlinkage);
703 764 if (rval == 0) {
765 + mutex_destroy(&ql_global_timer_mutex);
766 + mutex_destroy(&ql_global_el_mutex);
704 767 mutex_destroy(&ql_global_hw_mutex);
705 768 mutex_destroy(&ql_global_mutex);
706 - mutex_destroy(&ql_global_el_mutex);
707 769 ddi_soft_state_fini(&ql_state);
708 770 }
709 771
710 772 return (rval);
711 773 }
712 774
713 775 /*
714 776 * _info
715 777 * Returns information about loadable module.
716 778 *
717 779 * Input:
718 780 * modinfo = pointer to module information structure.
719 781 *
720 782 * Returns:
721 783 * Value returned by mod_info().
722 784 *
723 785 * Context:
724 786 * Kernel context.
725 787 */
726 788 int
727 789 _info(struct modinfo *modinfop)
728 790 {
729 791 return (mod_info(&modlinkage, modinfop));
730 792 }
731 793
732 794 /* ************************************************************************ */
733 795 /* dev_ops functions */
734 796 /* ************************************************************************ */
735 797
736 798 /*
737 799 * ql_getinfo
738 800 * Returns the pointer associated with arg when cmd is
739 801 * set to DDI_INFO_DEVT2DEVINFO, or it should return the
740 802 * instance number associated with arg when cmd is set
741 803 * to DDI_INFO_DEV2INSTANCE.
742 804 *
743 805 * Input:
744 806 * dip = Do not use.
745 807 * cmd = command argument.
746 808 * arg = command specific argument.
747 809 * resultp = pointer to where request information is stored.
748 810 *
749 811 * Returns:
750 812 * DDI_SUCCESS or DDI_FAILURE.
751 813 *
752 814 * Context:
753 815 * Kernel context.
754 816 */
755 817 /* ARGSUSED */
|
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
756 818 static int
757 819 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
758 820 {
759 821 ql_adapter_state_t *ha;
760 822 int minor;
761 823 int rval = DDI_FAILURE;
762 824
763 825 minor = (int)(getminor((dev_t)arg));
764 826 ha = ddi_get_soft_state(ql_state, minor);
765 827 if (ha == NULL) {
766 - QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
828 + QL_PRINT_2(ha, "failed, unknown minor=%d\n",
767 829 getminor((dev_t)arg));
768 830 *resultp = NULL;
769 831 return (rval);
770 832 }
771 833
772 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
834 + QL_PRINT_3(ha, "started\n");
773 835
774 836 switch (cmd) {
775 837 case DDI_INFO_DEVT2DEVINFO:
776 838 *resultp = ha->dip;
777 839 rval = DDI_SUCCESS;
778 840 break;
779 841 case DDI_INFO_DEVT2INSTANCE:
780 842 *resultp = (void *)(uintptr_t)(ha->instance);
781 843 rval = DDI_SUCCESS;
782 844 break;
783 845 default:
784 846 EL(ha, "failed, unsupported cmd=%d\n", cmd);
785 847 rval = DDI_FAILURE;
786 848 break;
787 849 }
788 850
789 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
851 + QL_PRINT_3(ha, "done\n");
790 852
791 853 return (rval);
792 854 }
793 855
794 856 /*
795 857 * ql_attach
796 858 * Configure and attach an instance of the driver
797 859 * for a port.
798 860 *
799 861 * Input:
800 862 * dip = pointer to device information structure.
801 863 * cmd = attach type.
802 864 *
803 865 * Returns:
804 866 * DDI_SUCCESS or DDI_FAILURE.
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
805 867 *
806 868 * Context:
807 869 * Kernel context.
808 870 */
809 871 static int
810 872 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
811 873 {
812 874 off_t regsize;
813 875 uint32_t size;
814 876 int rval, *ptr;
815 - int instance;
816 877 uint_t progress = 0;
817 - char *buf;
878 + char *buf, taskq_name[32];
818 879 ushort_t caps_ptr, cap;
819 880 fc_fca_tran_t *tran;
820 881 ql_adapter_state_t *ha = NULL;
882 + int instance = ddi_get_instance(dip);
821 883
822 884 static char *pmcomps[] = {
823 885 NULL,
824 886 PM_LEVEL_D3_STR, /* Device OFF */
825 887 PM_LEVEL_D0_STR, /* Device ON */
826 888 };
827 889
828 - QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
890 + QL_PRINT_3(NULL, "started, instance=%d, cmd=%xh\n",
829 891 ddi_get_instance(dip), cmd);
830 892
831 893 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
832 894
833 895 switch (cmd) {
834 896 case DDI_ATTACH:
835 - /* first get the instance */
836 - instance = ddi_get_instance(dip);
837 -
838 897 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
839 898 QL_NAME, instance, QL_VERSION);
840 899
841 900 /* Correct OS version? */
842 901 if (ql_os_release_level != 11) {
843 902 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
844 903 "11", QL_NAME, instance);
845 904 goto attach_failed;
846 905 }
847 906
848 907 /* Hardware is installed in a DMA-capable slot? */
849 908 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
850 909 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
851 910 instance);
852 911 goto attach_failed;
853 912 }
854 913
855 - /* No support for high-level interrupts */
856 - if (ddi_intr_hilevel(dip, 0) != 0) {
857 - cmn_err(CE_WARN, "%s(%d): High level interrupt"
858 - " not supported", QL_NAME, instance);
859 - goto attach_failed;
860 - }
861 -
862 914 /* Allocate our per-device-instance structure */
863 915 if (ddi_soft_state_zalloc(ql_state,
864 916 instance) != DDI_SUCCESS) {
865 917 cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
866 918 QL_NAME, instance);
867 919 goto attach_failed;
868 920 }
869 - progress |= QL_SOFT_STATE_ALLOCED;
870 921
871 922 ha = ddi_get_soft_state(ql_state, instance);
872 923 if (ha == NULL) {
873 924 cmn_err(CE_WARN, "%s(%d): can't get soft state",
874 925 QL_NAME, instance);
875 926 goto attach_failed;
876 927 }
877 928 ha->dip = dip;
878 929 ha->instance = instance;
879 930 ha->hba.base_address = ha;
880 931 ha->pha = ha;
881 932
882 - if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
883 - cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
884 - QL_NAME, instance);
885 - goto attach_failed;
886 - }
933 + ha->bit32_io_dma_attr = ql_32bit_io_dma_attr;
934 + ha->bit64_io_dma_attr = ql_64bit_io_dma_attr;
887 935
936 + (void) ql_el_trace_alloc(ha);
937 +
938 + progress |= QL_SOFT_STATE_ALLOCED;
939 +
888 940 /* Get extended logging and dump flags. */
889 941 ql_common_properties(ha);
890 942
943 + qlc_fm_init(ha);
944 + progress |= QL_FCA_INIT_FM;
945 +
946 + ha->io_dma_attr = ha->bit32_io_dma_attr;
947 +
891 948 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
892 949 "sbus") == 0) {
893 - EL(ha, "%s SBUS card detected", QL_NAME);
950 + EL(ha, "%s SBUS card detected\n", QL_NAME);
894 951 ha->cfg_flags |= CFG_SBUS_CARD;
895 952 }
896 953
897 954 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
898 955 DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
899 956
900 - ha->outstanding_cmds = kmem_zalloc(
901 - sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
902 - KM_SLEEP);
903 -
904 957 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
905 958 QL_UB_LIMIT, KM_SLEEP);
906 959
907 960 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
908 961 KM_SLEEP);
909 962
910 963 (void) ddi_pathname(dip, buf);
911 - ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
964 + ha->devpath = kmem_zalloc(strlen(buf) + 1, KM_SLEEP);
912 965 if (ha->devpath == NULL) {
913 966 EL(ha, "devpath mem alloc failed\n");
914 967 } else {
915 968 (void) strcpy(ha->devpath, buf);
916 969 EL(ha, "devpath is: %s\n", ha->devpath);
917 970 }
918 971
919 972 if (CFG_IST(ha, CFG_SBUS_CARD)) {
920 973 /*
921 974 * For cards where PCI is mapped to sbus e.g. Ivory.
922 975 *
923 976 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
924 977 * : 0x100 - 0x3FF PCI IO space for 2200
925 978 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
926 979 * : 0x100 - 0x3FF PCI IO Space for fpga
927 980 */
928 981 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
929 982 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
930 983 DDI_SUCCESS) {
931 984 cmn_err(CE_WARN, "%s(%d): Unable to map device"
932 985 " registers", QL_NAME, instance);
933 986 goto attach_failed;
934 987 }
935 988 if (ddi_regs_map_setup(dip, 1,
936 989 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
937 990 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
938 991 DDI_SUCCESS) {
939 992 /* We should not fail attach here */
940 993 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
941 994 QL_NAME, instance);
942 995 ha->sbus_fpga_iobase = NULL;
943 996 }
944 997 progress |= QL_REGS_MAPPED;
945 998
946 999 /*
947 1000 * We should map config space before adding interrupt
948 1001 * So that the chip type (2200 or 2300) can be
949 1002 * determined before the interrupt routine gets a
950 1003 * chance to execute.
951 1004 */
952 1005 if (ddi_regs_map_setup(dip, 0,
953 1006 (caddr_t *)&ha->sbus_config_base, 0, 0x100,
954 1007 &ql_dev_acc_attr, &ha->sbus_config_handle) !=
955 1008 DDI_SUCCESS) {
956 1009 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
957 1010 "config registers", QL_NAME, instance);
958 1011 goto attach_failed;
959 1012 }
960 1013 progress |= QL_CONFIG_SPACE_SETUP;
|
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
961 1014 } else {
962 1015 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
963 1016 rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
964 1017 DDI_PROP_DONTPASS, "reg", &ptr, &size);
965 1018 if (rval != DDI_PROP_SUCCESS) {
966 1019 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
967 1020 "address registers", QL_NAME, instance);
968 1021 goto attach_failed;
969 1022 } else {
970 1023 ha->pci_bus_addr = ptr[0];
971 - ha->function_number = (uint8_t)
1024 + ha->pci_function_number = (uint8_t)
972 1025 (ha->pci_bus_addr >> 8 & 7);
973 1026 ddi_prop_free(ptr);
974 1027 }
975 1028
976 1029 /*
977 1030 * We should map config space before adding interrupt
978 1031 * So that the chip type (2200 or 2300) can be
979 1032 * determined before the interrupt routine gets a
980 1033 * chance to execute.
981 1034 */
982 1035 if (pci_config_setup(ha->dip, &ha->pci_handle) !=
983 1036 DDI_SUCCESS) {
984 1037 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
985 1038 "config space", QL_NAME, instance);
986 1039 goto attach_failed;
987 1040 }
988 1041 progress |= QL_CONFIG_SPACE_SETUP;
989 1042
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
990 1043 /*
991 1044 * Setup the ISP2200 registers address mapping to be
992 1045 * accessed by this particular driver.
993 1046 * 0x0 Configuration Space
994 1047 * 0x1 I/O Space
995 1048 * 0x2 32-bit Memory Space address
996 1049 * 0x3 64-bit Memory Space address
997 1050 */
998 1051 size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
999 1052 2 : 1;
1053 +
1054 + if (qlc_fm_check_acc_handle(ha, ha->pci_handle)
1055 + != DDI_FM_OK) {
1056 + qlc_fm_report_err_impact(ha,
1057 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
1058 + goto attach_failed;
1059 + }
1060 +
1000 1061 if (ddi_dev_regsize(dip, size, ®size) !=
1001 1062 DDI_SUCCESS ||
1002 1063 ddi_regs_map_setup(dip, size, &ha->iobase,
1003 1064 0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1004 1065 DDI_SUCCESS) {
1005 1066 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1006 1067 "failed", QL_NAME, instance);
1007 1068 goto attach_failed;
1008 1069 }
1009 1070 progress |= QL_REGS_MAPPED;
1010 1071
1072 + if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1073 + != DDI_FM_OK) {
1074 + qlc_fm_report_err_impact(ha,
1075 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
1076 + goto attach_failed;
1077 + }
1078 +
1011 1079 /*
1012 1080 * We need I/O space mappings for 23xx HBAs for
1013 1081 * loading flash (FCode). The chip has a bug due to
1014 1082 * which loading flash fails through mem space
1015 1083 * mappings in PCI-X mode.
1016 1084 */
1017 1085 if (size == 1) {
1018 1086 ha->iomap_iobase = ha->iobase;
1019 1087 ha->iomap_dev_handle = ha->dev_handle;
1020 1088 } else {
1021 1089 if (ddi_dev_regsize(dip, 1, ®size) !=
1022 1090 DDI_SUCCESS ||
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
1023 1091 ddi_regs_map_setup(dip, 1,
1024 1092 &ha->iomap_iobase, 0, regsize,
1025 1093 &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1026 1094 DDI_SUCCESS) {
1027 1095 cmn_err(CE_WARN, "%s(%d): regs_map_"
1028 1096 "setup(I/O) failed", QL_NAME,
1029 1097 instance);
1030 1098 goto attach_failed;
1031 1099 }
1032 1100 progress |= QL_IOMAP_IOBASE_MAPPED;
1101 +
1102 + if (qlc_fm_check_acc_handle(ha,
1103 + ha->iomap_dev_handle) != DDI_FM_OK) {
1104 + qlc_fm_report_err_impact(ha,
1105 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
1106 + goto attach_failed;
1107 + }
1033 1108 }
1034 1109 }
1035 1110
1036 1111 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1037 1112 PCI_CONF_SUBSYSID);
1038 1113 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1039 1114 PCI_CONF_SUBVENID);
1040 1115 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1041 1116 PCI_CONF_VENID);
1042 1117 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1043 1118 PCI_CONF_DEVID);
1044 1119 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1045 1120 PCI_CONF_REVID);
1046 1121
1047 1122 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
1048 1123 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1049 1124 ha->subven_id, ha->subsys_id);
1050 1125
1051 1126 switch (ha->device_id) {
1052 1127 case 0x2300:
1053 1128 case 0x2312:
1054 1129 case 0x2322:
1055 1130 case 0x6312:
1056 1131 case 0x6322:
1057 1132 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1058 - ha->flags |= FUNCTION_1;
1133 + ha->function_number = 1;
1059 1134 }
1060 - if ((ha->device_id == 0x6322) ||
1061 - (ha->device_id == 0x2322)) {
1062 - ha->cfg_flags |= CFG_CTRL_6322;
1135 + if (ha->device_id == 0x2322 ||
1136 + ha->device_id == 0x6322) {
1137 + ha->cfg_flags |= CFG_CTRL_63XX;
1063 1138 ha->fw_class = 0x6322;
1064 1139 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1065 1140 } else {
1066 - ha->cfg_flags |= CFG_CTRL_2300;
1141 + ha->cfg_flags |= CFG_CTRL_23XX;
1067 1142 ha->fw_class = 0x2300;
1068 1143 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1069 1144 }
1070 1145 ha->reg_off = ®_off_2300;
1146 + ha->interrupt_count = 1;
1147 + ha->osc_max_cnt = 1024;
1071 1148 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1072 1149 goto attach_failed;
1073 1150 }
1074 1151 ha->fcp_cmd = ql_command_iocb;
1075 1152 ha->ip_cmd = ql_ip_iocb;
1076 1153 ha->ms_cmd = ql_ms_iocb;
1077 1154 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1078 1155 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1079 1156 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1080 1157 } else {
1081 1158 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1082 1159 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1083 1160 }
1084 1161 break;
1085 1162
1086 1163 case 0x2200:
1087 - ha->cfg_flags |= CFG_CTRL_2200;
1164 + ha->cfg_flags |= CFG_CTRL_22XX;
1088 1165 ha->reg_off = ®_off_2200;
1166 + ha->interrupt_count = 1;
1167 + ha->osc_max_cnt = 1024;
1089 1168 ha->fw_class = 0x2200;
1090 1169 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1091 1170 goto attach_failed;
1092 1171 }
1093 1172 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1094 1173 ha->fcp_cmd = ql_command_iocb;
1095 1174 ha->ip_cmd = ql_ip_iocb;
1096 1175 ha->ms_cmd = ql_ms_iocb;
1097 1176 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1098 1177 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1099 1178 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1100 1179 } else {
1101 1180 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
1102 1181 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1103 1182 }
1104 1183 break;
1105 1184
1106 1185 case 0x2422:
1107 1186 case 0x2432:
1108 1187 case 0x5422:
1109 1188 case 0x5432:
1110 1189 case 0x8432:
1111 1190 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1112 - ha->flags |= FUNCTION_1;
1191 + ha->function_number = 1;
1113 1192 }
1114 - ha->cfg_flags |= CFG_CTRL_2422;
1193 + ha->cfg_flags |= CFG_CTRL_24XX;
1115 1194 if (ha->device_id == 0x8432) {
1116 1195 ha->cfg_flags |= CFG_CTRL_MENLO;
1117 1196 } else {
1118 1197 ha->flags |= VP_ENABLED;
1198 + ha->max_vports = MAX_24_VIRTUAL_PORTS;
1119 1199 }
1120 1200
1121 1201 ha->reg_off = ®_off_2400_2500;
1202 + ha->interrupt_count = 2;
1203 + ha->osc_max_cnt = 2048;
1122 1204 ha->fw_class = 0x2400;
1123 1205 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1124 1206 goto attach_failed;
1125 1207 }
1126 1208 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1127 1209 ha->fcp_cmd = ql_command_24xx_iocb;
1128 1210 ha->ip_cmd = ql_ip_24xx_iocb;
1129 1211 ha->ms_cmd = ql_ms_24xx_iocb;
1130 1212 ha->els_cmd = ql_els_24xx_iocb;
1131 1213 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1132 1214 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1133 1215 break;
1134 1216
1135 1217 case 0x2522:
1136 1218 case 0x2532:
1137 1219 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1138 - ha->flags |= FUNCTION_1;
1220 + ha->function_number = 1;
1139 1221 }
1140 1222 ha->cfg_flags |= CFG_CTRL_25XX;
1141 1223 ha->flags |= VP_ENABLED;
1142 - ha->fw_class = 0x2500;
1224 + ha->max_vports = MAX_25_VIRTUAL_PORTS;
1143 1225 ha->reg_off = ®_off_2400_2500;
1226 + ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1227 + ha->interrupt_count = 2;
1228 + ha->osc_max_cnt = 2048;
1229 + ha->fw_class = 0x2500;
1144 1230 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1145 1231 goto attach_failed;
1146 1232 }
1147 1233 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1148 1234 ha->fcp_cmd = ql_command_24xx_iocb;
1149 - ha->ip_cmd = ql_ip_24xx_iocb;
1150 1235 ha->ms_cmd = ql_ms_24xx_iocb;
1151 1236 ha->els_cmd = ql_els_24xx_iocb;
1152 1237 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1153 1238 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1239 + if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1240 + ha->flags |= MULTI_QUEUE;
1241 + }
1154 1242 break;
1155 1243
1244 + case 0x2031:
1245 + /* Get queue pointer memory mapped registers */
1246 + if (ddi_dev_regsize(dip, 3, ®size) != DDI_SUCCESS ||
1247 + ddi_regs_map_setup(dip, 3, &ha->mbar,
1248 + 0, regsize, &ql_dev_acc_attr,
1249 + &ha->mbar_dev_handle) != DDI_SUCCESS) {
1250 + cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1251 + "(mbar) failed", QL_NAME, instance);
1252 + goto attach_failed;
1253 + }
1254 + ha->mbar_size = (uint32_t)regsize;
1255 +
1256 + if (ha->pci_function_number != 0 &&
1257 + ha->pci_function_number != 2) {
1258 + ha->function_number = 1;
1259 + }
1260 + ha->cfg_flags |= CFG_CTRL_83XX;
1261 + ha->flags |= VP_ENABLED | MULTI_QUEUE;
1262 + ha->max_vports = MAX_83_VIRTUAL_PORTS;
1263 + ha->reg_off = ®_off_2700_8300;
1264 + ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1265 + ha->interrupt_count = 2;
1266 + ha->osc_max_cnt = 2048;
1267 + ha->fw_class = 0x8301fc;
1268 + if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1269 + goto attach_failed;
1270 + }
1271 + ha->risc_dump_size = QL_83XX_FW_DUMP_SIZE;
1272 + ha->fcp_cmd = ql_command_24xx_iocb;
1273 + ha->ms_cmd = ql_ms_24xx_iocb;
1274 + ha->els_cmd = ql_els_24xx_iocb;
1275 + ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1276 + ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1277 + break;
1278 +
1279 + case 0x2071:
1280 + case 0x2261:
1281 + case 0x2271:
1282 + /* Get queue pointer memory mapped registers */
1283 + if (ddi_dev_regsize(dip, 3, ®size) != DDI_SUCCESS ||
1284 + ddi_regs_map_setup(dip, 3, &ha->mbar,
1285 + 0, regsize, &ql_dev_acc_attr,
1286 + &ha->mbar_dev_handle) != DDI_SUCCESS) {
1287 + cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1288 + "(mbar) failed", QL_NAME, instance);
1289 + goto attach_failed;
1290 + }
1291 + ha->mbar_size = (uint32_t)regsize;
1292 +
1293 + ha->function_number = ha->pci_function_number;
1294 + ha->cfg_flags |= CFG_CTRL_27XX;
1295 + ha->flags |= VP_ENABLED | MULTI_QUEUE |
1296 + QUEUE_SHADOW_PTRS;
1297 + ha->max_vports = MAX_27_VIRTUAL_PORTS;
1298 + ha->reg_off = ®_off_2700_8300;
1299 + ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1300 + ha->interrupt_count = 2;
1301 + ha->osc_max_cnt = 2048;
1302 + ha->fw_class = 0x2700;
1303 + if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1304 + goto attach_failed;
1305 + }
1306 + ha->risc_dump_size = QL_27XX_FW_DUMP_SIZE;
1307 + ha->fcp_cmd = ql_command_24xx_iocb;
1308 + ha->ms_cmd = ql_ms_24xx_iocb;
1309 + ha->els_cmd = ql_els_24xx_iocb;
1310 + ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1311 + ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1312 + break;
1313 +
1156 1314 case 0x8001:
1157 1315 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1158 - ha->flags |= FUNCTION_1;
1316 + ha->function_number = 1;
1159 1317 }
1160 1318 ha->cfg_flags |= CFG_CTRL_81XX;
1161 1319 ha->flags |= VP_ENABLED;
1162 - ha->fw_class = 0x8100;
1320 + ha->max_vports = MAX_81XX_VIRTUAL_PORTS;
1163 1321 ha->reg_off = ®_off_2400_2500;
1322 + ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1323 + ha->interrupt_count = 2;
1324 + ha->osc_max_cnt = 2048;
1325 + ha->fw_class = 0x8100;
1164 1326 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1165 1327 goto attach_failed;
1166 1328 }
1167 - ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1329 + ha->risc_dump_size = QL_81XX_FW_DUMP_SIZE;
1168 1330 ha->fcp_cmd = ql_command_24xx_iocb;
1169 - ha->ip_cmd = ql_ip_24xx_iocb;
1170 1331 ha->ms_cmd = ql_ms_24xx_iocb;
1171 1332 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1172 1333 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1334 + if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1335 + ha->flags |= MULTI_QUEUE;
1336 + }
1173 1337 break;
1174 1338
1175 1339 case 0x8021:
1176 - if (ha->function_number & BIT_0) {
1177 - ha->flags |= FUNCTION_1;
1340 + if (ha->pci_function_number & BIT_0) {
1341 + ha->function_number = 1;
1178 1342 }
1179 - ha->cfg_flags |= CFG_CTRL_8021;
1343 + ha->cfg_flags |= CFG_CTRL_82XX;
1344 + ha->flags |= VP_ENABLED;
1345 + ha->max_vports = MAX_8021_VIRTUAL_PORTS;
1180 1346 ha->reg_off = ®_off_8021;
1347 + ha->interrupt_count = 2;
1348 + ha->osc_max_cnt = 2048;
1181 1349 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1182 1350 ha->fcp_cmd = ql_command_24xx_iocb;
1183 1351 ha->ms_cmd = ql_ms_24xx_iocb;
1184 1352 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1185 1353 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1354 + ha->io_dma_attr.dma_attr_flags |=
1355 + DDI_DMA_RELAXED_ORDERING;
1186 1356
1187 1357 ha->nx_pcibase = ha->iobase;
1188 - ha->iobase += 0xBC000 + (ha->function_number << 11);
1358 + ha->iobase += 0xBC000 + (ha->pci_function_number << 11);
1189 1359 ha->iomap_iobase += 0xBC000 +
1190 - (ha->function_number << 11);
1360 + (ha->pci_function_number << 11);
1191 1361
1192 1362 /* map doorbell */
1193 1363 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS ||
1194 1364 ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1195 - 0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1365 + 0, regsize, &ql_dev_acc_attr,
1366 + &ha->db_dev_handle) !=
1196 1367 DDI_SUCCESS) {
1197 1368 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1198 1369 "(doorbell) failed", QL_NAME, instance);
1199 1370 goto attach_failed;
1200 1371 }
1201 1372 progress |= QL_DB_IOBASE_MAPPED;
1202 1373
1374 + if (qlc_fm_check_acc_handle(ha, ha->db_dev_handle)
1375 + != DDI_FM_OK) {
1376 + qlc_fm_report_err_impact(ha,
1377 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
1378 + goto attach_failed;
1379 + }
1380 +
1203 1381 ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1204 - (ha->function_number << 12));
1382 + (ha->pci_function_number << 12));
1205 1383 ha->db_read = ha->nx_pcibase + (512 * 1024) +
1206 - (ha->function_number * 8);
1384 + (ha->pci_function_number * 8);
1207 1385
1208 1386 ql_8021_update_crb_int_ptr(ha);
1209 1387 ql_8021_set_drv_active(ha);
1210 1388 break;
1211 1389
1212 1390 default:
1213 1391 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1214 1392 QL_NAME, instance, ha->device_id);
1215 1393 goto attach_failed;
1216 1394 }
1217 1395
1218 - /* Setup hba buffer. */
1396 + ha->outstanding_cmds = kmem_zalloc(
1397 + sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt,
1398 + KM_SLEEP);
1219 1399
1220 - size = CFG_IST(ha, CFG_CTRL_24258081) ?
1221 - (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1222 - (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1223 - RCVBUF_QUEUE_SIZE);
1400 + /* Setup interrupts */
1401 + if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1402 + cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1403 + "rval=%xh", QL_NAME, instance, rval);
1404 + goto attach_failed;
1405 + }
1224 1406
1225 - if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1226 - QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1407 + progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1408 +
1409 + /* Setup hba buffer. */
1410 + if (ql_create_queues(ha) != QL_SUCCESS) {
1227 1411 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1228 1412 "alloc failed", QL_NAME, instance);
1229 1413 goto attach_failed;
1230 1414 }
1231 1415 progress |= QL_HBA_BUFFER_SETUP;
1232 1416
1233 - /* Setup buffer pointers. */
1234 - ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1235 - REQUEST_Q_BUFFER_OFFSET;
1236 - ha->request_ring_bp = (struct cmd_entry *)
1237 - ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1238 -
1239 - ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1240 - RESPONSE_Q_BUFFER_OFFSET;
1241 - ha->response_ring_bp = (struct sts_entry *)
1242 - ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1243 -
1244 - ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1245 - RCVBUF_Q_BUFFER_OFFSET;
1246 - ha->rcvbuf_ring_bp = (struct rcvbuf *)
1247 - ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1248 -
1249 1417 /* Allocate resource for QLogic IOCTL */
1250 1418 (void) ql_alloc_xioctl_resource(ha);
1251 1419
1252 - /* Setup interrupts */
1253 - if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1254 - cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1255 - "rval=%xh", QL_NAME, instance, rval);
1420 +
1421 + if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1422 + cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1423 + QL_NAME, instance);
1256 1424 goto attach_failed;
1257 1425 }
1258 1426
1259 - progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1427 + progress |= QL_NVRAM_CACHE_CREATED;
1260 1428
1261 - if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1262 - cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1429 + if (ql_plogi_params_desc_ctor(ha) != DDI_SUCCESS) {
1430 + cmn_err(CE_WARN, "%s(%d): can't setup plogi params",
1263 1431 QL_NAME, instance);
1264 1432 goto attach_failed;
1265 1433 }
1266 1434
1435 + progress |= QL_PLOGI_PARAMS_CREATED;
1436 +
1267 1437 /*
1268 1438 * Allocate an N Port information structure
1269 1439 * for use when in P2P topology.
1270 1440 */
1271 1441 ha->n_port = (ql_n_port_info_t *)
1272 1442 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1273 1443 if (ha->n_port == NULL) {
1274 1444 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1275 1445 QL_NAME, instance);
1276 1446 goto attach_failed;
1277 1447 }
1278 1448
1279 1449 progress |= QL_N_PORT_INFO_CREATED;
1280 1450
1281 1451 /*
1282 1452 * Determine support for Power Management
1283 1453 */
1284 1454 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1285 1455
1286 1456 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1287 1457 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1288 1458 if (cap == PCI_CAP_ID_PM) {
1289 1459 ha->pm_capable = 1;
1290 1460 break;
1291 1461 }
1292 1462 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1293 1463 PCI_CAP_NEXT_PTR);
1294 1464 }
1295 1465
1296 1466 if (ha->pm_capable) {
1297 1467 /*
1298 1468 * Enable PM for 2200 based HBAs only.
1299 1469 */
1300 1470 if (ha->device_id != 0x2200) {
1301 1471 ha->pm_capable = 0;
1302 1472 }
1303 1473 }
1304 1474
1305 1475 if (ha->pm_capable) {
1306 1476 ha->pm_capable = ql_enable_pm;
1307 1477 }
1308 1478
1309 1479 if (ha->pm_capable) {
1310 1480 /*
1311 1481 * Initialize power management bookkeeping;
1312 1482 * components are created idle.
1313 1483 */
1314 1484 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1315 1485 pmcomps[0] = buf;
1316 1486
1317 1487 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1318 1488 if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1319 1489 dip, "pm-components", pmcomps,
1320 1490 sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1321 1491 DDI_PROP_SUCCESS) {
1322 1492 cmn_err(CE_WARN, "%s(%d): failed to create"
1323 1493 " pm-components property", QL_NAME,
1324 1494 instance);
1325 1495
1326 1496 /* Initialize adapter. */
1327 1497 ha->power_level = PM_LEVEL_D0;
1328 1498 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1329 1499 cmn_err(CE_WARN, "%s(%d): failed to"
1330 1500 " initialize adapter", QL_NAME,
1331 1501 instance);
1332 1502 goto attach_failed;
1333 1503 }
1334 1504 } else {
1335 1505 ha->power_level = PM_LEVEL_D3;
1336 1506 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1337 1507 PM_LEVEL_D0) != DDI_SUCCESS) {
1338 1508 cmn_err(CE_WARN, "%s(%d): failed to"
1339 1509 " raise power or initialize"
1340 1510 " adapter", QL_NAME, instance);
1341 1511 }
1342 1512 }
1343 1513 } else {
1344 1514 /* Initialize adapter. */
1345 1515 ha->power_level = PM_LEVEL_D0;
1346 1516 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
|
↓ open down ↓ |
70 lines elided |
↑ open up ↑ |
1347 1517 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1348 1518 " adapter", QL_NAME, instance);
1349 1519 }
1350 1520 }
1351 1521
1352 1522 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1353 1523 ha->fw_subminor_version == 0) {
1354 1524 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1355 1525 QL_NAME, ha->instance);
1356 1526 } else {
1357 - int rval;
1527 + int rval, rval1;
1358 1528 char ver_fmt[256];
1359 1529
1360 - rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1530 + rval1 = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1361 1531 "Firmware version %d.%d.%d", ha->fw_major_version,
1362 1532 ha->fw_minor_version, ha->fw_subminor_version);
1363 1533
1364 1534 if (CFG_IST(ha, CFG_CTRL_81XX)) {
1365 - rval = (int)snprintf(ver_fmt + rval,
1535 + rval = (int)snprintf(ver_fmt + rval1,
1366 1536 (size_t)sizeof (ver_fmt),
1367 1537 ", MPI fw version %d.%d.%d",
1368 1538 ha->mpi_fw_major_version,
1369 1539 ha->mpi_fw_minor_version,
1370 1540 ha->mpi_fw_subminor_version);
1371 1541
1372 1542 if (ha->subsys_id == 0x17B ||
1373 1543 ha->subsys_id == 0x17D) {
1374 - (void) snprintf(ver_fmt + rval,
1544 + (void) snprintf(ver_fmt + rval1 + rval,
1375 1545 (size_t)sizeof (ver_fmt),
1376 1546 ", PHY fw version %d.%d.%d",
1377 1547 ha->phy_fw_major_version,
1378 1548 ha->phy_fw_minor_version,
1379 1549 ha->phy_fw_subminor_version);
1380 1550 }
1381 1551 }
1382 1552 cmn_err(CE_NOTE, "!%s(%d): %s",
1383 1553 QL_NAME, ha->instance, ver_fmt);
1384 1554 }
1385 1555
1386 1556 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1387 1557 "controller", KSTAT_TYPE_RAW,
1388 1558 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1389 1559 if (ha->k_stats == NULL) {
1390 1560 cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1391 1561 QL_NAME, instance);
1392 1562 goto attach_failed;
1393 1563 }
1394 1564 progress |= QL_KSTAT_CREATED;
1395 1565
1396 1566 ha->adapter_stats->version = 1;
1397 1567 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1398 1568 ha->k_stats->ks_private = ha;
1399 1569 ha->k_stats->ks_update = ql_kstat_update;
1400 1570 ha->k_stats->ks_ndata = 1;
1401 1571 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1402 1572 kstat_install(ha->k_stats);
1403 1573
1404 1574 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1405 1575 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1406 1576 cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1407 1577 QL_NAME, instance);
1408 1578 goto attach_failed;
1409 1579 }
1410 1580 progress |= QL_MINOR_NODE_CREATED;
1411 1581
1412 1582 /* Allocate a transport structure for this instance */
1413 1583 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1414 1584 if (tran == NULL) {
|
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
1415 1585 cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1416 1586 QL_NAME, instance);
1417 1587 goto attach_failed;
1418 1588 }
1419 1589
1420 1590 progress |= QL_FCA_TRAN_ALLOCED;
1421 1591
1422 1592 /* fill in the structure */
1423 1593 tran->fca_numports = 1;
1424 1594 tran->fca_version = FCTL_FCA_MODREV_5;
1425 - if (CFG_IST(ha, CFG_CTRL_2422)) {
1426 - tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1427 - } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1428 - tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1429 - }
1595 + tran->fca_num_npivports = ha->max_vports ?
1596 + ha->max_vports - 1 : 0;
1430 1597 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1431 1598 tran->fca_perm_pwwn.raw_wwn, 8);
1432 1599
1433 - EL(ha, "FCA version %d\n", tran->fca_version);
1600 + if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1601 + ha->io_dma_attr = ha->bit64_io_dma_attr;
1602 + ha->fcsm_cmd_dma_attr = ha->bit64_io_dma_attr;
1603 + ha->fcsm_rsp_dma_attr = ha->bit64_io_dma_attr;
1604 + ha->fcip_cmd_dma_attr = ha->bit64_io_dma_attr;
1605 + ha->fcip_rsp_dma_attr = ha->bit64_io_dma_attr;
1606 + ha->fcp_cmd_dma_attr = ha->bit64_io_dma_attr;
1607 + ha->fcp_rsp_dma_attr = ha->bit64_io_dma_attr;
1608 + ha->fcp_data_dma_attr = ha->bit64_io_dma_attr;
1609 + } else {
1610 + ha->io_dma_attr = ha->bit32_io_dma_attr;
1611 + ha->fcsm_cmd_dma_attr = ha->bit32_io_dma_attr;
1612 + ha->fcsm_rsp_dma_attr = ha->bit32_io_dma_attr;
1613 + ha->fcip_cmd_dma_attr = ha->bit32_io_dma_attr;
1614 + ha->fcip_rsp_dma_attr = ha->bit32_io_dma_attr;
1615 + ha->fcp_cmd_dma_attr = ha->bit32_io_dma_attr;
1616 + ha->fcp_rsp_dma_attr = ha->bit32_io_dma_attr;
1617 + ha->fcp_data_dma_attr = ha->bit32_io_dma_attr;
1618 + }
1619 + ha->fcsm_cmd_dma_attr.dma_attr_sgllen = QL_FCSM_CMD_SGLLEN;
1620 + ha->fcsm_rsp_dma_attr.dma_attr_sgllen = QL_FCSM_RSP_SGLLEN;
1621 + ha->fcip_cmd_dma_attr.dma_attr_sgllen = QL_FCIP_CMD_SGLLEN;
1622 + ha->fcip_rsp_dma_attr.dma_attr_sgllen = QL_FCIP_RSP_SGLLEN;
1623 + ha->fcp_cmd_dma_attr.dma_attr_sgllen = QL_FCP_CMD_SGLLEN;
1624 + ha->fcp_rsp_dma_attr.dma_attr_sgllen = QL_FCP_RSP_SGLLEN;
1625 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
1626 + ha->io_dma_attr.dma_attr_flags |=
1627 + DDI_DMA_RELAXED_ORDERING;
1628 + ha->fcsm_cmd_dma_attr.dma_attr_flags |=
1629 + DDI_DMA_RELAXED_ORDERING;
1630 + ha->fcsm_rsp_dma_attr.dma_attr_flags |=
1631 + DDI_DMA_RELAXED_ORDERING;
1632 + ha->fcip_cmd_dma_attr.dma_attr_flags |=
1633 + DDI_DMA_RELAXED_ORDERING;
1634 + ha->fcip_rsp_dma_attr.dma_attr_flags |=
1635 + DDI_DMA_RELAXED_ORDERING;
1636 + ha->fcp_cmd_dma_attr.dma_attr_flags |=
1637 + DDI_DMA_RELAXED_ORDERING;
1638 + ha->fcp_rsp_dma_attr.dma_attr_flags |=
1639 + DDI_DMA_RELAXED_ORDERING;
1640 + ha->fcp_data_dma_attr.dma_attr_flags |=
1641 + DDI_DMA_RELAXED_ORDERING;
1642 + }
1434 1643
1435 1644 /* Specify the amount of space needed in each packet */
1436 1645 tran->fca_pkt_size = sizeof (ql_srb_t);
1437 1646
1438 1647 /* command limits are usually dictated by hardware */
1439 - tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1648 + tran->fca_cmd_max = ha->osc_max_cnt;
1440 1649
1441 1650 /* dmaattr are static, set elsewhere. */
1442 - if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1443 - tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1444 - tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1445 - tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1446 - tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1447 - tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1448 - tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1449 - tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1450 - tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1451 - } else {
1452 - tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1453 - tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1454 - tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1455 - tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1456 - tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1457 - tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1458 - tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1459 - tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1460 - }
1461 -
1651 + tran->fca_dma_attr = &ha->io_dma_attr;
1652 + tran->fca_dma_fcp_cmd_attr = &ha->fcp_cmd_dma_attr;
1653 + tran->fca_dma_fcp_rsp_attr = &ha->fcp_rsp_dma_attr;
1654 + tran->fca_dma_fcp_data_attr = &ha->fcp_data_dma_attr;
1655 + tran->fca_dma_fcsm_cmd_attr = &ha->fcsm_cmd_dma_attr;
1656 + tran->fca_dma_fcsm_rsp_attr = &ha->fcsm_rsp_dma_attr;
1657 + tran->fca_dma_fcip_cmd_attr = &ha->fcip_cmd_dma_attr;
1658 + tran->fca_dma_fcip_rsp_attr = &ha->fcip_rsp_dma_attr;
1462 1659 tran->fca_acc_attr = &ql_dev_acc_attr;
1463 1660 tran->fca_iblock = &(ha->iblock_cookie);
1464 1661
1465 1662 /* the remaining values are simply function vectors */
1466 1663 tran->fca_bind_port = ql_bind_port;
1467 1664 tran->fca_unbind_port = ql_unbind_port;
1468 1665 tran->fca_init_pkt = ql_init_pkt;
1469 1666 tran->fca_un_init_pkt = ql_un_init_pkt;
1470 1667 tran->fca_els_send = ql_els_send;
1471 1668 tran->fca_get_cap = ql_get_cap;
1472 1669 tran->fca_set_cap = ql_set_cap;
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1473 1670 tran->fca_getmap = ql_getmap;
1474 1671 tran->fca_transport = ql_transport;
1475 1672 tran->fca_ub_alloc = ql_ub_alloc;
1476 1673 tran->fca_ub_free = ql_ub_free;
1477 1674 tran->fca_ub_release = ql_ub_release;
1478 1675 tran->fca_abort = ql_abort;
1479 1676 tran->fca_reset = ql_reset;
1480 1677 tran->fca_port_manage = ql_port_manage;
1481 1678 tran->fca_get_device = ql_get_device;
1482 1679
1680 + EL(ha, "Transport interface setup. FCA version %d\n",
1681 + tran->fca_version);
1682 +
1483 1683 /* give it to the FC transport */
1484 1684 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1485 1685 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1486 1686 instance);
1487 1687 goto attach_failed;
1488 1688 }
1489 1689 progress |= QL_FCA_ATTACH_DONE;
1490 1690
1491 1691 /* Stash the structure so it can be freed at detach */
1492 1692 ha->tran = tran;
1493 1693
1494 1694 /* Acquire global state lock. */
1495 1695 GLOBAL_STATE_LOCK();
1496 1696
1497 1697 /* Add adapter structure to link list. */
1498 1698 ql_add_link_b(&ql_hba, &ha->hba);
1499 1699
1700 + /* Determine and populate HBA fru info */
1701 + ql_setup_fruinfo(ha);
1702 +
1703 + /* Release global state lock. */
1704 + GLOBAL_STATE_UNLOCK();
1705 +
1500 1706 /* Start one second driver timer. */
1707 + GLOBAL_TIMER_LOCK();
1501 1708 if (ql_timer_timeout_id == NULL) {
1502 1709 ql_timer_ticks = drv_usectohz(1000000);
1503 1710 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1504 1711 ql_timer_ticks);
1505 1712 }
1713 + GLOBAL_TIMER_UNLOCK();
1506 1714
1507 - /* Release global state lock. */
1508 - GLOBAL_STATE_UNLOCK();
1509 -
1510 - /* Determine and populate HBA fru info */
1511 - ql_setup_fruinfo(ha);
1512 -
1513 1715 /* Setup task_daemon thread. */
1514 - (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1515 - 0, &p0, TS_RUN, minclsyspri);
1716 + (void) snprintf(taskq_name, sizeof (taskq_name),
1717 + "qlc_%d_driver_thread", instance);
1718 + ha->driver_thread_taskq = ddi_taskq_create(NULL, taskq_name, 1,
1719 + TASKQ_DEFAULTPRI, 0);
1720 + (void) ddi_taskq_dispatch(ha->driver_thread_taskq,
1721 + ql_task_daemon, ha, DDI_SLEEP);
1722 + ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
1516 1723
1724 + (void) snprintf(taskq_name, sizeof (taskq_name),
1725 + "qlc_%d_comp_thd", instance);
1726 + ha->completion_taskq = ddi_taskq_create(0, taskq_name,
1727 + ha->completion_thds, maxclsyspri, 0);
1728 + for (size = 0; size < ha->completion_thds; size++) {
1729 + (void) ddi_taskq_dispatch(ha->completion_taskq,
1730 + ql_completion_thread, ha, DDI_SLEEP);
1731 + }
1732 +
1517 1733 progress |= QL_TASK_DAEMON_STARTED;
1518 1734
1519 1735 ddi_report_dev(dip);
1520 1736
1521 1737 /* Disable link reset in panic path */
1522 1738 ha->lip_on_panic = 1;
1523 1739
1524 1740 rval = DDI_SUCCESS;
1525 1741 break;
1526 1742
1527 1743 attach_failed:
1744 + if (progress & QL_FCA_INIT_FM) {
1745 + qlc_fm_fini(ha);
1746 + progress &= ~QL_FCA_INIT_FM;
1747 + }
1748 +
1528 1749 if (progress & QL_FCA_ATTACH_DONE) {
1529 1750 (void) fc_fca_detach(dip);
1530 1751 progress &= ~QL_FCA_ATTACH_DONE;
1531 1752 }
1532 1753
1533 1754 if (progress & QL_FCA_TRAN_ALLOCED) {
1534 1755 kmem_free(tran, sizeof (fc_fca_tran_t));
1535 1756 progress &= ~QL_FCA_TRAN_ALLOCED;
1536 1757 }
1537 1758
1538 1759 if (progress & QL_MINOR_NODE_CREATED) {
1539 1760 ddi_remove_minor_node(dip, "devctl");
1540 1761 progress &= ~QL_MINOR_NODE_CREATED;
1541 1762 }
1542 1763
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
1543 1764 if (progress & QL_KSTAT_CREATED) {
1544 1765 kstat_delete(ha->k_stats);
1545 1766 progress &= ~QL_KSTAT_CREATED;
1546 1767 }
1547 1768
1548 1769 if (progress & QL_N_PORT_INFO_CREATED) {
1549 1770 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1550 1771 progress &= ~QL_N_PORT_INFO_CREATED;
1551 1772 }
1552 1773
1553 - if (progress & QL_TASK_DAEMON_STARTED) {
1554 - TASK_DAEMON_LOCK(ha);
1774 + if (progress & QL_PLOGI_PARAMS_CREATED) {
1775 + (void) ql_plogi_params_desc_dtor(ha);
1776 + progress &= ~QL_PLOGI_PARAMS_CREATED;
1777 + }
1555 1778
1556 - ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1779 + if (progress & QL_NVRAM_CACHE_CREATED) {
1780 + (void) ql_nvram_cache_desc_dtor(ha);
1781 + progress &= ~QL_NVRAM_CACHE_CREATED;
1782 + }
1557 1783
1558 - cv_signal(&ha->cv_task_daemon);
1784 + if (progress & QL_TASK_DAEMON_STARTED) {
1785 + if (ha->driver_thread_taskq) {
1786 + while (ha->task_daemon_flags &
1787 + TASK_DAEMON_ALIVE_FLG) {
1788 + /* Delay for 1 tick (10 ms). */
1789 + ql_awaken_task_daemon(ha, NULL,
1790 + TASK_DAEMON_STOP_FLG, 0);
1791 + delay(1);
1792 + }
1793 + ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1559 1794
1560 - /* Release task daemon lock. */
1561 - TASK_DAEMON_UNLOCK(ha);
1795 + ddi_taskq_destroy(ha->driver_thread_taskq);
1796 + ha->driver_thread_taskq = NULL;
1797 + }
1798 + if (ha->completion_taskq) {
1799 + ADAPTER_STATE_LOCK(ha);
1800 + ha->flags |= COMP_THD_TERMINATE;
1801 + ADAPTER_STATE_UNLOCK(ha);
1562 1802
1563 - /* Wait for for task daemon to stop running. */
1564 - while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1565 - ql_delay(ha, 10000);
1803 + do {
1804 + COMP_Q_LOCK(ha);
1805 + cv_broadcast(&ha->cv_comp_thread);
1806 + COMP_Q_UNLOCK(ha);
1807 + ql_delay(ha, 10000);
1808 + } while (ha->comp_thds_active != 0);
1809 +
1810 + ddi_taskq_destroy(ha->completion_taskq);
1811 + ha->completion_taskq = NULL;
1566 1812 }
1567 1813 progress &= ~QL_TASK_DAEMON_STARTED;
1568 1814 }
1569 1815
1570 1816 if (progress & QL_DB_IOBASE_MAPPED) {
1571 1817 ql_8021_clr_drv_active(ha);
1572 1818 ddi_regs_map_free(&ha->db_dev_handle);
1573 1819 progress &= ~QL_DB_IOBASE_MAPPED;
1574 1820 }
1575 1821 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1576 1822 ddi_regs_map_free(&ha->iomap_dev_handle);
1577 1823 progress &= ~QL_IOMAP_IOBASE_MAPPED;
1578 1824 }
1825 + if (progress & QL_REGS_MAPPED) {
1826 + if (ha->mbar_dev_handle) {
1827 + ddi_regs_map_free(&ha->mbar_dev_handle);
1828 + ha->mbar_dev_handle = 0;
1829 + }
1830 + }
1579 1831
1580 1832 if (progress & QL_CONFIG_SPACE_SETUP) {
1581 1833 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1582 1834 ddi_regs_map_free(&ha->sbus_config_handle);
1583 1835 } else {
1584 1836 pci_config_teardown(&ha->pci_handle);
1585 1837 }
1586 1838 progress &= ~QL_CONFIG_SPACE_SETUP;
1587 1839 }
1588 1840
1589 1841 if (progress & QL_INTR_ADDED) {
1590 1842 ql_disable_intr(ha);
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
1591 1843 ql_release_intr(ha);
1592 1844 progress &= ~QL_INTR_ADDED;
1593 1845 }
1594 1846
1595 1847 if (progress & QL_MUTEX_CV_INITED) {
1596 1848 ql_destroy_mutex(ha);
1597 1849 progress &= ~QL_MUTEX_CV_INITED;
1598 1850 }
1599 1851
1600 1852 if (progress & QL_HBA_BUFFER_SETUP) {
1601 - ql_free_phys(ha, &ha->hba_buf);
1853 + ql_delete_queues(ha);
1602 1854 progress &= ~QL_HBA_BUFFER_SETUP;
1603 1855 }
1604 1856
1605 1857 if (progress & QL_REGS_MAPPED) {
1606 1858 ddi_regs_map_free(&ha->dev_handle);
1607 1859 if (ha->sbus_fpga_iobase != NULL) {
1608 1860 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1609 1861 }
1610 1862 progress &= ~QL_REGS_MAPPED;
1611 1863 }
1612 1864
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1613 1865 if (progress & QL_SOFT_STATE_ALLOCED) {
1614 1866
1615 1867 ql_fcache_rel(ha->fcache);
1616 1868
1617 1869 kmem_free(ha->adapter_stats,
1618 1870 sizeof (*ha->adapter_stats));
1619 1871
1620 1872 kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1621 1873 QL_UB_LIMIT);
1622 1874
1623 - kmem_free(ha->outstanding_cmds,
1624 - sizeof (*ha->outstanding_cmds) *
1625 - MAX_OUTSTANDING_COMMANDS);
1875 + if (ha->outstanding_cmds != NULL) {
1876 + kmem_free(ha->outstanding_cmds,
1877 + sizeof (*ha->outstanding_cmds) *
1878 + ha->osc_max_cnt);
1879 + }
1626 1880
1627 1881 if (ha->devpath != NULL) {
1628 1882 kmem_free(ha->devpath,
1629 1883 strlen(ha->devpath) + 1);
1630 1884 }
1631 1885
1632 1886 kmem_free(ha->dev, sizeof (*ha->dev) *
1633 1887 DEVICE_HEAD_LIST_SIZE);
1634 1888
1635 1889 if (ha->xioctl != NULL) {
1636 1890 ql_free_xioctl_resource(ha);
1637 1891 }
1638 1892
1639 1893 if (ha->fw_module != NULL) {
1640 1894 (void) ddi_modclose(ha->fw_module);
1641 1895 }
1642 - (void) ql_el_trace_desc_dtor(ha);
1643 - (void) ql_nvram_cache_desc_dtor(ha);
1896 + (void) ql_el_trace_dealloc(ha);
1644 1897
1645 1898 ddi_soft_state_free(ql_state, instance);
1646 1899 progress &= ~QL_SOFT_STATE_ALLOCED;
1647 1900 }
1648 1901
1649 1902 ddi_prop_remove_all(dip);
1650 1903 rval = DDI_FAILURE;
1651 1904 break;
1652 1905
1653 1906 case DDI_RESUME:
1654 1907 rval = DDI_FAILURE;
1655 1908
1656 1909 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1657 1910 if (ha == NULL) {
1658 1911 cmn_err(CE_WARN, "%s(%d): can't get soft state",
1659 1912 QL_NAME, instance);
1660 1913 break;
1661 1914 }
1662 1915
1663 1916 ha->power_level = PM_LEVEL_D3;
1664 1917 if (ha->pm_capable) {
1665 1918 /*
1666 1919 * Get ql_power to do power on initialization
1667 1920 */
1668 1921 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1669 1922 PM_LEVEL_D0) != DDI_SUCCESS) {
1670 1923 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1671 1924 " power", QL_NAME, instance);
1672 1925 }
1673 1926 }
1674 1927
1675 1928 /*
1676 1929 * There is a bug in DR that prevents PM framework
1677 1930 * from calling ql_power.
1678 1931 */
1679 1932 if (ha->power_level == PM_LEVEL_D3) {
1680 1933 ha->power_level = PM_LEVEL_D0;
1681 1934
|
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
1682 1935 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1683 1936 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1684 1937 " adapter", QL_NAME, instance);
1685 1938 }
1686 1939
1687 1940 /* Wake up task_daemon. */
1688 1941 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1689 1942 0);
1690 1943 }
1691 1944
1692 - /* Acquire global state lock. */
1693 - GLOBAL_STATE_LOCK();
1694 -
1695 1945 /* Restart driver timer. */
1946 + GLOBAL_TIMER_LOCK();
1696 1947 if (ql_timer_timeout_id == NULL) {
1697 1948 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1698 1949 ql_timer_ticks);
1699 1950 }
      1951 +		GLOBAL_TIMER_UNLOCK();
1700 1952
1701 - /* Release global state lock. */
1702 - GLOBAL_STATE_UNLOCK();
1703 -
1704 1953 /* Wake up command start routine. */
1705 1954 ADAPTER_STATE_LOCK(ha);
1706 1955 ha->flags &= ~ADAPTER_SUSPENDED;
1707 1956 ADAPTER_STATE_UNLOCK(ha);
1708 1957
1709 - /*
1710 - * Transport doesn't make FC discovery in polled
1711 - * mode; So we need the daemon thread's services
1712 - * right here.
1713 - */
1714 - (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1715 -
1716 1958 rval = DDI_SUCCESS;
1717 1959
1718 1960 /* Restart IP if it was running. */
1719 1961 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1720 1962 (void) ql_initialize_ip(ha);
1721 1963 ql_isp_rcvbuf(ha);
1722 1964 }
1723 1965 break;
1724 1966
1725 1967 default:
1726 1968 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1727 1969 " %x", QL_NAME, ddi_get_instance(dip), cmd);
1728 1970 rval = DDI_FAILURE;
1729 1971 break;
1730 1972 }
1731 1973
1732 1974 kmem_free(buf, MAXPATHLEN);
1733 1975
1734 1976 if (rval != DDI_SUCCESS) {
1735 1977 /*EMPTY*/
1736 - QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1978 + QL_PRINT_2(ha, "failed instance=%d, rval = %xh\n",
1737 1979 ddi_get_instance(dip), rval);
1738 1980 } else {
1739 1981 /*EMPTY*/
1740 - QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1982 + QL_PRINT_3(ha, "done\n");
1741 1983 }
1742 1984
1743 1985 return (rval);
1744 1986 }
1745 1987
1746 1988 /*
1747 1989 * ql_detach
1748 1990 * Used to remove all the states associated with a given
1749 1991 * instances of a device node prior to the removal of that
1750 1992 * instance from the system.
1751 1993 *
1752 1994 * Input:
1753 1995 * dip = pointer to device information structure.
1754 1996 * cmd = type of detach.
1755 1997 *
1756 1998 * Returns:
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
1757 1999 * DDI_SUCCESS or DDI_FAILURE.
1758 2000 *
1759 2001 * Context:
1760 2002 * Kernel context.
1761 2003 */
1762 2004 static int
1763 2005 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1764 2006 {
1765 2007 ql_adapter_state_t *ha, *vha;
1766 2008 ql_tgt_t *tq;
1767 - int delay_cnt;
1768 2009 uint16_t index;
1769 2010 ql_link_t *link;
1770 2011 char *buf;
1771 2012 timeout_id_t timer_id = NULL;
1772 2013 int suspend, rval = DDI_SUCCESS;
1773 2014
1774 2015 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1775 2016 if (ha == NULL) {
1776 - QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2017 + QL_PRINT_2(NULL, "no adapter, instance=%d\n",
1777 2018 ddi_get_instance(dip));
1778 2019 return (DDI_FAILURE);
1779 2020 }
1780 2021
1781 - QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
2022 + QL_PRINT_3(ha, "started, cmd=%xh\n", cmd);
1782 2023
1783 2024 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1784 2025
1785 2026 switch (cmd) {
1786 2027 case DDI_DETACH:
1787 2028 ADAPTER_STATE_LOCK(ha);
1788 2029 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1789 2030 ADAPTER_STATE_UNLOCK(ha);
1790 2031
1791 - TASK_DAEMON_LOCK(ha);
2032 + /* Wait for task thread to see suspend flag. */
2033 + while (!(ha->task_daemon_flags & TASK_DAEMON_STALLED_FLG) &&
2034 + ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
2035 + ql_awaken_task_daemon(ha, NULL, 0, 0);
2036 + /* Delay for 1 tick (10 milliseconds). */
2037 + delay(1);
2038 + }
1792 2039
1793 - if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1794 - ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1795 - cv_signal(&ha->cv_task_daemon);
2040 + if (ha->driver_thread_taskq) {
2041 + while (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
2042 + /* Delay for 1 tick (10 milliseconds). */
2043 + ql_awaken_task_daemon(ha, NULL,
2044 + TASK_DAEMON_STOP_FLG, 0);
2045 + delay(1);
2046 + }
2047 + ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1796 2048
1797 - TASK_DAEMON_UNLOCK(ha);
2049 + ddi_taskq_destroy(ha->driver_thread_taskq);
2050 + ha->driver_thread_taskq = NULL;
2051 + }
1798 2052
1799 - (void) ql_wait_for_td_stop(ha);
2053 + if (ha->completion_taskq) {
2054 + ADAPTER_STATE_LOCK(ha);
2055 + ha->flags |= COMP_THD_TERMINATE;
2056 + ADAPTER_STATE_UNLOCK(ha);
1800 2057
1801 - TASK_DAEMON_LOCK(ha);
1802 - if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1803 - ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1804 - EL(ha, "failed, could not stop task daemon\n");
1805 - }
2058 + do {
2059 + COMP_Q_LOCK(ha);
2060 + cv_broadcast(&ha->cv_comp_thread);
2061 + COMP_Q_UNLOCK(ha);
2062 + ql_delay(ha, 10000);
2063 + } while (ha->comp_thds_active != 0);
2064 +
2065 + ddi_taskq_destroy(ha->completion_taskq);
2066 + ha->completion_taskq = NULL;
1806 2067 }
1807 - TASK_DAEMON_UNLOCK(ha);
1808 2068
1809 - GLOBAL_STATE_LOCK();
1810 -
1811 2069 /* Disable driver timer if no adapters. */
2070 + GLOBAL_TIMER_LOCK();
1812 2071 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1813 2072 ql_hba.last == &ha->hba) {
1814 2073 timer_id = ql_timer_timeout_id;
1815 2074 ql_timer_timeout_id = NULL;
1816 2075 }
1817 - ql_remove_link(&ql_hba, &ha->hba);
2076 + GLOBAL_TIMER_UNLOCK();
1818 2077
1819 - GLOBAL_STATE_UNLOCK();
1820 -
1821 2078 if (timer_id) {
1822 2079 (void) untimeout(timer_id);
1823 2080 }
1824 2081
2082 + GLOBAL_STATE_LOCK();
2083 + ql_remove_link(&ql_hba, &ha->hba);
2084 + GLOBAL_STATE_UNLOCK();
2085 +
1825 2086 if (ha->pm_capable) {
1826 2087 if (pm_lower_power(dip, QL_POWER_COMPONENT,
1827 2088 PM_LEVEL_D3) != DDI_SUCCESS) {
1828 2089 cmn_err(CE_WARN, "%s(%d): failed to lower the"
1829 2090 " power", QL_NAME, ha->instance);
1830 2091 }
1831 2092 }
1832 2093
1833 2094 /*
1834 2095 * If pm_lower_power shutdown the adapter, there
1835 2096 * isn't much else to do
1836 2097 */
1837 2098 if (ha->power_level != PM_LEVEL_D3) {
1838 2099 ql_halt(ha, PM_LEVEL_D3);
1839 2100 }
1840 2101
1841 2102 /* Remove virtual ports. */
1842 2103 while ((vha = ha->vp_next) != NULL) {
1843 2104 ql_vport_destroy(vha);
1844 2105 }
1845 2106
1846 2107 /* Free target queues. */
1847 2108 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1848 2109 link = ha->dev[index].first;
1849 2110 while (link != NULL) {
1850 2111 tq = link->base_address;
1851 2112 link = link->next;
1852 2113 ql_dev_free(ha, tq);
1853 2114 }
1854 2115 }
1855 2116
1856 2117 /*
1857 2118 * Free unsolicited buffers.
1858 2119 * If we are here then there are no ULPs still
1859 2120 * alive that wish to talk to ql so free up
1860 2121 * any SRB_IP_UB_UNUSED buffers that are
1861 2122 * lingering around
1862 2123 */
1863 2124 QL_UB_LOCK(ha);
1864 2125 for (index = 0; index < QL_UB_LIMIT; index++) {
1865 2126 fc_unsol_buf_t *ubp = ha->ub_array[index];
1866 2127
1867 2128 if (ubp != NULL) {
1868 2129 ql_srb_t *sp = ubp->ub_fca_private;
1869 2130
1870 2131 sp->flags |= SRB_UB_FREE_REQUESTED;
1871 2132
1872 2133 while (!(sp->flags & SRB_UB_IN_FCA) ||
1873 2134 (sp->flags & (SRB_UB_CALLBACK |
1874 2135 SRB_UB_ACQUIRED))) {
1875 2136 QL_UB_UNLOCK(ha);
1876 2137 delay(drv_usectohz(100000));
1877 2138 QL_UB_LOCK(ha);
1878 2139 }
1879 2140 ha->ub_array[index] = NULL;
1880 2141
1881 2142 QL_UB_UNLOCK(ha);
1882 2143 ql_free_unsolicited_buffer(ha, ubp);
1883 2144 QL_UB_LOCK(ha);
1884 2145 }
1885 2146 }
1886 2147 QL_UB_UNLOCK(ha);
1887 2148
1888 2149 /* Free any saved RISC code. */
1889 2150 if (ha->risc_code != NULL) {
1890 2151 kmem_free(ha->risc_code, ha->risc_code_size);
1891 2152 ha->risc_code = NULL;
1892 2153 ha->risc_code_size = 0;
1893 2154 }
1894 2155
1895 2156 if (ha->fw_module != NULL) {
1896 2157 (void) ddi_modclose(ha->fw_module);
1897 2158 ha->fw_module = NULL;
1898 2159 }
1899 2160
1900 2161 /* Free resources. */
1901 2162 ddi_prop_remove_all(dip);
|
↓ open down ↓ |
67 lines elided |
↑ open up ↑ |
1902 2163 (void) fc_fca_detach(dip);
1903 2164 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1904 2165 ddi_remove_minor_node(dip, "devctl");
1905 2166 if (ha->k_stats != NULL) {
1906 2167 kstat_delete(ha->k_stats);
1907 2168 }
1908 2169
1909 2170 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1910 2171 ddi_regs_map_free(&ha->sbus_config_handle);
1911 2172 } else {
1912 - if (CFG_IST(ha, CFG_CTRL_8021)) {
2173 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
1913 2174 ql_8021_clr_drv_active(ha);
1914 2175 ddi_regs_map_free(&ha->db_dev_handle);
1915 2176 }
1916 2177 if (ha->iomap_dev_handle != ha->dev_handle) {
1917 2178 ddi_regs_map_free(&ha->iomap_dev_handle);
1918 2179 }
1919 2180 pci_config_teardown(&ha->pci_handle);
1920 2181 }
1921 2182
1922 2183 ql_disable_intr(ha);
1923 2184 ql_release_intr(ha);
1924 2185
1925 2186 ql_free_xioctl_resource(ha);
1926 2187
1927 2188 ql_destroy_mutex(ha);
1928 2189
1929 - ql_free_phys(ha, &ha->hba_buf);
2190 + ql_delete_queues(ha);
1930 2191 ql_free_phys(ha, &ha->fwexttracebuf);
1931 2192 ql_free_phys(ha, &ha->fwfcetracebuf);
1932 2193
1933 2194 ddi_regs_map_free(&ha->dev_handle);
1934 2195 if (ha->sbus_fpga_iobase != NULL) {
1935 2196 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1936 2197 }
2198 + if (ha->mbar_dev_handle != NULL) {
2199 + ddi_regs_map_free(&ha->mbar_dev_handle);
2200 + }
1937 2201
1938 2202 ql_fcache_rel(ha->fcache);
1939 2203 if (ha->vcache != NULL) {
1940 2204 kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1941 2205 }
1942 2206
1943 2207 if (ha->pi_attrs != NULL) {
1944 2208 kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1945 2209 }
1946 2210
1947 2211 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1948 2212
1949 2213 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1950 2214
1951 2215 kmem_free(ha->outstanding_cmds,
1952 - sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
2216 + sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt);
1953 2217
1954 2218 if (ha->n_port != NULL) {
1955 2219 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1956 2220 }
1957 2221
1958 2222 if (ha->devpath != NULL) {
1959 2223 kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1960 2224 }
1961 2225
1962 2226 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1963 2227
2228 + (void) ql_plogi_params_desc_dtor(ha);
2229 +
2230 + (void) ql_nvram_cache_desc_dtor(ha);
2231 +
2232 + (void) qlc_fm_fini(ha);
2233 +
1964 2234 EL(ha, "detached\n");
1965 2235
2236 + (void) ql_el_trace_dealloc(ha);
2237 +
1966 2238 ddi_soft_state_free(ql_state, (int)ha->instance);
1967 2239
2240 + rval = DDI_SUCCESS;
2241 +
1968 2242 break;
1969 2243
1970 2244 case DDI_SUSPEND:
1971 2245 ADAPTER_STATE_LOCK(ha);
1972 -
1973 - delay_cnt = 0;
1974 2246 ha->flags |= ADAPTER_SUSPENDED;
1975 - while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1976 - ADAPTER_STATE_UNLOCK(ha);
1977 - delay(drv_usectohz(1000000));
1978 - ADAPTER_STATE_LOCK(ha);
2247 + ADAPTER_STATE_UNLOCK(ha);
2248 +
2249 + /* Disable driver timer if last adapter. */
2250 + GLOBAL_TIMER_LOCK();
2251 + if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2252 + ql_hba.last == &ha->hba) {
2253 + timer_id = ql_timer_timeout_id;
2254 + ql_timer_timeout_id = NULL;
1979 2255 }
1980 - if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1981 - ha->flags &= ~ADAPTER_SUSPENDED;
1982 - ADAPTER_STATE_UNLOCK(ha);
1983 - rval = DDI_FAILURE;
1984 - cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1985 - " busy %xh flags %xh", QL_NAME, ha->instance,
1986 - ha->busy, ha->flags);
1987 - break;
2256 + GLOBAL_TIMER_UNLOCK();
2257 +
2258 + if (timer_id) {
2259 + (void) untimeout(timer_id);
1988 2260 }
1989 2261
1990 - ADAPTER_STATE_UNLOCK(ha);
1991 -
1992 2262 if (ha->flags & IP_INITIALIZED) {
1993 2263 (void) ql_shutdown_ip(ha);
1994 2264 }
1995 2265
1996 2266 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1997 2267 ADAPTER_STATE_LOCK(ha);
1998 2268 ha->flags &= ~ADAPTER_SUSPENDED;
1999 2269 ADAPTER_STATE_UNLOCK(ha);
2000 2270 cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2001 2271 QL_NAME, ha->instance, suspend);
2002 2272
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
2003 2273 /* Restart IP if it was running. */
2004 2274 if (ha->flags & IP_ENABLED &&
2005 2275 !(ha->flags & IP_INITIALIZED)) {
2006 2276 (void) ql_initialize_ip(ha);
2007 2277 ql_isp_rcvbuf(ha);
2008 2278 }
2009 2279 rval = DDI_FAILURE;
2010 2280 break;
2011 2281 }
2012 2282
2013 - /* Acquire global state lock. */
2014 - GLOBAL_STATE_LOCK();
2015 -
2016 - /* Disable driver timer if last adapter. */
2017 - if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2018 - ql_hba.last == &ha->hba) {
2019 - timer_id = ql_timer_timeout_id;
2020 - ql_timer_timeout_id = NULL;
2021 - }
2022 - GLOBAL_STATE_UNLOCK();
2023 -
2024 - if (timer_id) {
2025 - (void) untimeout(timer_id);
2026 - }
2027 -
2028 2283 EL(ha, "suspended\n");
2029 2284
2030 2285 break;
2031 2286
2032 2287 default:
2033 2288 rval = DDI_FAILURE;
2034 2289 break;
2035 2290 }
2036 2291
2037 2292 kmem_free(buf, MAXPATHLEN);
2038 2293
2039 2294 if (rval != DDI_SUCCESS) {
2040 - if (ha != NULL) {
2041 - EL(ha, "failed, rval = %xh\n", rval);
2042 - } else {
2043 - /*EMPTY*/
2044 - QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2045 - ddi_get_instance(dip), rval);
2046 - }
2295 + EL(ha, "failed, rval = %xh\n", rval);
2047 2296 } else {
2048 2297 /*EMPTY*/
2049 - QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2298 + QL_PRINT_3(ha, "done\n");
2050 2299 }
2051 2300
2052 2301 return (rval);
2053 2302 }
2054 2303
2055 -
2056 2304 /*
2057 2305 * ql_power
2058 2306 * Power a device attached to the system.
2059 2307 *
2060 2308 * Input:
2061 2309 * dip = pointer to device information structure.
2062 2310 * component = device.
2063 2311 * level = power level.
2064 2312 *
2065 2313 * Returns:
2066 2314 * DDI_SUCCESS or DDI_FAILURE.
2067 2315 *
2068 2316 * Context:
2069 2317 * Kernel context.
2070 2318 */
2071 2319 /* ARGSUSED */
2072 2320 static int
2073 2321 ql_power(dev_info_t *dip, int component, int level)
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
2074 2322 {
2075 2323 int rval = DDI_FAILURE;
2076 2324 off_t csr;
2077 2325 uint8_t saved_pm_val;
2078 2326 ql_adapter_state_t *ha;
2079 2327 char *buf;
2080 2328 char *path;
2081 2329
2082 2330 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2083 2331 if (ha == NULL || ha->pm_capable == 0) {
2084 - QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2085 - ddi_get_instance(dip));
2332 + QL_PRINT_2(ha, "no hba or PM not supported\n");
2086 2333 return (rval);
2087 2334 }
2088 2335
2089 - QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2336 + QL_PRINT_10(ha, "started\n");
2090 2337
2091 2338 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2092 2339 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2093 2340
2094 2341 if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2095 2342 level != PM_LEVEL_D3)) {
2096 2343 EL(ha, "invalid, component=%xh or level=%xh\n",
2097 2344 component, level);
2098 2345 return (rval);
2099 2346 }
2100 2347
2101 2348 GLOBAL_HW_LOCK();
2102 2349 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2103 2350 GLOBAL_HW_UNLOCK();
2104 2351
2105 - (void) snprintf(buf, sizeof (buf),
2352 + (void) snprintf(buf, MAXPATHLEN,
2106 2353 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2107 2354 ddi_pathname(dip, path));
2108 2355
2109 2356 switch (level) {
2110 2357 case PM_LEVEL_D0: /* power up to D0 state - fully on */
2111 2358
2112 2359 QL_PM_LOCK(ha);
2113 2360 if (ha->power_level == PM_LEVEL_D0) {
2114 2361 QL_PM_UNLOCK(ha);
2115 2362 rval = DDI_SUCCESS;
2116 2363 break;
2117 2364 }
2118 2365
2119 2366 /*
2120 2367 * Enable interrupts now
2121 2368 */
2122 2369 saved_pm_val = ha->power_level;
2123 2370 ha->power_level = PM_LEVEL_D0;
2124 2371 QL_PM_UNLOCK(ha);
2125 2372
2126 2373 GLOBAL_HW_LOCK();
2127 2374
2128 2375 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2129 2376
2130 2377 /*
2131 2378 * Delay after reset, for chip to recover.
2132 2379 * Otherwise causes system PANIC
2133 2380 */
2134 2381 drv_usecwait(200000);
2135 2382
2136 2383 GLOBAL_HW_UNLOCK();
2137 2384
2138 2385 if (ha->config_saved) {
2139 2386 ha->config_saved = 0;
2140 2387 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2141 2388 QL_PM_LOCK(ha);
2142 2389 ha->power_level = saved_pm_val;
2143 2390 QL_PM_UNLOCK(ha);
2144 2391 cmn_err(CE_WARN, "%s failed to restore "
2145 2392 "config regs", buf);
2146 2393 break;
2147 2394 }
2148 2395 }
2149 2396
2150 2397 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2151 2398 cmn_err(CE_WARN, "%s adapter initialization failed",
2152 2399 buf);
2153 2400 }
2154 2401
2155 2402 /* Wake up task_daemon. */
2156 2403 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2157 2404 TASK_DAEMON_SLEEPING_FLG, 0);
2158 2405
2159 2406 /* Restart IP if it was running. */
2160 2407 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2161 2408 (void) ql_initialize_ip(ha);
2162 2409 ql_isp_rcvbuf(ha);
2163 2410 }
2164 2411
|
↓ open down ↓ |
49 lines elided |
↑ open up ↑ |
2165 2412 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2166 2413 ha->instance, QL_NAME);
2167 2414
2168 2415 rval = DDI_SUCCESS;
2169 2416 break;
2170 2417
2171 2418 case PM_LEVEL_D3: /* power down to D3 state - off */
2172 2419
2173 2420 QL_PM_LOCK(ha);
2174 2421
2175 - if (ha->busy || ((ha->task_daemon_flags &
2422 + if (ha->pm_busy || ((ha->task_daemon_flags &
2176 2423 TASK_DAEMON_SLEEPING_FLG) == 0)) {
2177 2424 QL_PM_UNLOCK(ha);
2178 2425 break;
2179 2426 }
2180 2427
2181 2428 if (ha->power_level == PM_LEVEL_D3) {
2182 2429 rval = DDI_SUCCESS;
2183 2430 QL_PM_UNLOCK(ha);
2184 2431 break;
2185 2432 }
2186 2433 QL_PM_UNLOCK(ha);
2187 2434
2188 2435 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2189 2436 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2190 2437 " config regs", QL_NAME, ha->instance, buf);
2191 2438 break;
2192 2439 }
2193 2440 ha->config_saved = 1;
2194 2441
2195 2442 /*
2196 2443 * Don't enable interrupts. Running mailbox commands with
2197 2444 * interrupts enabled could cause hangs since pm_run_scan()
2198 2445 * runs out of a callout thread and on single cpu systems
2199 2446 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2200 2447 * would not get to run.
2201 2448 */
2202 2449 TASK_DAEMON_LOCK(ha);
2203 2450 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2204 2451 TASK_DAEMON_UNLOCK(ha);
2205 2452
2206 2453 ql_halt(ha, PM_LEVEL_D3);
2207 2454
2208 2455 /*
2209 2456 * Setup ql_intr to ignore interrupts from here on.
2210 2457 */
2211 2458 QL_PM_LOCK(ha);
2212 2459 ha->power_level = PM_LEVEL_D3;
2213 2460 QL_PM_UNLOCK(ha);
2214 2461
2215 2462 /*
2216 2463 * Wait for ISR to complete.
2217 2464 */
2218 2465 INTR_LOCK(ha);
2219 2466 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2220 2467 INTR_UNLOCK(ha);
2221 2468
|
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
2222 2469 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2223 2470 ha->instance, QL_NAME);
2224 2471
2225 2472 rval = DDI_SUCCESS;
2226 2473 break;
2227 2474 }
2228 2475
2229 2476 kmem_free(buf, MAXPATHLEN);
2230 2477 kmem_free(path, MAXPATHLEN);
2231 2478
2232 - QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2479 + QL_PRINT_10(ha, "done\n");
2233 2480
2234 2481 return (rval);
2235 2482 }
2236 2483
2237 2484 /*
2238 2485 * ql_quiesce
2239 2486 * quiesce a device attached to the system.
2240 2487 *
2241 2488 * Input:
2242 2489 * dip = pointer to device information structure.
2243 2490 *
2244 2491 * Returns:
2245 2492 * DDI_SUCCESS
2246 2493 *
2247 2494 * Context:
2248 2495 * Kernel context.
2249 2496 */
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(NULL, "no adapter, instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * ISP82xx: stop the firmware through the NX mailbox
		 * interface, polling risc2host (BIT_15 = mailbox event)
		 * instead of taking interrupts.
		 */
		ql_8021_clr_hw_intr(ha);
		ql_8021_clr_fw_intr(ha);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_TOGGLE_INTERRUPT);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		/* Poll up to 20000 * 100us = 2s for mailbox completion. */
		for (timer = 0; timer < 20000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				ql_8021_clr_hw_intr(ha);
				/*
				 * Low byte < 0x12 — presumably a mailbox
				 * completion status; NOTE(review): confirm
				 * against the ISP82xx programming guide.
				 */
				if ((stat & 0xff) < 0x12) {
					ql_8021_clr_fw_intr(ha);
					break;
				}
				ql_8021_clr_fw_intr(ha);
			}
			drv_usecwait(100);
		}
		/*
		 * 0x0400: value written to the legacy interrupt target
		 * mask register — NOTE(review): confirm the bit meaning.
		 */
		ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		/* Same polled handshake for the STOP_FIRMWARE command. */
		for (timer = 0; timer < 20000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				ql_8021_clr_hw_intr(ha);
				if ((stat & 0xff) < 0x12) {
					ql_8021_clr_fw_intr(ha);
					break;
				}
				ql_8021_clr_fw_intr(ha);
			}
			drv_usecwait(100);
		}
	} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		/*
		 * 24xx-style firmware: issue MBC_STOP_FIRMWARE via the
		 * mailbox registers and poll risc2host for completion.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT16_IO_REG(ha, mailbox_in[2], 0);
		WRT16_IO_REG(ha, mailbox_in[3], 0);
		WRT16_IO_REG(ha, mailbox_in[4], 0);
		WRT16_IO_REG(ha, mailbox_in[5], 0);
		WRT16_IO_REG(ha, mailbox_in[6], 0);
		WRT16_IO_REG(ha, mailbox_in[7], 0);
		WRT16_IO_REG(ha, mailbox_in[8], 0);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/* Poll up to 30000 * 100us = 3s. */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		if (CFG_IST(ha, CFG_MWB_4096_SUPPORT)) {
			WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
			    MWB_4096_BYTES);
		} else {
			WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN);
		}
		drv_usecwait(100);

	} else {
		/* Legacy (2xxx) parts: quiesce by direct register writes. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
2309 2598
2310 2599 /* ************************************************************************ */
2311 2600 /* Fibre Channel Adapter (FCA) Transport Functions. */
2312 2601 /* ************************************************************************ */
2313 2602
2314 2603 /*
2315 2604 * ql_bind_port
2316 2605 * Handling port binding. The FC Transport attempts to bind an FCA port
2317 2606 * when it is ready to start transactions on the port. The FC Transport
2318 2607 * will call the fca_bind_port() function specified in the fca_transport
2319 2608 * structure it receives. The FCA must fill in the port_info structure
2320 2609 * passed in the call and also stash the information for future calls.
2321 2610 *
2322 2611 * Input:
2323 2612 * dip = pointer to FCA information structure.
2324 2613 * port_info = pointer to port information structure.
2325 2614 * bind_info = pointer to bind information structure.
2326 2615 *
2327 2616 * Returns:
2328 2617 * NULL = failure
2329 2618 *
2330 2619 * Context:
2331 2620 * Kernel context.
2332 2621 */
static opaque_t
ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
    fc_fca_bind_info_t *bind_info)
{
	ql_adapter_state_t	*ha, *vha;
	opaque_t		fca_handle = NULL;
	port_id_t		d_id;
	int			port_npiv = bind_info->port_npiv;
	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;

	/* get state info based on the dip */
	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* NOTE(review): ha is NULL here; QL_PRINT_2 must accept it. */
		QL_PRINT_2(ha, "no adapter, instance=%d\n",
		    ddi_get_instance(dip));
		return (NULL);
	}
	QL_PRINT_10(ha, "started\n");

	/* Verify port number is supported. */
	if (port_npiv != 0) {
		/* NPIV bind: the physical port must support virtual ports. */
		if (!(ha->flags & VP_ENABLED)) {
			QL_PRINT_2(ha, "FC_NPIV_NOT_SUPPORTED\n");
			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
			return (NULL);
		}
		/* NPIV requires a point-to-point (fabric) topology. */
		if (!(ha->flags & POINT_TO_POINT)) {
			QL_PRINT_2(ha, "FC_NPIV_WRONG_TOPOLOGY\n");
			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
			return (NULL);
		}
		/* The attached switch must support FDISC logins. */
		if (!(ha->flags & FDISC_ENABLED)) {
			QL_PRINT_2(ha, "switch does not support "
			    "FDISC\n");
			port_info->pi_error = FC_NPIV_FDISC_FAILED;
			return (NULL);
		}
		/* Reject vport indices beyond the adapter's limit. */
		if (bind_info->port_num >= ha->max_vports) {
			QL_PRINT_2(ha, "port number=%d "
			    "FC_OUTOFBOUNDS\n", bind_info->port_num);
			port_info->pi_error = FC_OUTOFBOUNDS;
			return (NULL);
		}
	} else if (bind_info->port_num != 0) {
		/* Non-NPIV binds are only valid for the physical port 0. */
		QL_PRINT_2(ha, "failed, port number=%d is not "
		    "supported\n", bind_info->port_num);
		port_info->pi_error = FC_OUTOFBOUNDS;
		return (NULL);
	}

	/* Locate port context. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		if (vha->vp_index == bind_info->port_num) {
			break;
		}
	}

	/* If virtual port does not exist. */
	if (vha == NULL) {
		/*
		 * NOTE(review): if ql_vport_create() can fail and return
		 * NULL, the dereference below would panic — confirm its
		 * failure semantics.
		 */
		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
	}

	/* make sure this port isn't already bound */
	if (vha->flags & FCA_BOUND) {
		port_info->pi_error = FC_ALREADY;
	} else {
		/* Virtual ports take their WWNs from the bind request. */
		if (vha->vp_index != 0) {
			bcopy(port_nwwn,
			    vha->loginparams.node_ww_name.raw_wwn, 8);
			bcopy(port_pwwn,
			    vha->loginparams.nport_ww_name.raw_wwn, 8);
		}
		/* Enable the virtual port on first bind. */
		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
			if (ql_vport_enable(vha) != QL_SUCCESS) {
				QL_PRINT_2(ha, "failed to enable "
				    "virtual port=%d\n",
				    vha->vp_index);
				port_info->pi_error = FC_NPIV_FDISC_FAILED;
				return (NULL);
			}
			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    QL_NAME, ha->instance, vha->vp_index,
			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
			    port_pwwn[6], port_pwwn[7],
			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
			    port_nwwn[6], port_nwwn[7]);
		}

		/* stash the bind_info supplied by the FC Transport */
		vha->bind_info.port_handle = bind_info->port_handle;
		vha->bind_info.port_statec_cb = bind_info->port_statec_cb;
		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;

		/* Set port's source ID. */
		port_info->pi_s_id.port_id = vha->d_id.b24;

		/* copy out the default login parameters */
		bcopy((void *)&vha->loginparams,
		    (void *)&port_info->pi_login_params,
		    sizeof (la_els_logi_t));

		/* Set port's hard address if enabled. */
		port_info->pi_hard_addr.hard_addr = 0;
		if (bind_info->port_num == 0) {
			d_id.b24 = ha->d_id.b24;
			/*
			 * Hard-address enable is BIT_0 of the firmware
			 * options; the init-control-block layout differs
			 * between firmware generations.
			 */
			if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
				if (ha->init_ctrl_blk.cb24.
				    firmware_options_1[0] & BIT_0) {
					d_id.b.al_pa = ql_index_to_alpa[ha->
					    init_ctrl_blk.cb24.
					    hard_address[0]];
					port_info->pi_hard_addr.hard_addr =
					    d_id.b24;
				}
			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
			    BIT_0) {
				d_id.b.al_pa = ql_index_to_alpa[ha->
				    init_ctrl_blk.cb.hard_address[0]];
				port_info->pi_hard_addr.hard_addr = d_id.b24;
			}

			/* Set the node id data */
			if (ql_get_rnid_params(ha,
			    sizeof (port_info->pi_rnid_params.params),
			    (caddr_t)&port_info->pi_rnid_params.params) ==
			    QL_SUCCESS) {
				port_info->pi_rnid_params.status = FC_SUCCESS;
			} else {
				port_info->pi_rnid_params.status = FC_FAILURE;
			}

			/* Populate T11 FC-HBA details */
			ql_populate_hba_fru_details(ha, port_info);
			/* Cache the attrs for subsequent vport binds. */
			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
			    KM_SLEEP);
			if (ha->pi_attrs != NULL) {
				bcopy(&port_info->pi_attrs, ha->pi_attrs,
				    sizeof (fca_port_attrs_t));
			}
		} else {
			/* Virtual port: reuse attrs cached by port 0 bind. */
			port_info->pi_rnid_params.status = FC_FAILURE;
			if (ha->pi_attrs != NULL) {
				bcopy(ha->pi_attrs, &port_info->pi_attrs,
				    sizeof (fca_port_attrs_t));
			}
		}

		/* Generate handle for this FCA. */
		fca_handle = (opaque_t)vha;

		ADAPTER_STATE_LOCK(ha);
		vha->flags |= FCA_BOUND;
		ADAPTER_STATE_UNLOCK(ha);
		/* Set port's current state. */
		port_info->pi_port_state = vha->state;
	}

	QL_PRINT_10(ha, "done, pi_port_state=%xh, "
	    "pi_s_id.port_id=%xh\n",
	    port_info->pi_port_state, port_info->pi_s_id.port_id);

	return (fca_handle);
}
2505 2790
2506 2791 /*
2507 2792 * ql_unbind_port
2508 2793 * To unbind a Fibre Channel Adapter from an FC Port driver.
2509 2794 *
2510 2795 * Input:
2511 2796 * fca_handle = handle setup by ql_bind_port().
2512 2797 *
2513 2798 * Context:
2514 2799 * Kernel context.
2515 2800 */
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
static void
ql_unbind_port(opaque_t fca_handle)
{
	ql_adapter_state_t	*ha;
	ql_tgt_t		*tq;
	uint32_t		flgs;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
	} else {
		QL_PRINT_10(ha, "started\n");

		if (!(ha->flags & FCA_BOUND)) {
			/*EMPTY*/
			QL_PRINT_2(ha, "port already unbound\n");
		} else {
			/*
			 * For an enabled virtual port, disable the vport
			 * first, then log out its fabric F-port login.
			 * NOTE(review): this ordering (disable before
			 * logout) is deliberate per the diff — keep it.
			 */
			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
				(void) ql_vport_control(ha, (uint8_t)
				    (CFG_IST(ha, CFG_FC_TYPE) ?
				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
				if ((tq = ql_loop_id_to_queue(ha,
				    FL_PORT_24XX_HDL)) != NULL) {
					(void) ql_logout_fabric_port(ha, tq);
				}
				flgs = FCA_BOUND | VP_ENABLED;
			} else {
				flgs = FCA_BOUND;
			}
			/* Clear the bound (and enabled) flags atomically. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~flgs;
			ADAPTER_STATE_UNLOCK(ha);
		}

		QL_PRINT_10(ha, "done\n");
	}
}
2558 2840
2559 2841 /*
2560 2842 * ql_init_pkt
2561 2843 * Initialize FCA portion of packet.
2562 2844 *
2563 2845 * Input:
2564 2846 * fca_handle = handle setup by ql_bind_port().
2565 2847 * pkt = pointer to fc_packet.
2566 2848 *
2567 2849 * Returns:
2568 2850 * FC_SUCCESS - the packet has successfully been initialized.
2569 2851 * FC_UNBOUND - the fca_handle specified is not bound.
2570 2852 * FC_NOMEM - the FCA failed initialization due to an allocation error.
2571 2853 * FC_FAILURE - the FCA failed initialization for undisclosed reasons
2572 2854 *
2573 2855 * Context:
2574 2856 * Kernel context.
2575 2857 */
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
2576 2858 /* ARGSUSED */
2577 2859 static int
2578 2860 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2579 2861 {
2580 2862 ql_adapter_state_t *ha;
2581 2863 ql_srb_t *sp;
2582 2864 int rval = FC_SUCCESS;
2583 2865
2584 2866 ha = ql_fca_handle_to_state(fca_handle);
2585 2867 if (ha == NULL) {
2586 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2868 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2587 2869 (void *)fca_handle);
2588 2870 return (FC_UNBOUND);
2589 2871 }
2590 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2872 + QL_PRINT_3(ha, "started\n");
2591 2873
2592 2874 sp = (ql_srb_t *)pkt->pkt_fca_private;
2593 2875 sp->flags = 0;
2876 + sp->handle = 0;
2594 2877
2595 2878 /* init cmd links */
2596 2879 sp->cmd.base_address = sp;
2597 2880 sp->cmd.prev = NULL;
2598 2881 sp->cmd.next = NULL;
2599 2882 sp->cmd.head = NULL;
2600 2883
2601 2884 /* init watchdog links */
2602 2885 sp->wdg.base_address = sp;
2603 2886 sp->wdg.prev = NULL;
2604 2887 sp->wdg.next = NULL;
2605 2888 sp->wdg.head = NULL;
2606 2889 sp->pkt = pkt;
2607 2890 sp->ha = ha;
2608 2891 sp->magic_number = QL_FCA_BRAND;
2609 2892 sp->sg_dma.dma_handle = NULL;
2610 2893 #ifndef __sparc
2611 - if (CFG_IST(ha, CFG_CTRL_8021)) {
2894 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
2612 2895 /* Setup DMA for scatter gather list. */
2613 2896 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2614 2897 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2615 - sp->sg_dma.cookie_count = 1;
2898 + sp->sg_dma.max_cookie_count = 1;
2616 2899 sp->sg_dma.alignment = 64;
2617 2900 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2618 2901 rval = FC_NOMEM;
2619 2902 }
2620 2903 }
2621 2904 #endif /* __sparc */
2622 2905
2623 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2906 + QL_PRINT_3(ha, "done\n");
2624 2907
2625 2908 return (rval);
2626 2909 }
2627 2910
2628 2911 /*
2629 2912 * ql_un_init_pkt
2630 2913 * Release all local resources bound to packet.
2631 2914 *
2632 2915 * Input:
2633 2916 * fca_handle = handle setup by ql_bind_port().
2634 2917 * pkt = pointer to fc_packet.
2635 2918 *
2636 2919 * Returns:
2637 2920 * FC_SUCCESS - the packet has successfully been invalidated.
2638 2921 * FC_UNBOUND - the fca_handle specified is not bound.
2639 2922 * FC_BADPACKET - the packet has not been initialized or has
2640 2923 * already been freed by this FCA.
2641 2924 *
2642 2925 * Context:
2643 2926 * Kernel context.
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
2644 2927 */
2645 2928 static int
2646 2929 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2647 2930 {
2648 2931 ql_adapter_state_t *ha;
2649 2932 int rval;
2650 2933 ql_srb_t *sp;
2651 2934
2652 2935 ha = ql_fca_handle_to_state(fca_handle);
2653 2936 if (ha == NULL) {
2654 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2937 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2655 2938 (void *)fca_handle);
2656 2939 return (FC_UNBOUND);
2657 2940 }
2658 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2941 + QL_PRINT_3(ha, "started\n");
2659 2942
2660 2943 sp = (ql_srb_t *)pkt->pkt_fca_private;
2661 2944
2662 2945 if (sp->magic_number != QL_FCA_BRAND) {
2663 2946 EL(ha, "failed, FC_BADPACKET\n");
2664 2947 rval = FC_BADPACKET;
2665 2948 } else {
2666 2949 sp->magic_number = NULL;
2667 2950 ql_free_phys(ha, &sp->sg_dma);
2668 2951 rval = FC_SUCCESS;
2669 2952 }
2670 2953
2671 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2954 + QL_PRINT_3(ha, "done\n");
2672 2955
2673 2956 return (rval);
2674 2957 }
2675 2958
2676 2959 /*
2677 2960 * ql_els_send
2678 2961 * Issue a extended link service request.
2679 2962 *
2680 2963 * Input:
2681 2964 * fca_handle = handle setup by ql_bind_port().
2682 2965 * pkt = pointer to fc_packet.
2683 2966 *
2684 2967 * Returns:
2685 2968 * FC_SUCCESS - the command was successful.
2686 2969 * FC_ELS_FREJECT - the command was rejected by a Fabric.
2687 2970 * FC_ELS_PREJECT - the command was rejected by an N-port.
2688 2971 * FC_TRANSPORT_ERROR - a transport error occurred.
2689 2972 * FC_UNBOUND - the fca_handle specified is not bound.
2690 2973 * FC_ELS_BAD - the FCA can not issue the requested ELS.
2691 2974 *
2692 2975 * Context:
2693 2976 * Kernel context.
2694 2977 */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer = drv_usectohz(30000000);
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(ha, "started\n");

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (DRIVER_SUSPENDED(ha)) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Response goes back from our d_id to the command's destination. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear any stale SRB type flags, then mark this as an ELS SRB. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	QL_PRINT_10(ha, "els.ls_code=%xh, d_id=%xh\n", els.ls_code,
	    pkt->pkt_cmd_fhdr.d_id);

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch on the ELS command code. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/* Nothing to send for a bare accept/reject. */
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_io_comp(sp);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "rval=%x, ls_code=%xh sent to d_id=%xh, sp=%ph\n",
		    rval, els.ls_code, pkt->pkt_cmd_fhdr.d_id, sp);
	} else {
		/*EMPTY*/
		QL_PRINT_10(ha, "done\n");
	}
	return (rval);
}
2873 3146
2874 3147 /*
2875 3148 * ql_get_cap
2876 3149 * Export FCA hardware and software capabilities.
2877 3150 *
2878 3151 * Input:
2879 3152 * fca_handle = handle setup by ql_bind_port().
2880 3153 * cap = pointer to the capabilities string.
2881 3154 * ptr = buffer pointer for return capability.
2882 3155 *
2883 3156 * Returns:
2884 3157 * FC_CAP_ERROR - no such capability
2885 3158 * FC_CAP_FOUND - the capability was returned and cannot be set
2886 3159 * FC_CAP_SETTABLE - the capability was returned and can be set
2887 3160 * FC_UNBOUND - the fca_handle specified is not bound.
2888 3161 *
2889 3162 * Context:
2890 3163 * Kernel context.
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
2891 3164 */
2892 3165 static int
2893 3166 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2894 3167 {
2895 3168 ql_adapter_state_t *ha;
2896 3169 int rval;
2897 3170 uint32_t *rptr = (uint32_t *)ptr;
2898 3171
2899 3172 ha = ql_fca_handle_to_state(fca_handle);
2900 3173 if (ha == NULL) {
2901 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3174 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2902 3175 (void *)fca_handle);
2903 3176 return (FC_UNBOUND);
2904 3177 }
2905 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3178 + QL_PRINT_3(ha, "started\n");
2906 3179
2907 3180 if (strcmp(cap, FC_NODE_WWN) == 0) {
2908 3181 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2909 3182 ptr, 8);
2910 3183 rval = FC_CAP_FOUND;
2911 3184 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2912 3185 bcopy((void *)&ha->loginparams, ptr,
2913 3186 sizeof (la_els_logi_t));
2914 3187 rval = FC_CAP_FOUND;
2915 3188 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2916 3189 *rptr = (uint32_t)QL_UB_LIMIT;
2917 3190 rval = FC_CAP_FOUND;
2918 3191 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2919 3192
2920 3193 dev_info_t *psydip = NULL;
2921 3194 #ifdef __sparc
2922 3195 /*
2923 3196 * Disable streaming for certain 2 chip adapters
2924 3197 * below Psycho to handle Psycho byte hole issue.
2925 3198 */
2926 - if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2927 - (!CFG_IST(ha, CFG_SBUS_CARD))) {
3199 + if (ha->flags & MULTI_CHIP_ADAPTER &&
3200 + !CFG_IST(ha, CFG_SBUS_CARD)) {
2928 3201 for (psydip = ddi_get_parent(ha->dip); psydip;
2929 3202 psydip = ddi_get_parent(psydip)) {
2930 3203 if (strcmp(ddi_driver_name(psydip),
2931 3204 "pcipsy") == 0) {
2932 3205 break;
2933 3206 }
2934 3207 }
2935 3208 }
2936 3209 #endif /* __sparc */
2937 3210
2938 3211 if (psydip) {
2939 3212 *rptr = (uint32_t)FC_NO_STREAMING;
2940 3213 EL(ha, "No Streaming\n");
2941 3214 } else {
2942 3215 *rptr = (uint32_t)FC_ALLOW_STREAMING;
2943 3216 EL(ha, "Allow Streaming\n");
2944 3217 }
2945 3218 rval = FC_CAP_FOUND;
2946 3219 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2947 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
2948 - *rptr = (uint32_t)CHAR_TO_SHORT(
2949 - ha->init_ctrl_blk.cb24.max_frame_length[0],
2950 - ha->init_ctrl_blk.cb24.max_frame_length[1]);
2951 - } else {
2952 - *rptr = (uint32_t)CHAR_TO_SHORT(
2953 - ha->init_ctrl_blk.cb.max_frame_length[0],
2954 - ha->init_ctrl_blk.cb.max_frame_length[1]);
2955 - }
3220 + *rptr = ha->loginparams.common_service.rx_bufsize;
2956 3221 rval = FC_CAP_FOUND;
2957 3222 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2958 3223 *rptr = FC_RESET_RETURN_ALL;
2959 3224 rval = FC_CAP_FOUND;
2960 3225 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2961 3226 *rptr = FC_NO_DVMA_SPACE;
2962 3227 rval = FC_CAP_FOUND;
2963 3228 } else {
2964 3229 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2965 3230 rval = FC_CAP_ERROR;
2966 3231 }
2967 3232
2968 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3233 + QL_PRINT_3(ha, "done\n");
2969 3234
2970 3235 return (rval);
2971 3236 }
2972 3237
2973 3238 /*
2974 3239 * ql_set_cap
2975 3240 * Allow the FC Transport to set FCA capabilities if possible.
2976 3241 *
2977 3242 * Input:
2978 3243 * fca_handle = handle setup by ql_bind_port().
2979 3244 * cap = pointer to the capabilities string.
2980 3245 * ptr = buffer pointer for capability.
2981 3246 *
2982 3247 * Returns:
2983 3248 * FC_CAP_ERROR - no such capability
2984 3249 * FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2985 3250 * FC_CAP_SETTABLE - the capability was successfully set.
2986 3251 * FC_UNBOUND - the fca_handle specified is not bound.
2987 3252 *
2988 3253 * Context:
2989 3254 * Kernel context.
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
2990 3255 */
2991 3256 /* ARGSUSED */
2992 3257 static int
2993 3258 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2994 3259 {
2995 3260 ql_adapter_state_t *ha;
2996 3261 int rval;
2997 3262
2998 3263 ha = ql_fca_handle_to_state(fca_handle);
2999 3264 if (ha == NULL) {
3000 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3265 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3001 3266 (void *)fca_handle);
3002 3267 return (FC_UNBOUND);
3003 3268 }
3004 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3269 + QL_PRINT_3(ha, "started\n");
3005 3270
3006 3271 if (strcmp(cap, FC_NODE_WWN) == 0) {
3007 3272 rval = FC_CAP_FOUND;
3008 3273 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3009 3274 rval = FC_CAP_FOUND;
3010 3275 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3011 3276 rval = FC_CAP_FOUND;
3012 3277 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3013 3278 rval = FC_CAP_FOUND;
3014 3279 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3015 3280 rval = FC_CAP_FOUND;
3016 3281 } else {
3017 3282 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3018 3283 rval = FC_CAP_ERROR;
3019 3284 }
3020 3285
3021 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3286 + QL_PRINT_3(ha, "done\n");
3022 3287
3023 3288 return (rval);
3024 3289 }
3025 3290
3026 3291 /*
3027 3292 * ql_getmap
3028 3293 * Request of Arbitrated Loop (AL-PA) map.
3029 3294 *
3030 3295 * Input:
3031 3296 * fca_handle = handle setup by ql_bind_port().
3032 3297  *	mapbuf = buffer pointer for map.
3033 3298 *
3034 3299 * Returns:
3035 3300 * FC_OLDPORT - the specified port is not operating in loop mode.
3036 3301 * FC_OFFLINE - the specified port is not online.
3037 3302 * FC_NOMAP - there is no loop map available for this port.
3038 3303 * FC_UNBOUND - the fca_handle specified is not bound.
3039 3304 * FC_SUCCESS - a valid map has been placed in mapbuf.
3040 3305 *
3041 3306 * Context:
3042 3307 * Kernel context.
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
3043 3308 */
3044 3309 static int
3045 3310 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
3046 3311 {
3047 3312 ql_adapter_state_t *ha;
3048 3313 clock_t timer = drv_usectohz(30000000);
3049 3314 int rval = FC_SUCCESS;
3050 3315
3051 3316 ha = ql_fca_handle_to_state(fca_handle);
3052 3317 if (ha == NULL) {
3053 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3318 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3054 3319 (void *)fca_handle);
3055 3320 return (FC_UNBOUND);
3056 3321 }
3057 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3322 + QL_PRINT_3(ha, "started\n");
3058 3323
3059 3324 mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
3060 3325 mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
3061 3326
3062 3327 /* Wait for suspension to end. */
3063 3328 TASK_DAEMON_LOCK(ha);
3064 - while (ha->task_daemon_flags & QL_SUSPENDED) {
3329 + while (DRIVER_SUSPENDED(ha)) {
3065 3330 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3066 3331
3067 3332 /* 30 seconds from now */
3068 3333 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3069 3334 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3070 3335 /*
3071 3336 * The timeout time 'timer' was
3072 3337 * reached without the condition
3073 3338 * being signaled.
3074 3339 */
3075 3340
3076 3341 /* Release task daemon lock. */
3077 3342 TASK_DAEMON_UNLOCK(ha);
3078 3343
3079 3344 EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
3080 3345 return (FC_TRAN_BUSY);
3081 3346 }
3082 3347 }
3083 3348 /* Release task daemon lock. */
3084 3349 TASK_DAEMON_UNLOCK(ha);
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
3085 3350
3086 3351 if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
3087 3352 (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
3088 3353 /*
3089 3354 		 * Now, since transport drivers consider this as an
3090 3355 		 * offline condition, let's wait for a few seconds
3091 3356 		 * for any loop transitions before we reset the
3092 3357 * chip and restart all over again.
3093 3358 */
3094 3359 ql_delay(ha, 2000000);
3095 - EL(ha, "failed, FC_NOMAP\n");
3096 - rval = FC_NOMAP;
3360 + EL(ha, "failed, FC_NO_MAP\n");
3361 + rval = FC_NO_MAP;
3097 3362 } else {
3098 3363 /*EMPTY*/
3099 - QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
3100 - "data %xh %xh %xh %xh\n", ha->instance,
3364 + QL_PRINT_3(ha, "my_alpa %xh len %xh "
3365 + "data %xh %xh %xh %xh\n",
3101 3366 mapbuf->lilp_myalpa, mapbuf->lilp_length,
3102 3367 mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
3103 3368 mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
3104 3369 }
3105 3370
3106 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3371 + QL_PRINT_3(ha, "done\n");
3107 3372 #if 0
3108 3373 QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
3109 3374 #endif
3110 3375 return (rval);
3111 3376 }
3112 3377
3113 3378 /*
3114 3379 * ql_transport
3115 3380 * Issue an I/O request. Handles all regular requests.
3116 3381 *
3117 3382 * Input:
3118 3383 * fca_handle = handle setup by ql_bind_port().
3119 3384 * pkt = pointer to fc_packet.
3120 3385 *
3121 3386 * Returns:
3122 3387 * FC_SUCCESS - the packet was accepted for transport.
3123 3388 * FC_TRANSPORT_ERROR - a transport error occurred.
3124 3389 * FC_BADPACKET - the packet to be transported had not been
3125 3390 * initialized by this FCA.
3126 3391 * FC_UNBOUND - the fca_handle specified is not bound.
3127 3392 *
3128 3393 * Context:
3129 3394 * Kernel context.
3130 3395 */
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
3131 3396 static int
3132 3397 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
3133 3398 {
3134 3399 ql_adapter_state_t *ha;
3135 3400 int rval = FC_TRANSPORT_ERROR;
3136 3401 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
3137 3402
3138 3403 /* Verify proper command. */
3139 3404 ha = ql_cmd_setup(fca_handle, pkt, &rval);
3140 3405 if (ha == NULL) {
3141 - QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3406 + QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3142 3407 rval, fca_handle);
3143 3408 return (rval);
3144 3409 }
3145 - QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
3146 -#if 0
3147 - QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
3148 - sizeof (fc_frame_hdr_t) / 4);
3149 - QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
3150 - QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
3151 -#endif
3410 + QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
3152 3411
3153 3412 /* Reset SRB flags. */
3154 3413 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
3155 - SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
3414 + SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_UB_CALLBACK |
3156 3415 SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
3157 3416 SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
3158 3417 SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
3159 3418 SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
3160 3419 SRB_MS_PKT | SRB_ELS_PKT);
3161 3420
3162 3421 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3163 3422 pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
3164 3423 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3165 3424 pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
3166 3425 pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
3167 3426
3168 3427 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3169 3428 case R_CTL_COMMAND:
3170 3429 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
3171 3430 sp->flags |= SRB_FCP_CMD_PKT;
3172 3431 rval = ql_fcp_scsi_cmd(ha, pkt, sp);
3432 + } else {
3433 + pkt->pkt_state = FC_PKT_LOCAL_RJT;
3434 + pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3435 + rval = FC_TRANSPORT_ERROR;
3173 3436 }
3174 3437 break;
3175 3438
3176 3439 default:
3177 3440 /* Setup response header and buffer. */
3178 3441 if (pkt->pkt_rsplen) {
3179 3442 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3180 3443 }
3181 3444
3182 3445 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3183 3446 case R_CTL_UNSOL_DATA:
3184 3447 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3185 - sp->flags |= SRB_IP_PKT;
3186 - rval = ql_fcp_ip_cmd(ha, pkt, sp);
3448 + if (CFG_IST(ha, CFG_FCIP_SUPPORT) &&
3449 + ha->vp_index == 0) {
3450 + sp->flags |= SRB_IP_PKT;
3451 + rval = ql_fcp_ip_cmd(ha, pkt, sp);
3452 + } else {
3453 + cmn_err(CE_NOTE, "%s(%d) FC-IP is not "
3454 + "supported on this adapter\n",
3455 + QL_NAME, ha->instance);
3456 + pkt->pkt_state = FC_PKT_LOCAL_RJT;
3457 + pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3458 + rval = FC_TRANSPORT_ERROR;
3459 + }
3187 3460 }
3188 3461 break;
3189 3462
3190 3463 case R_CTL_UNSOL_CONTROL:
3191 3464 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3192 3465 sp->flags |= SRB_GENERIC_SERVICES_PKT;
3193 3466 rval = ql_fc_services(ha, pkt);
3467 + } else {
3468 + pkt->pkt_state = FC_PKT_LOCAL_RJT;
3469 + pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3470 + rval = FC_TRANSPORT_ERROR;
3194 3471 }
3195 3472 break;
3196 3473
3197 3474 case R_CTL_SOLICITED_DATA:
3198 3475 case R_CTL_STATUS:
3199 3476 default:
3200 3477 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3201 3478 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3202 3479 rval = FC_TRANSPORT_ERROR;
3203 3480 EL(ha, "unknown, r_ctl=%xh\n",
3204 3481 pkt->pkt_cmd_fhdr.r_ctl);
3205 3482 break;
3206 3483 }
3207 3484 }
3208 3485
3209 3486 if (rval != FC_SUCCESS) {
3210 3487 EL(ha, "failed, rval = %xh\n", rval);
3211 3488 } else {
3212 3489 /*EMPTY*/
3213 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3490 + QL_PRINT_3(ha, "done\n");
3214 3491 }
3215 3492
3216 3493 return (rval);
3217 3494 }
3218 3495
3219 3496 /*
3220 3497 * ql_ub_alloc
3221 3498 * Allocate buffers for unsolicited exchanges.
3222 3499 *
3223 3500 * Input:
3224 3501 * fca_handle = handle setup by ql_bind_port().
3225 3502 * tokens = token array for each buffer.
3226 3503 * size = size of each buffer.
3227 3504 * count = pointer to number of buffers.
3228 3505 * type = the FC-4 type the buffers are reserved for.
3229 3506 * 1 = Extended Link Services, 5 = LLC/SNAP
3230 3507 *
3231 3508 * Returns:
3232 3509 * FC_FAILURE - buffers could not be allocated.
3233 3510 * FC_TOOMANY - the FCA could not allocate the requested
3234 3511 * number of buffers.
3235 3512 * FC_SUCCESS - unsolicited buffers were allocated.
3236 3513 * FC_UNBOUND - the fca_handle specified is not bound.
3237 3514 *
3238 3515 * Context:
3239 3516 * Kernel context.
3240 3517 */
3241 3518 static int
3242 3519 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3243 3520 uint32_t *count, uint32_t type)
3244 3521 {
3245 3522 ql_adapter_state_t *ha;
3246 3523 caddr_t bufp = NULL;
3247 3524 fc_unsol_buf_t *ubp;
|
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
3248 3525 ql_srb_t *sp;
3249 3526 uint32_t index;
3250 3527 uint32_t cnt;
3251 3528 uint32_t ub_array_index = 0;
3252 3529 int rval = FC_SUCCESS;
3253 3530 int ub_updated = FALSE;
3254 3531
3255 3532 /* Check handle. */
3256 3533 ha = ql_fca_handle_to_state(fca_handle);
3257 3534 if (ha == NULL) {
3258 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3535 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3259 3536 (void *)fca_handle);
3260 3537 return (FC_UNBOUND);
3261 3538 }
3262 - QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
3263 - ha->instance, ha->vp_index, *count);
3539 + QL_PRINT_3(ha, "started, count = %xh\n", *count);
3264 3540
3265 3541 QL_PM_LOCK(ha);
3266 3542 if (ha->power_level != PM_LEVEL_D0) {
3267 3543 QL_PM_UNLOCK(ha);
3268 - QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
3269 - ha->vp_index);
3544 + QL_PRINT_3(ha, "down done\n");
3270 3545 return (FC_FAILURE);
3271 3546 }
3272 3547 QL_PM_UNLOCK(ha);
3273 3548
3274 - /* Acquire adapter state lock. */
3275 - ADAPTER_STATE_LOCK(ha);
3276 -
3277 3549 /* Check the count. */
3278 3550 if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3279 3551 *count = 0;
3280 3552 EL(ha, "failed, FC_TOOMANY\n");
3281 3553 rval = FC_TOOMANY;
3282 3554 }
3283 3555
3284 3556 /*
3285 3557 * reset ub_array_index
3286 3558 */
3287 3559 ub_array_index = 0;
3288 3560
3289 3561 /*
3290 3562 * Now proceed to allocate any buffers required
3291 3563 */
3292 3564 for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3293 3565 /* Allocate all memory needed. */
3294 3566 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3295 3567 KM_SLEEP);
3296 3568 if (ubp == NULL) {
3297 3569 EL(ha, "failed, FC_FAILURE\n");
3298 3570 rval = FC_FAILURE;
3299 3571 } else {
3300 3572 sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
3301 3573 if (sp == NULL) {
3302 3574 kmem_free(ubp, sizeof (fc_unsol_buf_t));
3303 3575 rval = FC_FAILURE;
3304 3576 } else {
3305 3577 if (type == FC_TYPE_IS8802_SNAP) {
3306 3578 #ifdef __sparc
3307 3579 if (ql_get_dma_mem(ha,
3308 3580 &sp->ub_buffer, size,
3309 3581 BIG_ENDIAN_DMA,
3310 3582 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3311 3583 rval = FC_FAILURE;
3312 3584 kmem_free(ubp,
3313 3585 sizeof (fc_unsol_buf_t));
3314 3586 kmem_free(sp,
3315 3587 sizeof (ql_srb_t));
3316 3588 } else {
3317 3589 bufp = sp->ub_buffer.bp;
3318 3590 sp->ub_size = size;
3319 3591 }
3320 3592 #else
3321 3593 if (ql_get_dma_mem(ha,
3322 3594 &sp->ub_buffer, size,
3323 3595 LITTLE_ENDIAN_DMA,
3324 3596 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3325 3597 rval = FC_FAILURE;
3326 3598 kmem_free(ubp,
3327 3599 sizeof (fc_unsol_buf_t));
3328 3600 kmem_free(sp,
3329 3601 sizeof (ql_srb_t));
3330 3602 } else {
3331 3603 bufp = sp->ub_buffer.bp;
3332 3604 sp->ub_size = size;
3333 3605 }
3334 3606 #endif
3335 3607 } else {
3336 3608 bufp = kmem_zalloc(size, KM_SLEEP);
3337 3609 if (bufp == NULL) {
3338 3610 rval = FC_FAILURE;
3339 3611 kmem_free(ubp,
3340 3612 sizeof (fc_unsol_buf_t));
3341 3613 kmem_free(sp,
3342 3614 sizeof (ql_srb_t));
3343 3615 } else {
3344 3616 sp->ub_size = size;
3345 3617 }
3346 3618 }
3347 3619 }
3348 3620 }
3349 3621
3350 3622 if (rval == FC_SUCCESS) {
3351 3623 /* Find next available slot. */
3352 3624 QL_UB_LOCK(ha);
3353 3625 while (ha->ub_array[ub_array_index] != NULL) {
3354 3626 ub_array_index++;
3355 3627 }
3356 3628
3357 3629 ubp->ub_fca_private = (void *)sp;
3358 3630
3359 3631 /* init cmd links */
3360 3632 sp->cmd.base_address = sp;
3361 3633 sp->cmd.prev = NULL;
3362 3634 sp->cmd.next = NULL;
3363 3635 sp->cmd.head = NULL;
3364 3636
3365 3637 /* init wdg links */
3366 3638 sp->wdg.base_address = sp;
3367 3639 sp->wdg.prev = NULL;
3368 3640 sp->wdg.next = NULL;
3369 3641 sp->wdg.head = NULL;
3370 3642 sp->ha = ha;
3371 3643
3372 3644 ubp->ub_buffer = bufp;
3373 3645 ubp->ub_bufsize = size;
3374 3646 ubp->ub_port_handle = fca_handle;
3375 3647 ubp->ub_token = ub_array_index;
3376 3648
3377 3649 /* Save the token. */
3378 3650 tokens[index] = ub_array_index;
3379 3651
3380 3652 /* Setup FCA private information. */
3381 3653 sp->ub_type = type;
|
↓ open down ↓ |
95 lines elided |
↑ open up ↑ |
3382 3654 sp->handle = ub_array_index;
3383 3655 sp->flags |= SRB_UB_IN_FCA;
3384 3656
3385 3657 ha->ub_array[ub_array_index] = ubp;
3386 3658 ha->ub_allocated++;
3387 3659 ub_updated = TRUE;
3388 3660 QL_UB_UNLOCK(ha);
3389 3661 }
3390 3662 }
3391 3663
3392 - /* Release adapter state lock. */
3393 - ADAPTER_STATE_UNLOCK(ha);
3394 -
3395 3664 /* IP buffer. */
3396 3665 if (ub_updated) {
3397 - if ((type == FC_TYPE_IS8802_SNAP) &&
3398 - (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {
3666 + if (type == FC_TYPE_IS8802_SNAP &&
3667 + CFG_IST(ha, CFG_FCIP_SUPPORT) &&
3668 + ha->vp_index == 0) {
3399 3669
3400 3670 ADAPTER_STATE_LOCK(ha);
3401 3671 ha->flags |= IP_ENABLED;
3402 3672 ADAPTER_STATE_UNLOCK(ha);
3403 3673
3404 3674 if (!(ha->flags & IP_INITIALIZED)) {
3405 - if (CFG_IST(ha, CFG_CTRL_2422)) {
3675 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3406 3676 ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3407 3677 LSB(ql_ip_mtu);
3408 3678 ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3409 3679 MSB(ql_ip_mtu);
3410 3680 ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3411 3681 LSB(size);
3412 3682 ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3413 3683 MSB(size);
3414 3684
3415 3685 cnt = CHAR_TO_SHORT(
3416 3686 ha->ip_init_ctrl_blk.cb24.cc[0],
3417 3687 ha->ip_init_ctrl_blk.cb24.cc[1]);
3418 3688
3419 3689 if (cnt < *count) {
3420 3690 ha->ip_init_ctrl_blk.cb24.cc[0]
3421 3691 = LSB(*count);
3422 3692 ha->ip_init_ctrl_blk.cb24.cc[1]
3423 3693 = MSB(*count);
3424 3694 }
3425 3695 } else {
3426 3696 ha->ip_init_ctrl_blk.cb.mtu_size[0] =
3427 3697 LSB(ql_ip_mtu);
3428 3698 ha->ip_init_ctrl_blk.cb.mtu_size[1] =
3429 3699 MSB(ql_ip_mtu);
3430 3700 ha->ip_init_ctrl_blk.cb.buf_size[0] =
3431 3701 LSB(size);
3432 3702 ha->ip_init_ctrl_blk.cb.buf_size[1] =
3433 3703 MSB(size);
3434 3704
3435 3705 cnt = CHAR_TO_SHORT(
3436 3706 ha->ip_init_ctrl_blk.cb.cc[0],
3437 3707 ha->ip_init_ctrl_blk.cb.cc[1]);
3438 3708
3439 3709 if (cnt < *count) {
3440 3710 ha->ip_init_ctrl_blk.cb.cc[0] =
3441 3711 LSB(*count);
3442 3712 ha->ip_init_ctrl_blk.cb.cc[1] =
3443 3713 MSB(*count);
3444 3714 }
3445 3715 }
3446 3716
|
↓ open down ↓ |
31 lines elided |
↑ open up ↑ |
3447 3717 (void) ql_initialize_ip(ha);
3448 3718 }
3449 3719 ql_isp_rcvbuf(ha);
3450 3720 }
3451 3721 }
3452 3722
3453 3723 if (rval != FC_SUCCESS) {
3454 3724 EL(ha, "failed=%xh\n", rval);
3455 3725 } else {
3456 3726 /*EMPTY*/
3457 - QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3458 - ha->vp_index);
3727 + QL_PRINT_3(ha, "done\n");
3459 3728 }
3460 3729 return (rval);
3461 3730 }
3462 3731
3463 3732 /*
3464 3733 * ql_ub_free
3465 3734 * Free unsolicited buffers.
3466 3735 *
3467 3736 * Input:
3468 3737 * fca_handle = handle setup by ql_bind_port().
3469 3738 * count = number of buffers.
3470 3739 * tokens = token array for each buffer.
3471 3740 *
3472 3741 * Returns:
3473 3742 * FC_SUCCESS - the requested buffers have been freed.
3474 3743 * FC_UNBOUND - the fca_handle specified is not bound.
3475 3744 * FC_UB_BADTOKEN - an invalid token was encountered.
3476 3745 * No buffers have been released.
3477 3746 *
3478 3747 * Context:
3479 3748 * Kernel context.
3480 3749 */
3481 3750 static int
3482 3751 ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
3483 3752 {
3484 3753 ql_adapter_state_t *ha;
3485 3754 ql_srb_t *sp;
3486 3755 uint32_t index;
3487 3756 uint64_t ub_array_index;
3488 3757 int rval = FC_SUCCESS;
3489 3758
3490 3759 /* Check handle. */
3491 3760 ha = ql_fca_handle_to_state(fca_handle);
3492 3761 if (ha == NULL) {
3493 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3762 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3494 3763 (void *)fca_handle);
3495 3764 return (FC_UNBOUND);
3496 3765 }
3497 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3766 + QL_PRINT_3(ha, "started\n");
3498 3767
3499 - /* Acquire adapter state lock. */
3500 - ADAPTER_STATE_LOCK(ha);
3501 -
3502 3768 /* Check all returned tokens. */
3503 3769 for (index = 0; index < count; index++) {
3504 3770 fc_unsol_buf_t *ubp;
3505 3771
3506 3772 /* Check the token range. */
3507 3773 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3508 3774 EL(ha, "failed, FC_UB_BADTOKEN\n");
3509 3775 rval = FC_UB_BADTOKEN;
3510 3776 break;
3511 3777 }
3512 3778
3513 3779 /* Check the unsolicited buffer array. */
3514 3780 QL_UB_LOCK(ha);
3515 3781 ubp = ha->ub_array[ub_array_index];
3516 3782
3517 3783 if (ubp == NULL) {
3518 3784 EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3519 3785 rval = FC_UB_BADTOKEN;
3520 3786 QL_UB_UNLOCK(ha);
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
3521 3787 break;
3522 3788 }
3523 3789
3524 3790 /* Check the state of the unsolicited buffer. */
3525 3791 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3526 3792 sp->flags |= SRB_UB_FREE_REQUESTED;
3527 3793
3528 3794 while (!(sp->flags & SRB_UB_IN_FCA) ||
3529 3795 (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
3530 3796 QL_UB_UNLOCK(ha);
3531 - ADAPTER_STATE_UNLOCK(ha);
3532 3797 delay(drv_usectohz(100000));
3533 - ADAPTER_STATE_LOCK(ha);
3534 3798 QL_UB_LOCK(ha);
3535 3799 }
3536 3800 ha->ub_array[ub_array_index] = NULL;
3537 3801 QL_UB_UNLOCK(ha);
3538 3802 ql_free_unsolicited_buffer(ha, ubp);
3539 3803 }
3540 3804
3541 3805 if (rval == FC_SUCCESS) {
3542 3806 /*
3543 3807 * Signal any pending hardware reset when there are
3544 3808 * no more unsolicited buffers in use.
3545 3809 */
3546 3810 if (ha->ub_allocated == 0) {
3811 + QL_UB_LOCK(ha);
3547 3812 cv_broadcast(&ha->pha->cv_ub);
3813 + QL_UB_UNLOCK(ha);
3548 3814 }
3549 3815 }
3550 3816
3551 - /* Release adapter state lock. */
3552 - ADAPTER_STATE_UNLOCK(ha);
3553 -
3554 3817 if (rval != FC_SUCCESS) {
3555 3818 EL(ha, "failed=%xh\n", rval);
3556 3819 } else {
3557 3820 /*EMPTY*/
3558 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3821 + QL_PRINT_3(ha, "done\n");
3559 3822 }
3560 3823 return (rval);
3561 3824 }
3562 3825
3563 3826 /*
3564 3827 * ql_ub_release
3565 3828 * Release unsolicited buffers from FC Transport
3566 3829 * to FCA for future use.
3567 3830 *
3568 3831 * Input:
3569 3832 * fca_handle = handle setup by ql_bind_port().
3570 3833 * count = number of buffers.
3571 3834 * tokens = token array for each buffer.
3572 3835 *
3573 3836 * Returns:
3574 3837 * FC_SUCCESS - the requested buffers have been released.
3575 3838 * FC_UNBOUND - the fca_handle specified is not bound.
3576 3839 * FC_UB_BADTOKEN - an invalid token was encountered.
3577 3840 * No buffers have been released.
3578 3841 *
3579 3842 * Context:
3580 3843 * Kernel context.
3581 3844 */
3582 3845 static int
3583 3846 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3584 3847 {
|
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
3585 3848 ql_adapter_state_t *ha;
3586 3849 ql_srb_t *sp;
3587 3850 uint32_t index;
3588 3851 uint64_t ub_array_index;
3589 3852 int rval = FC_SUCCESS;
3590 3853 int ub_ip_updated = FALSE;
3591 3854
3592 3855 /* Check handle. */
3593 3856 ha = ql_fca_handle_to_state(fca_handle);
3594 3857 if (ha == NULL) {
3595 - QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3858 + QL_PRINT_2(NULL, ": failed, no adapter=%ph\n",
3596 3859 (void *)fca_handle);
3597 3860 return (FC_UNBOUND);
3598 3861 }
3599 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3862 + QL_PRINT_3(ha, "started\n");
3600 3863
3601 3864 	/* Acquire unsolicited buffer lock. */
3602 - ADAPTER_STATE_LOCK(ha);
3603 3865 QL_UB_LOCK(ha);
3604 3866
3605 3867 /* Check all returned tokens. */
3606 3868 for (index = 0; index < count; index++) {
3607 3869 /* Check the token range. */
3608 3870 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3609 3871 EL(ha, "failed, FC_UB_BADTOKEN\n");
3610 3872 rval = FC_UB_BADTOKEN;
3611 3873 break;
3612 3874 }
3613 3875
3614 3876 /* Check the unsolicited buffer array. */
3615 3877 if (ha->ub_array[ub_array_index] == NULL) {
3616 3878 EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3617 3879 rval = FC_UB_BADTOKEN;
3618 3880 break;
3619 3881 }
3620 3882
3621 3883 /* Check the state of the unsolicited buffer. */
3622 3884 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3623 3885 if (sp->flags & SRB_UB_IN_FCA) {
3624 3886 EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3625 3887 rval = FC_UB_BADTOKEN;
3626 3888 break;
3627 3889 }
3628 3890 }
3629 3891
3630 3892 /* If all tokens checkout, release the buffers. */
3631 3893 if (rval == FC_SUCCESS) {
3632 3894 /* Check all returned tokens. */
3633 3895 for (index = 0; index < count; index++) {
3634 3896 fc_unsol_buf_t *ubp;
3635 3897
3636 3898 ub_array_index = tokens[index];
3637 3899 ubp = ha->ub_array[ub_array_index];
3638 3900 sp = ubp->ub_fca_private;
3639 3901
3640 3902 ubp->ub_resp_flags = 0;
3641 3903 sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
|
↓ open down ↓ |
29 lines elided |
↑ open up ↑ |
3642 3904 sp->flags |= SRB_UB_IN_FCA;
3643 3905
3644 3906 /* IP buffer. */
3645 3907 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3646 3908 ub_ip_updated = TRUE;
3647 3909 }
3648 3910 }
3649 3911 }
3650 3912
3651 3913 QL_UB_UNLOCK(ha);
3652 - /* Release adapter state lock. */
3653 - ADAPTER_STATE_UNLOCK(ha);
3654 3914
3655 3915 /*
3656 3916 * XXX: We should call ql_isp_rcvbuf() to return a
3657 3917 * buffer to ISP only if the number of buffers fall below
3658 3918 * the low water mark.
3659 3919 */
3660 3920 if (ub_ip_updated) {
3661 3921 ql_isp_rcvbuf(ha);
3662 3922 }
3663 3923
3664 3924 if (rval != FC_SUCCESS) {
3665 3925 EL(ha, "failed, rval = %xh\n", rval);
3666 3926 } else {
3667 3927 /*EMPTY*/
3668 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3928 + QL_PRINT_3(ha, "done\n");
3669 3929 }
3670 3930 return (rval);
3671 3931 }
3672 3932
3673 3933 /*
3674 3934 * ql_abort
3675 3935 * Abort a packet.
3676 3936 *
3677 3937 * Input:
3678 3938 * fca_handle = handle setup by ql_bind_port().
3679 3939 * pkt = pointer to fc_packet.
3680 3940 * flags = KM_SLEEP flag.
3681 3941 *
3682 3942 * Returns:
3683 3943 * FC_SUCCESS - the packet has successfully aborted.
3684 3944 * FC_ABORTED - the packet has successfully aborted.
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
3685 3945 * FC_ABORTING - the packet is being aborted.
3686 3946 * FC_ABORT_FAILED - the packet could not be aborted.
3687 3947 * FC_TRANSPORT_ERROR - a transport error occurred while attempting
3688 3948 * to abort the packet.
3689 3949 * FC_BADEXCHANGE - no packet found.
3690 3950 * FC_UNBOUND - the fca_handle specified is not bound.
3691 3951 *
3692 3952 * Context:
3693 3953 * Kernel context.
3694 3954 */
3955 +/*ARGSUSED*/
3695 3956 static int
3696 3957 ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
3697 3958 {
3698 3959 port_id_t d_id;
3699 3960 ql_link_t *link;
3700 3961 ql_adapter_state_t *ha, *pha;
3701 - ql_srb_t *sp;
3702 3962 ql_tgt_t *tq;
3703 3963 ql_lun_t *lq;
3704 3964 int rval = FC_ABORTED;
3965 + ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
3705 3966
3706 3967 ha = ql_fca_handle_to_state(fca_handle);
3707 3968 if (ha == NULL) {
3708 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3969 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3709 3970 (void *)fca_handle);
3710 3971 return (FC_UNBOUND);
3711 3972 }
3712 3973
3713 3974 pha = ha->pha;
3714 3975
3715 - QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3976 + QL_PRINT_3(ha, "started\n");
3716 3977
3717 3978 /* Get target queue pointer. */
3718 3979 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
3719 3980 tq = ql_d_id_to_queue(ha, d_id);
3720 3981
3721 - if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
3722 - if (tq == NULL) {
3982 + if ((tq == NULL) || (lq = sp->lun_queue) == NULL ||
3983 + (pha->task_daemon_flags & LOOP_DOWN)) {
3984 + if (tq == NULL || lq == NULL) {
3723 3985 EL(ha, "failed, FC_TRANSPORT_ERROR\n");
3724 3986 rval = FC_TRANSPORT_ERROR;
3725 3987 } else {
3726 3988 EL(ha, "failed, FC_OFFLINE\n");
3727 3989 rval = FC_OFFLINE;
3728 3990 }
3729 3991 return (rval);
3730 3992 }
3731 3993
3732 - sp = (ql_srb_t *)pkt->pkt_fca_private;
3733 - lq = sp->lun_queue;
3734 -
3735 - /* Set poll flag if sleep wanted. */
3736 - if (flags == KM_SLEEP) {
3737 - sp->flags |= SRB_POLL;
3738 - }
3739 -
3740 3994 /* Acquire target queue lock. */
3741 3995 DEVICE_QUEUE_LOCK(tq);
3742 3996 REQUEST_RING_LOCK(ha);
3743 3997
3744 3998 /* If command not already started. */
3745 3999 if (!(sp->flags & SRB_ISP_STARTED)) {
3746 4000 /* Check pending queue for command. */
3747 4001 sp = NULL;
3748 4002 for (link = pha->pending_cmds.first; link != NULL;
3749 4003 link = link->next) {
3750 4004 sp = link->base_address;
3751 4005 if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3752 4006 /* Remove srb from q. */
3753 4007 ql_remove_link(&pha->pending_cmds, &sp->cmd);
3754 4008 break;
3755 4009 } else {
3756 4010 sp = NULL;
3757 4011 }
3758 4012 }
3759 4013 REQUEST_RING_UNLOCK(ha);
3760 4014
3761 4015 if (sp == NULL) {
3762 4016 /* Check for cmd on device queue. */
3763 4017 for (link = lq->cmd.first; link != NULL;
3764 4018 link = link->next) {
3765 4019 sp = link->base_address;
3766 4020 if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3767 4021 /* Remove srb from q. */
3768 4022 ql_remove_link(&lq->cmd, &sp->cmd);
3769 4023 break;
3770 4024 } else {
3771 4025 sp = NULL;
3772 4026 }
3773 4027 }
3774 4028 }
3775 4029 /* Release device lock */
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
3776 4030 DEVICE_QUEUE_UNLOCK(tq);
3777 4031
3778 4032 /* If command on target queue. */
3779 4033 if (sp != NULL) {
3780 4034 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
3781 4035
3782 4036 /* Set return status */
3783 4037 pkt->pkt_reason = CS_ABORTED;
3784 4038
3785 4039 sp->cmd.next = NULL;
3786 - ql_done(&sp->cmd);
4040 + ql_done(&sp->cmd, B_TRUE);
3787 4041 rval = FC_ABORTED;
3788 4042 } else {
3789 4043 EL(ha, "failed, FC_BADEXCHANGE\n");
3790 4044 rval = FC_BADEXCHANGE;
3791 4045 }
3792 4046 } else if (sp->flags & SRB_ISP_COMPLETED) {
3793 4047 /* Release device queue lock. */
3794 4048 REQUEST_RING_UNLOCK(ha);
3795 4049 DEVICE_QUEUE_UNLOCK(tq);
3796 4050 EL(ha, "failed, already done, FC_FAILURE\n");
3797 4051 rval = FC_FAILURE;
3798 4052 } else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
3799 4053 (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
3800 4054 /*
3801 4055 * If here, target data/resp ctio is with Fw.
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
3802 4056 * Since firmware is supposed to terminate such I/Os
3803 4057 * with an error, we need not do any thing. If FW
3804 4058 * decides not to terminate those IOs and simply keep
3805 4059 * quite then we need to initiate cleanup here by
3806 4060 * calling ql_done.
3807 4061 */
3808 4062 REQUEST_RING_UNLOCK(ha);
3809 4063 DEVICE_QUEUE_UNLOCK(tq);
3810 4064 rval = FC_ABORTED;
3811 4065 } else {
3812 - request_t *ep = pha->request_ring_bp;
3813 - uint16_t cnt;
4066 + ql_request_q_t *req_q;
4067 + request_t *pio;
4068 + uint32_t index;
3814 4069
4070 + REQUEST_RING_UNLOCK(ha);
4071 + DEVICE_QUEUE_UNLOCK(tq);
4072 +
4073 + INTR_LOCK(ha);
4074 + sp->flags |= SRB_ABORTING;
3815 4075 if (sp->handle != 0) {
3816 - for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
3817 - if (sp->handle == ddi_get32(
3818 - pha->hba_buf.acc_handle, &ep->handle)) {
3819 - ep->entry_type = INVALID_ENTRY_TYPE;
3820 - break;
4076 + index = sp->handle & OSC_INDEX_MASK;
4077 + if (ha->outstanding_cmds[index] == sp) {
4078 + ha->outstanding_cmds[index] =
4079 + QL_ABORTED_SRB(ha);
4080 + }
4081 + if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
4082 + req_q = ha->req_q[1];
4083 + } else {
4084 + req_q = ha->req_q[0];
4085 + }
4086 + pio = sp->request_ring_ptr;
4087 + if (sp->handle ==
4088 + ddi_get32(req_q->req_ring.acc_handle,
4089 + &pio->handle)) {
4090 + EL(ha, "inflight sp=%ph, handle=%xh, "
4091 + "invalidated\n", (void *)sp, sp->handle);
4092 + for (index = 0; index < sp->req_cnt; index++) {
4093 + ddi_put8(req_q->req_ring.acc_handle,
4094 + &pio->entry_type,
4095 + ABORTED_ENTRY_TYPE);
4096 + pio++;
4097 + if (pio == (request_t *)
4098 + ((uintptr_t)req_q->req_ring.bp +
4099 + req_q->req_ring.size)) {
4100 + pio = req_q->req_ring.bp;
4101 + }
3821 4102 }
3822 - ep++;
3823 4103 }
4104 + /* Decrement outstanding commands on device. */
4105 + if (tq->outcnt != 0) {
4106 + tq->outcnt--;
4107 + }
4108 + if (sp->flags & SRB_FCP_CMD_PKT &&
4109 + lq->lun_outcnt != 0) {
4110 + lq->lun_outcnt--;
4111 + }
4112 + /* Remove command from watchdog queue. */
4113 + if (sp->flags & SRB_WATCHDOG_ENABLED) {
4114 + ql_remove_link(&tq->wdg, &sp->wdg);
4115 + sp->flags &= ~SRB_WATCHDOG_ENABLED;
4116 + }
4117 + /* Release device queue lock. */
4118 + INTR_UNLOCK(ha);
4119 +
4120 + (void) ql_abort_command(ha, sp);
4121 + sp->handle = 0;
4122 + } else {
4123 + /* Release device queue lock. */
4124 + INTR_UNLOCK(ha);
3824 4125 }
3825 4126
3826 - /* Release device queue lock. */
3827 - REQUEST_RING_UNLOCK(ha);
3828 - DEVICE_QUEUE_UNLOCK(tq);
3829 -
3830 - sp->flags |= SRB_ABORTING;
3831 - (void) ql_abort_command(ha, sp);
4127 + sp->flags &= ~SRB_IN_TOKEN_ARRAY;
4128 + sp->flags |= SRB_ISP_COMPLETED;
3832 4129 pkt->pkt_reason = CS_ABORTED;
3833 4130 rval = FC_ABORTED;
3834 4131 }
3835 4132
3836 - QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4133 + QL_PRINT_3(ha, "done\n");
3837 4134
3838 4135 return (rval);
3839 4136 }
3840 4137
/*
 * ql_reset
 *	Reset link or hardware.
 *
 * Input:
 *	fca_handle = handle setup by ql_bind_port().
 *	cmd = reset type command.
 *
 * Returns:
 *	FC_SUCCESS - reset has successfully finished.
 *	FC_UNBOUND - the fca_handle specified is not bound.
 *	FC_FAILURE - reset failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(ha, "started, cmd=%d\n", cmd);

	/*
	 * Refuse to stack a reset on top of an in-progress ISP abort,
	 * loop resync, or driver stall; the caller is expected to retry.
	 */
	if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
	    DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
		EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
		    ha->task_daemon_flags);
		return (FC_TRAN_BUSY);
	}

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only valid on the physical port. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only when it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* Dump firmware core first if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				/* Virtual ports reset the loop instead. */
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/* Inform ULP (via state callback) to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		/* Let the task daemon propagate the state change. */
		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
3969 4271
3970 4272 /*
3971 4273 * ql_port_manage
3972 4274 * Perform port management or diagnostics.
3973 4275 *
3974 4276 * Input:
3975 4277 * fca_handle = handle setup by ql_bind_port().
3976 4278 * cmd = pointer to command structure.
3977 4279 *
3978 4280 * Returns:
3979 4281 * FC_SUCCESS - the request completed successfully.
3980 4282 * FC_FAILURE - the request did not complete successfully.
3981 4283 * FC_UNBOUND - the fca_handle specified is not bound.
3982 4284 *
3983 4285 * Context:
3984 4286 * Kernel context.
3985 4287 */
3986 4288 static int
3987 4289 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3988 4290 {
3989 4291 clock_t timer;
3990 4292 uint16_t index;
3991 4293 uint32_t *bp;
3992 4294 port_id_t d_id;
3993 4295 ql_link_t *link;
3994 4296 ql_adapter_state_t *ha, *pha;
3995 4297 ql_tgt_t *tq;
3996 4298 dma_mem_t buffer_xmt, buffer_rcv;
3997 4299 size_t length;
|
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
3998 4300 uint32_t cnt;
3999 4301 char buf[80];
4000 4302 lbp_t *lb;
4001 4303 ql_mbx_data_t mr;
4002 4304 app_mbx_cmd_t *mcp;
4003 4305 int i0;
4004 4306 uint8_t *bptr;
4005 4307 int rval2, rval = FC_SUCCESS;
4006 4308 uint32_t opcode;
4007 4309 uint32_t set_flags = 0;
4310 + fc_fca_p2p_info_t *p2p_info;
4008 4311
4009 4312 ha = ql_fca_handle_to_state(fca_handle);
4010 4313 if (ha == NULL) {
4011 - QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4314 + QL_PRINT_2(NULL, ": failed, no adapter=%ph\n",
4012 4315 (void *)fca_handle);
4013 4316 return (FC_UNBOUND);
4014 4317 }
4015 4318 pha = ha->pha;
4016 4319
4017 - QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4018 - cmd->pm_cmd_code);
4320 +#ifdef QL_DEBUG_LEVEL_10
4321 + if (cmd->pm_cmd_code != FC_PORT_GET_FW_REV) {
4322 + QL_PRINT_10(ha, "started=%xh\n", cmd->pm_cmd_code);
4323 + }
4324 +#endif
4019 4325
4020 - ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4021 -
4022 - /*
4023 - * Wait for all outstanding commands to complete
4024 - */
4025 - index = (uint16_t)ql_wait_outstanding(ha);
4026 -
4027 - if (index != MAX_OUTSTANDING_COMMANDS) {
4028 - ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4029 - ql_restart_queues(ha);
4030 - EL(ha, "failed, FC_TRAN_BUSY\n");
4326 + if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
4327 + DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
4328 + EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
4329 + ha->task_daemon_flags);
4031 4330 return (FC_TRAN_BUSY);
4032 4331 }
4033 4332
4034 4333 switch (cmd->pm_cmd_code) {
4035 4334 case FC_PORT_BYPASS:
4036 4335 d_id.b24 = *cmd->pm_cmd_buf;
4037 4336 tq = ql_d_id_to_queue(ha, d_id);
4038 4337 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4039 4338 EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4040 4339 rval = FC_FAILURE;
4041 4340 }
4042 4341 break;
4043 4342 case FC_PORT_UNBYPASS:
4044 4343 d_id.b24 = *cmd->pm_cmd_buf;
4045 4344 tq = ql_d_id_to_queue(ha, d_id);
4046 4345 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4047 4346 EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4048 4347 rval = FC_FAILURE;
4049 4348 }
4050 4349 break;
4051 4350 case FC_PORT_GET_FW_REV:
4052 4351 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4053 4352 pha->fw_minor_version, pha->fw_subminor_version);
4054 4353 length = strlen(buf) + 1;
4055 4354 if (cmd->pm_data_len < length) {
4056 4355 cmd->pm_data_len = length;
4057 4356 EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4058 4357 rval = FC_FAILURE;
4059 4358 } else {
4060 4359 (void) strcpy(cmd->pm_data_buf, buf);
4061 4360 }
4062 4361 break;
4063 4362
4064 4363 case FC_PORT_GET_FCODE_REV: {
4065 4364 caddr_t fcode_ver_buf = NULL;
4066 4365
4067 4366 i0 = 0;
4068 4367 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4069 4368 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4070 4369 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4071 4370 (caddr_t)&fcode_ver_buf, &i0);
4072 4371 length = (uint_t)i0;
4073 4372
4074 4373 if (rval2 != DDI_PROP_SUCCESS) {
4075 4374 EL(ha, "failed, getting version = %xh\n", rval2);
4076 4375 length = 20;
4077 4376 fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4078 4377 if (fcode_ver_buf != NULL) {
4079 4378 (void) sprintf(fcode_ver_buf,
4080 4379 "NO FCODE FOUND");
4081 4380 }
4082 4381 }
4083 4382
4084 4383 if (cmd->pm_data_len < length) {
4085 4384 EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4086 4385 "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4087 4386 cmd->pm_data_len = length;
4088 4387 rval = FC_FAILURE;
4089 4388 } else if (fcode_ver_buf != NULL) {
4090 4389 bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4091 4390 length);
4092 4391 }
4093 4392
4094 4393 if (fcode_ver_buf != NULL) {
4095 4394 kmem_free(fcode_ver_buf, length);
4096 4395 }
4097 4396 break;
4098 4397 }
4099 4398
4100 4399 case FC_PORT_GET_DUMP:
4101 4400 QL_DUMP_LOCK(pha);
4102 4401 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4103 4402 EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4104 4403 "length=%lxh\n", cmd->pm_data_len);
4105 4404 cmd->pm_data_len = pha->risc_dump_size;
4106 4405 rval = FC_FAILURE;
4107 4406 } else if (pha->ql_dump_state & QL_DUMPING) {
4108 4407 EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4109 4408 rval = FC_TRAN_BUSY;
|
↓ open down ↓ |
69 lines elided |
↑ open up ↑ |
4110 4409 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4111 4410 (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4112 4411 pha->ql_dump_state |= QL_DUMP_UPLOADED;
4113 4412 } else {
4114 4413 EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4115 4414 rval = FC_FAILURE;
4116 4415 }
4117 4416 QL_DUMP_UNLOCK(pha);
4118 4417 break;
4119 4418 case FC_PORT_FORCE_DUMP:
4120 - PORTMANAGE_LOCK(ha);
4121 4419 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4122 4420 EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4123 4421 rval = FC_FAILURE;
4124 4422 }
4125 - PORTMANAGE_UNLOCK(ha);
4126 4423 break;
4127 - case FC_PORT_DOWNLOAD_FW:
4128 - PORTMANAGE_LOCK(ha);
4129 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
4130 - if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4131 - (uint32_t)cmd->pm_data_len,
4132 - ha->flash_fw_addr << 2) != QL_SUCCESS) {
4133 - EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4134 - rval = FC_FAILURE;
4135 - }
4136 - ql_reset_chip(ha);
4137 - set_flags |= ISP_ABORT_NEEDED;
4138 - } else {
4139 - /* Save copy of the firmware. */
4140 - if (pha->risc_code != NULL) {
4141 - kmem_free(pha->risc_code, pha->risc_code_size);
4142 - pha->risc_code = NULL;
4143 - pha->risc_code_size = 0;
4144 - }
4145 -
4146 - pha->risc_code = kmem_alloc(cmd->pm_data_len,
4147 - KM_SLEEP);
4148 - if (pha->risc_code != NULL) {
4149 - pha->risc_code_size =
4150 - (uint32_t)cmd->pm_data_len;
4151 - bcopy(cmd->pm_data_buf, pha->risc_code,
4152 - cmd->pm_data_len);
4153 -
4154 - /* Do abort to force reload. */
4155 - ql_reset_chip(ha);
4156 - if (ql_abort_isp(ha) != QL_SUCCESS) {
4157 - kmem_free(pha->risc_code,
4158 - pha->risc_code_size);
4159 - pha->risc_code = NULL;
4160 - pha->risc_code_size = 0;
4161 - ql_reset_chip(ha);
4162 - (void) ql_abort_isp(ha);
4163 - EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4164 - " FC_FAILURE\n");
4165 - rval = FC_FAILURE;
4166 - }
4167 - }
4168 - }
4169 - PORTMANAGE_UNLOCK(ha);
4170 - break;
4171 4424 case FC_PORT_GET_DUMP_SIZE:
4172 4425 bp = (uint32_t *)cmd->pm_data_buf;
4173 4426 *bp = pha->risc_dump_size;
4174 4427 break;
4175 4428 case FC_PORT_DIAG:
4176 - /*
4177 - * Prevents concurrent diags
4178 - */
4179 - PORTMANAGE_LOCK(ha);
4429 + EL(ha, "diag cmd=%xh\n", cmd->pm_cmd_flags);
4180 4430
4181 4431 /* Wait for suspension to end. */
4182 4432 for (timer = 0; timer < 3000 &&
4183 4433 pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4184 4434 ql_delay(ha, 10000);
4185 4435 }
4186 4436
4187 4437 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4188 4438 EL(ha, "failed, FC_TRAN_BUSY-2\n");
4189 4439 rval = FC_TRAN_BUSY;
4190 - PORTMANAGE_UNLOCK(ha);
4191 4440 break;
4192 4441 }
4193 4442
4443 + if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4444 + EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4445 + rval2);
4446 + ql_restart_driver(ha);
4447 + rval = FC_TRAN_BUSY;
4448 + break;
4449 + }
4450 +
4194 4451 switch (cmd->pm_cmd_flags) {
4195 4452 case QL_DIAG_EXEFMW:
4196 4453 if (ql_start_firmware(ha) != QL_SUCCESS) {
4197 4454 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4198 4455 rval = FC_FAILURE;
4199 4456 }
4200 4457 break;
4201 4458 case QL_DIAG_CHKCMDQUE:
4202 - for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4459 + for (i0 = 1, cnt = 0; i0 < pha->osc_max_cnt;
4203 4460 i0++) {
4204 4461 cnt += (pha->outstanding_cmds[i0] != NULL);
4205 4462 }
4206 4463 if (cnt != 0) {
4207 4464 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4208 4465 "FC_FAILURE\n");
4209 4466 rval = FC_FAILURE;
4210 4467 }
4211 4468 break;
4212 4469 case QL_DIAG_FMWCHKSUM:
4213 4470 if (ql_verify_checksum(ha) != QL_SUCCESS) {
4214 4471 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4215 4472 "FC_FAILURE\n");
4216 4473 rval = FC_FAILURE;
4217 4474 }
4218 4475 break;
4219 4476 case QL_DIAG_SLFTST:
4220 4477 if (ql_online_selftest(ha) != QL_SUCCESS) {
4221 4478 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4222 4479 rval = FC_FAILURE;
4223 4480 }
4224 4481 ql_reset_chip(ha);
4225 4482 set_flags |= ISP_ABORT_NEEDED;
4226 4483 break;
4227 4484 case QL_DIAG_REVLVL:
4228 4485 if (cmd->pm_stat_len <
4229 4486 sizeof (ql_adapter_revlvl_t)) {
4230 4487 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4231 4488 "slen=%lxh, rlvllen=%lxh\n",
4232 4489 cmd->pm_stat_len,
4233 4490 sizeof (ql_adapter_revlvl_t));
4234 4491 rval = FC_NOMEM;
4235 4492 } else {
4236 4493 bcopy((void *)&(pha->adapter_stats->revlvl),
4237 4494 cmd->pm_stat_buf,
4238 4495 (size_t)cmd->pm_stat_len);
4239 4496 cmd->pm_stat_len =
4240 4497 sizeof (ql_adapter_revlvl_t);
4241 4498 }
4242 4499 break;
4243 4500 case QL_DIAG_LPBMBX:
4244 4501
4245 4502 if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4246 4503 EL(ha, "failed, QL_DIAG_LPBMBX "
|
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
4247 4504 "FC_INVALID_REQUEST, pmlen=%lxh, "
4248 4505 "reqd=%lxh\n", cmd->pm_data_len,
4249 4506 sizeof (struct app_mbx_cmd));
4250 4507 rval = FC_INVALID_REQUEST;
4251 4508 break;
4252 4509 }
4253 4510 /*
4254 4511 * Don't do the wrap test on a 2200 when the
4255 4512 * firmware is running.
4256 4513 */
4257 - if (!CFG_IST(ha, CFG_CTRL_2200)) {
4514 + if (!CFG_IST(ha, CFG_CTRL_22XX)) {
4258 4515 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4259 4516 mr.mb[1] = mcp->mb[1];
4260 4517 mr.mb[2] = mcp->mb[2];
4261 4518 mr.mb[3] = mcp->mb[3];
4262 4519 mr.mb[4] = mcp->mb[4];
4263 4520 mr.mb[5] = mcp->mb[5];
4264 4521 mr.mb[6] = mcp->mb[6];
4265 4522 mr.mb[7] = mcp->mb[7];
4266 4523
4267 4524 bcopy(&mr.mb[0], &mr.mb[10],
4268 4525 sizeof (uint16_t) * 8);
4269 4526
4270 4527 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4271 4528 EL(ha, "failed, QL_DIAG_LPBMBX "
4272 4529 "FC_FAILURE\n");
4273 4530 rval = FC_FAILURE;
4274 4531 break;
4275 4532 } else {
4276 4533 for (i0 = 1; i0 < 8; i0++) {
4277 4534 if (mr.mb[i0] !=
4278 4535 mr.mb[i0 + 10]) {
4279 4536 EL(ha, "failed, "
4280 4537 "QL_DIAG_LPBMBX "
4281 4538 "FC_FAILURE-2\n");
4282 4539 rval = FC_FAILURE;
4283 4540 break;
4284 4541 }
4285 4542 }
4286 4543 }
4287 4544
4288 4545 if (rval == FC_FAILURE) {
4289 4546 (void) ql_flash_errlog(ha,
4290 4547 FLASH_ERRLOG_ISP_ERR, 0,
4291 4548 RD16_IO_REG(ha, hccr),
4292 4549 RD16_IO_REG(ha, istatus));
4293 4550 set_flags |= ISP_ABORT_NEEDED;
4294 4551 }
4295 4552 }
4296 4553 break;
4297 4554 case QL_DIAG_LPBDTA:
4298 4555 /*
4299 4556 * For loopback data, we receive the
4300 4557 * data back in pm_stat_buf. This provides
4301 4558 * the user an opportunity to compare the
4302 4559 * transmitted and received data.
4303 4560 *
4304 4561 * NB: lb->options are:
4305 4562 * 0 --> Ten bit loopback
4306 4563 * 1 --> One bit loopback
4307 4564 * 2 --> External loopback
4308 4565 */
4309 4566 if (cmd->pm_data_len > 65536) {
4310 4567 rval = FC_TOOMANY;
4311 4568 EL(ha, "failed, QL_DIAG_LPBDTA "
4312 4569 "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4313 4570 break;
4314 4571 }
4315 4572 if (ql_get_dma_mem(ha, &buffer_xmt,
4316 4573 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4317 4574 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4318 4575 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4319 4576 rval = FC_NOMEM;
4320 4577 break;
4321 4578 }
4322 4579 if (ql_get_dma_mem(ha, &buffer_rcv,
4323 4580 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4324 4581 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
|
↓ open down ↓ |
57 lines elided |
↑ open up ↑ |
4325 4582 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4326 4583 rval = FC_NOMEM;
4327 4584 break;
4328 4585 }
4329 4586 ddi_rep_put8(buffer_xmt.acc_handle,
4330 4587 (uint8_t *)cmd->pm_data_buf,
4331 4588 (uint8_t *)buffer_xmt.bp,
4332 4589 cmd->pm_data_len, DDI_DEV_AUTOINCR);
4333 4590
4334 4591 /* 22xx's adapter must be in loop mode for test. */
4335 - if (CFG_IST(ha, CFG_CTRL_2200)) {
4592 + if (CFG_IST(ha, CFG_CTRL_22XX)) {
4336 4593 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4337 4594 if (ha->flags & POINT_TO_POINT ||
4338 4595 (ha->task_daemon_flags & LOOP_DOWN &&
4339 4596 *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4340 4597 cnt = *bptr;
4341 4598 *bptr = (uint8_t)
4342 4599 (*bptr & ~(BIT_6|BIT_5|BIT_4));
4343 4600 (void) ql_abort_isp(ha);
4344 4601 *bptr = (uint8_t)cnt;
4345 4602 }
4346 4603 }
4347 4604
4348 4605 /* Shutdown IP. */
4349 4606 if (pha->flags & IP_INITIALIZED) {
4350 4607 (void) ql_shutdown_ip(pha);
4351 4608 }
4352 4609
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
4353 4610 lb = (lbp_t *)cmd->pm_cmd_buf;
4354 4611 lb->transfer_count =
4355 4612 (uint32_t)cmd->pm_data_len;
4356 4613 lb->transfer_segment_count = 0;
4357 4614 lb->receive_segment_count = 0;
4358 4615 lb->transfer_data_address =
4359 4616 buffer_xmt.cookie.dmac_address;
4360 4617 lb->receive_data_address =
4361 4618 buffer_rcv.cookie.dmac_address;
4362 4619
4620 + if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4621 + (void) ql_set_loop_point(ha, lb->options);
4622 + }
4623 +
4363 4624 if (ql_loop_back(ha, 0, lb,
4364 4625 buffer_xmt.cookie.dmac_notused,
4365 4626 buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4366 4627 bzero((void *)cmd->pm_stat_buf,
4367 4628 cmd->pm_stat_len);
4368 4629 ddi_rep_get8(buffer_rcv.acc_handle,
4369 4630 (uint8_t *)cmd->pm_stat_buf,
4370 4631 (uint8_t *)buffer_rcv.bp,
4371 4632 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4372 4633 rval = FC_SUCCESS;
4373 4634 } else {
4374 4635 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4375 4636 rval = FC_FAILURE;
4376 4637 }
4377 4638
4639 + if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4640 + (void) ql_set_loop_point(ha, 0);
4641 + }
4642 +
4378 4643 ql_free_phys(ha, &buffer_xmt);
4379 4644 ql_free_phys(ha, &buffer_rcv);
4380 4645
4381 4646 /* Needed to recover the f/w */
4382 4647 set_flags |= ISP_ABORT_NEEDED;
4383 4648
4384 4649 /* Restart IP if it was shutdown. */
4385 4650 if (pha->flags & IP_ENABLED &&
4386 4651 !(pha->flags & IP_INITIALIZED)) {
4387 4652 (void) ql_initialize_ip(pha);
4388 4653 ql_isp_rcvbuf(pha);
4389 4654 }
4390 4655
4391 4656 break;
4392 4657 case QL_DIAG_ECHO: {
4393 4658 /*
4394 4659 * issue an echo command with a user supplied
4395 4660 * data pattern and destination address
4396 4661 */
4397 4662 echo_t echo; /* temp echo struct */
4398 4663
4399 4664 /* Setup echo cmd & adjust for platform */
4400 4665 opcode = QL_ECHO_CMD;
4401 4666 BIG_ENDIAN_32(&opcode);
4402 4667
4403 4668 /*
4404 4669 * due to limitations in the ql
4405 4671 			 * firmware the echo data field is
4406 4671 * limited to 220
4407 4672 */
4408 4673 if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4409 4674 (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4410 4675 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4411 4676 "cmdl1=%lxh, statl2=%lxh\n",
4412 4677 cmd->pm_cmd_len, cmd->pm_stat_len);
4413 4678 rval = FC_TOOMANY;
4414 4679 break;
4415 4680 }
4416 4681
4417 4682 /*
4418 4683 * the input data buffer has the user
4419 4684 * supplied data pattern. The "echoed"
4420 4685 * data will be DMAed into the output
4421 4686 * data buffer. Therefore the length
4422 4687 * of the output buffer must be equal
4423 4688 * to or greater then the input buffer
4424 4689 * length
4425 4690 */
4426 4691 if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4427 4692 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4428 4693 " cmdl1=%lxh, statl2=%lxh\n",
4429 4694 cmd->pm_cmd_len, cmd->pm_stat_len);
4430 4695 rval = FC_TOOMANY;
4431 4696 break;
4432 4697 }
4433 4698 /* add four bytes for the opcode */
4434 4699 echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4435 4700
4436 4701 /*
|
↓ open down ↓ |
49 lines elided |
↑ open up ↑ |
4437 4702 * are we 32 or 64 bit addressed???
4438 4703 * We need to get the appropriate
4439 4704 * DMA and set the command options;
4440 4705 * 64 bit (bit 6) or 32 bit
4441 4706 * (no bit 6) addressing.
4442 4707 * while we are at it lets ask for
4443 4708 * real echo (bit 15)
4444 4709 */
4445 4710 echo.options = BIT_15;
4446 4711 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4447 - !(CFG_IST(ha, CFG_CTRL_8081))) {
4712 + !(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
4448 4713 echo.options = (uint16_t)
4449 4714 (echo.options | BIT_6);
4450 4715 }
4451 4716
4452 4717 /*
4453 4718 * Set up the DMA mappings for the
4454 4719 * output and input data buffers.
4455 4720 * First the output buffer
4456 4721 */
4457 4722 if (ql_get_dma_mem(ha, &buffer_xmt,
4458 4723 (uint32_t)(cmd->pm_data_len + 4),
4459 4724 LITTLE_ENDIAN_DMA,
4460 4725 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4461 4726 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4462 4727 rval = FC_NOMEM;
4463 4728 break;
4464 4729 }
4465 4730 echo.transfer_data_address = buffer_xmt.cookie;
4466 4731
4467 4732 /* Next the input buffer */
4468 4733 if (ql_get_dma_mem(ha, &buffer_rcv,
4469 4734 (uint32_t)(cmd->pm_data_len + 4),
4470 4735 LITTLE_ENDIAN_DMA,
4471 4736 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4472 4737 /*
4473 4738 * since we could not allocate
4474 4739 * DMA space for the input
4475 4740 * buffer we need to clean up
4476 4741 * by freeing the DMA space
4477 4742 * we allocated for the output
4478 4743 * buffer
4479 4744 */
4480 4745 ql_free_phys(ha, &buffer_xmt);
4481 4746 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4482 4747 rval = FC_NOMEM;
4483 4748 break;
4484 4749 }
4485 4750 echo.receive_data_address = buffer_rcv.cookie;
4486 4751
4487 4752 /*
4488 4753 * copy the 4 byte ECHO op code to the
4489 4754 * allocated DMA space
4490 4755 */
4491 4756 ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4492 4757 (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4493 4758
4494 4759 /*
4495 4760 * copy the user supplied data to the
4496 4761 * allocated DMA space
4497 4762 */
4498 4763 ddi_rep_put8(buffer_xmt.acc_handle,
4499 4764 (uint8_t *)cmd->pm_cmd_buf,
4500 4765 (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4501 4766 DDI_DEV_AUTOINCR);
4502 4767
4503 4768 /* Shutdown IP. */
4504 4769 if (pha->flags & IP_INITIALIZED) {
4505 4770 (void) ql_shutdown_ip(pha);
4506 4771 }
4507 4772
4508 4773 /* send the echo */
4509 4774 if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4510 4775 ddi_rep_put8(buffer_rcv.acc_handle,
4511 4776 (uint8_t *)buffer_rcv.bp + 4,
4512 4777 (uint8_t *)cmd->pm_stat_buf,
4513 4778 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4514 4779 } else {
4515 4780 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4516 4781 rval = FC_FAILURE;
4517 4782 }
4518 4783
4519 4784 /* Restart IP if it was shutdown. */
4520 4785 if (pha->flags & IP_ENABLED &&
4521 4786 !(pha->flags & IP_INITIALIZED)) {
4522 4787 (void) ql_initialize_ip(pha);
4523 4788 ql_isp_rcvbuf(pha);
4524 4789 }
4525 4790 /* free up our DMA buffers */
|
↓ open down ↓ |
68 lines elided |
↑ open up ↑ |
4526 4791 ql_free_phys(ha, &buffer_xmt);
4527 4792 ql_free_phys(ha, &buffer_rcv);
4528 4793 break;
4529 4794 }
4530 4795 default:
4531 4796 EL(ha, "unknown=%xh, FC_PORT_DIAG "
4532 4797 "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4533 4798 rval = FC_INVALID_REQUEST;
4534 4799 break;
4535 4800 }
4536 - PORTMANAGE_UNLOCK(ha);
4801 + ql_restart_driver(ha);
4537 4802 break;
4538 4803 case FC_PORT_LINK_STATE:
4539 4804 /* Check for name equal to null. */
4540 4805 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4541 4806 index++) {
4542 4807 if (cmd->pm_cmd_buf[index] != 0) {
4543 4808 break;
4544 4809 }
4545 4810 }
4546 4811
4547 4812 /* If name not null. */
4548 4813 if (index < 8 && cmd->pm_cmd_len >= 8) {
4549 4814 /* Locate device queue. */
4550 4815 tq = NULL;
4551 4816 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4552 4817 tq == NULL; index++) {
4553 4818 for (link = ha->dev[index].first; link != NULL;
4554 4819 link = link->next) {
4555 4820 tq = link->base_address;
4556 4821
4557 4822 if (bcmp((void *)&tq->port_name[0],
4558 4823 (void *)cmd->pm_cmd_buf, 8) == 0) {
4559 4824 break;
4560 4825 } else {
4561 4826 tq = NULL;
4562 4827 }
4563 4828 }
4564 4829 }
4565 4830
4566 4831 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4567 4832 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4568 4833 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4569 4834 } else {
4570 4835 cnt = FC_PORT_SPEED_MASK(ha->state) |
|
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
4571 4836 FC_STATE_OFFLINE;
4572 4837 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4573 4838 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4574 4839 }
4575 4840 } else {
4576 4841 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4577 4842 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4578 4843 }
4579 4844 break;
4580 4845 case FC_PORT_INITIALIZE:
4846 + if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4847 + EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4848 + rval2);
4849 + ql_restart_driver(ha);
4850 + rval = FC_TRAN_BUSY;
4851 + break;
4852 + }
4581 4853 if (cmd->pm_cmd_len >= 8) {
4582 4854 tq = NULL;
4583 4855 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4584 4856 tq == NULL; index++) {
4585 4857 for (link = ha->dev[index].first; link != NULL;
4586 4858 link = link->next) {
4587 4859 tq = link->base_address;
4588 4860
4589 4861 if (bcmp((void *)&tq->port_name[0],
4590 4862 (void *)cmd->pm_cmd_buf, 8) == 0) {
4591 4863 if (!VALID_DEVICE_ID(ha,
4592 4864 tq->loop_id)) {
4593 4865 tq = NULL;
4594 4866 }
4595 4867 break;
4596 4868 } else {
4597 4869 tq = NULL;
4598 4870 }
4599 4871 }
4600 4872 }
4601 4873
4602 4874 if (tq == NULL || ql_target_reset(ha, tq,
4603 4875 ha->loop_reset_delay) != QL_SUCCESS) {
|
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
4604 4876 EL(ha, "failed, FC_PORT_INITIALIZE "
4605 4877 "FC_FAILURE\n");
4606 4878 rval = FC_FAILURE;
4607 4879 }
4608 4880 } else {
4609 4881 EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4610 4882 "clen=%lxh\n", cmd->pm_cmd_len);
4611 4883
4612 4884 rval = FC_FAILURE;
4613 4885 }
4886 + ql_restart_driver(ha);
4614 4887 break;
4615 4888 case FC_PORT_RLS:
4616 4889 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4617 4890 EL(ha, "failed, buffer size passed: %lxh, "
4618 4891 "req: %lxh\n", cmd->pm_data_len,
4619 4892 (sizeof (fc_rls_acc_t)));
4620 4893 rval = FC_FAILURE;
4621 4894 } else if (LOOP_NOT_READY(pha)) {
4622 4895 EL(ha, "loop NOT ready\n");
4623 4896 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4624 4897 } else if (ql_get_link_status(ha, ha->loop_id,
4625 4898 cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
4626 4899 EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4627 4900 rval = FC_FAILURE;
4628 4901 #ifdef _BIG_ENDIAN
4629 4902 } else {
4630 4903 fc_rls_acc_t *rls;
4631 4904
4632 4905 rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4633 4906 LITTLE_ENDIAN_32(&rls->rls_link_fail);
4634 4907 LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4635 4908 LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4909 + LITTLE_ENDIAN_32(&rls->rls_prim_seq_err);
4910 + LITTLE_ENDIAN_32(&rls->rls_invalid_word);
4636 4911 LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4637 4912 #endif /* _BIG_ENDIAN */
4638 4913 }
4639 4914 break;
4640 4915 case FC_PORT_GET_NODE_ID:
4641 4916 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4642 4917 cmd->pm_data_buf) != QL_SUCCESS) {
4643 4918 EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4644 4919 rval = FC_FAILURE;
4645 4920 }
4646 4921 break;
4647 4922 case FC_PORT_SET_NODE_ID:
4648 4923 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4649 4924 cmd->pm_data_buf) != QL_SUCCESS) {
4650 4925 EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4651 4926 rval = FC_FAILURE;
4652 4927 }
4653 4928 break;
4654 4929 case FC_PORT_DOWNLOAD_FCODE:
4655 - PORTMANAGE_LOCK(ha);
4656 - if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4930 + if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4931 + EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4932 + rval2);
4933 + ql_restart_driver(ha);
4934 + rval = FC_TRAN_BUSY;
4935 + break;
4936 + }
4937 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
4657 4938 rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4658 4939 (uint32_t)cmd->pm_data_len);
4659 4940 } else {
4660 4941 if (cmd->pm_data_buf[0] == 4 &&
4661 4942 cmd->pm_data_buf[8] == 0 &&
4662 4943 cmd->pm_data_buf[9] == 0x10 &&
4663 4944 cmd->pm_data_buf[10] == 0 &&
4664 4945 cmd->pm_data_buf[11] == 0) {
4665 4946 rval = ql_24xx_load_flash(ha,
4666 4947 (uint8_t *)cmd->pm_data_buf,
4667 4948 (uint32_t)cmd->pm_data_len,
4668 4949 ha->flash_fw_addr << 2);
4669 4950 } else {
4670 4951 rval = ql_24xx_load_flash(ha,
4671 4952 (uint8_t *)cmd->pm_data_buf,
4672 4953 (uint32_t)cmd->pm_data_len, 0);
4673 4954 }
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
4674 4955 }
4675 4956
4676 4957 if (rval != QL_SUCCESS) {
4677 4958 EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4678 4959 rval = FC_FAILURE;
4679 4960 } else {
4680 4961 rval = FC_SUCCESS;
4681 4962 }
4682 4963 ql_reset_chip(ha);
4683 4964 set_flags |= ISP_ABORT_NEEDED;
4684 - PORTMANAGE_UNLOCK(ha);
4965 + ql_restart_driver(ha);
4685 4966 break;
4967 +
4968 + case FC_PORT_GET_P2P_INFO:
4969 +
4970 + bzero(cmd->pm_data_buf, cmd->pm_data_len);
4971 + if (cmd->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
4972 + EL(ha, "inadequate data length")
4973 + rval = FC_NOMEM;
4974 + break;
4975 + }
4976 +
4977 + p2p_info = (fc_fca_p2p_info_t *)cmd->pm_data_buf;
4978 +
4979 + if ((ha->topology & QL_N_PORT) &&
4980 + (ha->flags & POINT_TO_POINT)) {
4981 + p2p_info->fca_d_id = ha->d_id.b24;
4982 + p2p_info->d_id = ha->n_port->d_id.b24;
4983 +
4984 + bcopy((void *) &ha->n_port->port_name[0],
4985 + (caddr_t)&p2p_info->pwwn, 8);
4986 + bcopy((void *) &ha->n_port->node_name[0],
4987 + (caddr_t)&p2p_info->nwwn, 8);
4988 + rval = FC_SUCCESS;
4989 +
4990 + EL(ha, "P2P HID=%xh, d_id=%xh, WWPN=%02x%02x%02x%02x"
4991 + "%02x%02x%02x%02x : "
4992 + "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
4993 + p2p_info->fca_d_id, p2p_info->d_id,
4994 + ha->n_port->port_name[0],
4995 + ha->n_port->port_name[1], ha->n_port->port_name[2],
4996 + ha->n_port->port_name[3], ha->n_port->port_name[4],
4997 + ha->n_port->port_name[5], ha->n_port->port_name[6],
4998 + ha->n_port->port_name[7], ha->n_port->node_name[0],
4999 + ha->n_port->node_name[1], ha->n_port->node_name[2],
5000 + ha->n_port->node_name[3], ha->n_port->node_name[4],
5001 + ha->n_port->node_name[5], ha->n_port->node_name[6],
5002 + ha->n_port->node_name[7]);
5003 + break;
5004 + } else {
5005 + EL(ha, "No p2p info reported in non n2n topology\n");
5006 + rval = FC_BADCMD;
5007 + }
5008 + break;
5009 +
5010 + case FC_PORT_DOWNLOAD_FW:
5011 + EL(ha, "unsupported=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
5012 + rval = FC_BADCMD;
5013 + break;
4686 5014 default:
4687 5015 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4688 5016 rval = FC_BADCMD;
4689 5017 break;
4690 5018 }
4691 5019
4692 5020 /* Wait for suspension to end. */
4693 5021 ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4694 5022 timer = 0;
4695 5023
4696 5024 while (timer++ < 3000 &&
4697 5025 ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4698 5026 ql_delay(ha, 10000);
4699 5027 }
4700 5028
4701 - ql_restart_queues(ha);
4702 -
4703 5029 if (rval != FC_SUCCESS) {
4704 5030 EL(ha, "failed, rval = %xh\n", rval);
4705 5031 } else {
4706 5032 /*EMPTY*/
4707 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5033 + QL_PRINT_3(ha, "done\n");
4708 5034 }
4709 5035
4710 5036 return (rval);
4711 5037 }
4712 5038
4713 5039 static opaque_t
4714 5040 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4715 5041 {
4716 5042 port_id_t id;
4717 5043 ql_adapter_state_t *ha;
4718 5044 ql_tgt_t *tq;
4719 5045
4720 5046 id.r.rsvd_1 = 0;
4721 5047 id.b24 = d_id.port_id;
4722 5048
4723 5049 ha = ql_fca_handle_to_state(fca_handle);
4724 5050 if (ha == NULL) {
4725 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
5051 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
4726 5052 (void *)fca_handle);
4727 5053 return (NULL);
4728 5054 }
4729 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
5055 + QL_PRINT_3(ha, "started, d_id=%xh\n", id.b24);
4730 5056
4731 5057 tq = ql_d_id_to_queue(ha, id);
4732 5058
4733 - if (tq == NULL) {
4734 - EL(ha, "failed, tq=NULL\n");
5059 + if (tq == NULL && id.b24 != 0 && id.b24 != FS_BROADCAST) {
5060 + EL(ha, "failed, no tq available for d_id: %xh\n", id.b24);
4735 5061 } else {
4736 5062 /*EMPTY*/
4737 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5063 + QL_PRINT_3(ha, "done\n");
4738 5064 }
4739 5065 return (tq);
4740 5066 }
4741 5067
4742 5068 /* ************************************************************************ */
4743 5069 /* FCA Driver Local Support Functions. */
4744 5070 /* ************************************************************************ */
4745 5071
4746 5072 /*
4747 5073 * ql_cmd_setup
4748 5074 * Verifies proper command.
4749 5075 *
4750 5076 * Input:
4751 5077 * fca_handle = handle setup by ql_bind_port().
4752 5078 * pkt = pointer to fc_packet.
4753 5079 * rval = pointer for return value.
4754 5080 *
4755 5081 * Returns:
4756 5082 * Adapter state pointer, NULL = failure.
4757 5083 *
4758 5084 * Context:
4759 5085 * Kernel context.
4760 5086 */
4761 5087 static ql_adapter_state_t *
4762 5088 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
4763 5089 {
4764 5090 ql_adapter_state_t *ha, *pha;
4765 5091 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
4766 5092 ql_tgt_t *tq;
4767 5093 port_id_t d_id;
4768 5094
4769 5095 pkt->pkt_resp_resid = 0;
4770 5096 pkt->pkt_data_resid = 0;
4771 5097
4772 5098 /* check that the handle is assigned by this FCA */
4773 5099 ha = ql_fca_handle_to_state(fca_handle);
4774 5100 if (ha == NULL) {
4775 5101 *rval = FC_UNBOUND;
4776 - QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
5102 + QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
4777 5103 (void *)fca_handle);
4778 5104 return (NULL);
4779 5105 }
4780 5106 pha = ha->pha;
4781 5107
4782 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5108 + QL_PRINT_3(ha, "started\n");
4783 5109
4784 5110 if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
4785 5111 return (ha);
4786 5112 }
4787 5113
4788 5114 if (!(pha->flags & ONLINE)) {
4789 5115 pkt->pkt_state = FC_PKT_LOCAL_RJT;
4790 5116 pkt->pkt_reason = FC_REASON_HW_ERROR;
4791 5117 *rval = FC_TRANSPORT_ERROR;
4792 5118 EL(ha, "failed, not online hf=%xh\n", pha->flags);
4793 5119 return (NULL);
4794 5120 }
4795 5121
4796 5122 /* Exit on loop down. */
4797 5123 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
4798 5124 pha->task_daemon_flags & LOOP_DOWN &&
4799 5125 pha->loop_down_timer <= pha->loop_down_abort_time) {
4800 5126 pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4801 5127 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4802 5128 *rval = FC_OFFLINE;
4803 5129 EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
4804 5130 return (NULL);
4805 5131 }
4806 5132
4807 5133 if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
4808 5134 pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
4809 5135 tq = (ql_tgt_t *)pkt->pkt_fca_device;
4810 5136 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
4811 5137 d_id.r.rsvd_1 = 0;
4812 5138 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4813 5139 tq = ql_d_id_to_queue(ha, d_id);
4814 5140
4815 5141 pkt->pkt_fca_device = (opaque_t)tq;
4816 5142 }
4817 5143
4818 5144 if (tq != NULL) {
4819 5145 DEVICE_QUEUE_LOCK(tq);
4820 5146 if (tq->flags & (TQF_RSCN_RCVD |
4821 5147 TQF_NEED_AUTHENTICATION)) {
|
↓ open down ↓ |
29 lines elided |
↑ open up ↑ |
4822 5148 *rval = FC_DEVICE_BUSY;
4823 5149 DEVICE_QUEUE_UNLOCK(tq);
4824 5150 EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
4825 5151 tq->flags, tq->d_id.b24);
4826 5152 return (NULL);
4827 5153 }
4828 5154 DEVICE_QUEUE_UNLOCK(tq);
4829 5155 }
4830 5156 }
4831 5157
5158 + /* Check for packet already running. */
5159 + if (sp->handle != 0) {
5160 + *rval = FC_DEVICE_BUSY;
5161 + cmn_err(CE_WARN, "%s(%d) already running pkt=%p, sp=%p, "
5162 + "sp->pkt=%p, sp->hdl=%x, spf=%x, cq=%p\n", QL_NAME,
5163 + ha->instance, (void *)pkt, (void *)sp, (void *)sp->pkt,
5164 + sp->handle, sp->flags, (void *)sp->cmd.head);
5165 + return (NULL);
5166 + }
5167 + if (ha->rsp_queues_cnt > 1) {
5168 + ADAPTER_STATE_LOCK(ha);
5169 + sp->rsp_q_number = ha->rsp_q_number++;
5170 + if (ha->rsp_q_number == ha->rsp_queues_cnt) {
5171 + ha->rsp_q_number = 0;
5172 + }
5173 + ADAPTER_STATE_UNLOCK(ha);
5174 + } else {
5175 + sp->rsp_q_number = 0;
5176 + }
5177 +
4832 5178 /*
4833 5179 * Check DMA pointers.
4834 5180 */
4835 5181 *rval = DDI_SUCCESS;
4836 5182 if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
4837 5183 QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
4838 - *rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
4839 - if (*rval == DDI_SUCCESS) {
4840 - *rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
5184 +
5185 + *rval = qlc_fm_check_dma_handle(ha, pkt->pkt_cmd_dma);
5186 + if (*rval == DDI_FM_OK) {
5187 + *rval = qlc_fm_check_acc_handle(ha,
5188 + pkt->pkt_cmd_acc);
4841 5189 }
4842 5190 }
4843 5191
4844 5192 if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
4845 5193 pkt->pkt_rsplen != 0) {
4846 5194 QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
4847 - *rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
4848 - if (*rval == DDI_SUCCESS) {
4849 - *rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
5195 +
5196 + *rval = qlc_fm_check_dma_handle(ha, pkt->pkt_resp_dma);
5197 + if (*rval == DDI_FM_OK) {
5198 + *rval = qlc_fm_check_acc_handle(ha,
5199 + pkt->pkt_resp_acc);
4850 5200 }
4851 5201 }
4852 5202
4853 5203 /*
4854 5204 * Minimum branch conditional; Change it with care.
4855 5205 */
4856 5206 if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
4857 5207 (pkt->pkt_datalen != 0)) != 0) {
4858 5208 QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
4859 - *rval = ddi_check_dma_handle(pkt->pkt_data_dma);
4860 - if (*rval == DDI_SUCCESS) {
4861 - *rval = ddi_check_acc_handle(pkt->pkt_data_acc);
5209 +
5210 + *rval = qlc_fm_check_dma_handle(ha, pkt->pkt_data_dma);
5211 + if (*rval == DDI_FM_OK) {
5212 + *rval = qlc_fm_check_acc_handle(ha,
5213 + pkt->pkt_data_acc);
4862 5214 }
4863 5215 }
4864 5216
4865 - if (*rval != DDI_SUCCESS) {
5217 + if (*rval != DDI_FM_OK) {
4866 5218 pkt->pkt_state = FC_PKT_TRAN_ERROR;
4867 5219 pkt->pkt_reason = FC_REASON_DMA_ERROR;
5220 + pkt->pkt_expln = FC_EXPLN_NONE;
5221 + pkt->pkt_action = FC_ACTION_RETRYABLE;
4868 5222
4869 5223 /* Do command callback. */
4870 5224 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
4871 - ql_awaken_task_daemon(ha, sp, 0, 0);
5225 + ql_io_comp(sp);
4872 5226 }
4873 5227 *rval = FC_BADPACKET;
4874 5228 EL(ha, "failed, bad DMA pointers\n");
4875 5229 return (NULL);
4876 5230 }
4877 5231
4878 5232 if (sp->magic_number != QL_FCA_BRAND) {
4879 5233 *rval = FC_BADPACKET;
4880 5234 EL(ha, "failed, magic number=%xh\n", sp->magic_number);
4881 5235 return (NULL);
4882 5236 }
4883 5237 *rval = FC_SUCCESS;
4884 5238
4885 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5239 + QL_PRINT_3(ha, "done\n");
4886 5240
4887 5241 return (ha);
4888 5242 }
4889 5243
4890 5244 /*
4891 5245 * ql_els_plogi
4892 5246 * Issue a extended link service port login request.
4893 5247 *
4894 5248 * Input:
4895 5249 * ha = adapter state pointer.
4896 5250 * pkt = pointer to fc_packet.
4897 5251 *
4898 5252 * Returns:
4899 5253 * FC_SUCCESS - the packet was accepted for transport.
4900 5254 * FC_TRANSPORT_ERROR - a transport error occurred.
4901 5255 *
4902 5256 * Context:
4903 5257 * Kernel context.
4904 5258 */
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
4905 5259 static int
4906 5260 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4907 5261 {
4908 5262 ql_tgt_t *tq = NULL;
4909 5263 port_id_t d_id;
4910 5264 la_els_logi_t acc;
4911 5265 class_svc_param_t *class3_param;
4912 5266 int ret;
4913 5267 int rval = FC_SUCCESS;
4914 5268
4915 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4916 - pkt->pkt_cmd_fhdr.d_id);
5269 + QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
4917 5270
4918 5271 TASK_DAEMON_LOCK(ha);
4919 5272 if (!(ha->task_daemon_flags & STATE_ONLINE)) {
4920 5273 TASK_DAEMON_UNLOCK(ha);
4921 - QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
5274 + QL_PRINT_3(ha, "offline done\n");
4922 5275 return (FC_OFFLINE);
4923 5276 }
4924 5277 TASK_DAEMON_UNLOCK(ha);
4925 5278
4926 5279 bzero(&acc, sizeof (acc));
4927 5280 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4928 5281
4929 5282 ret = QL_SUCCESS;
4930 5283
4931 - if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5284 + if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
4932 5285 /*
4933 5286 * In p2p topology it sends a PLOGI after determining
4934 5287 * it has the N_Port login initiative.
4935 5288 */
4936 5289 ret = ql_p2p_plogi(ha, pkt);
4937 5290 }
4938 5291 if (ret == QL_CONSUMED) {
4939 5292 return (ret);
4940 5293 }
4941 5294
4942 5295 switch (ret = ql_login_port(ha, d_id)) {
4943 5296 case QL_SUCCESS:
4944 5297 tq = ql_d_id_to_queue(ha, d_id);
4945 5298 break;
4946 5299
4947 5300 case QL_LOOP_ID_USED:
4948 5301 if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
4949 5302 tq = ql_d_id_to_queue(ha, d_id);
4950 5303 }
4951 5304 break;
4952 5305
4953 5306 default:
4954 5307 break;
4955 5308 }
4956 5309
4957 5310 if (ret != QL_SUCCESS) {
4958 5311 /*
4959 5312 * Invalidate this entry so as to seek a fresh loop ID
4960 5313 * in case firmware reassigns it to something else
4961 5314 */
4962 5315 tq = ql_d_id_to_queue(ha, d_id);
4963 5316 if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
4964 5317 tq->loop_id = PORT_NO_LOOP_ID;
4965 5318 }
4966 5319 } else if (tq) {
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
4967 5320 (void) ql_get_port_database(ha, tq, PDF_ADISC);
4968 5321 }
4969 5322
4970 5323 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
4971 5324 (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
4972 5325
4973 5326 /* Build ACC. */
4974 5327 acc.ls_code.ls_code = LA_ELS_ACC;
4975 5328 acc.common_service.fcph_version = 0x2006;
4976 5329 acc.common_service.cmn_features = 0x8800;
4977 - acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
5330 + acc.common_service.rx_bufsize =
5331 + ha->loginparams.common_service.rx_bufsize;
4978 5332 acc.common_service.conc_sequences = 0xff;
4979 5333 acc.common_service.relative_offset = 0x03;
4980 5334 acc.common_service.e_d_tov = 0x7d0;
4981 5335
4982 5336 bcopy((void *)&tq->port_name[0],
4983 5337 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4984 5338 bcopy((void *)&tq->node_name[0],
4985 5339 (void *)&acc.node_ww_name.raw_wwn[0], 8);
4986 5340
4987 5341 class3_param = (class_svc_param_t *)&acc.class_3;
4988 5342 class3_param->class_valid_svc_opt = 0x8000;
4989 5343 class3_param->recipient_ctl = tq->class3_recipient_ctl;
4990 5344 class3_param->rcv_data_size = tq->class3_rcv_data_size;
4991 5345 class3_param->conc_sequences = tq->class3_conc_sequences;
4992 5346 class3_param->open_sequences_per_exch =
4993 5347 tq->class3_open_sequences_per_exch;
4994 5348
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
4995 5349 if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
4996 5350 acc.ls_code.ls_code = LA_ELS_RJT;
4997 5351 pkt->pkt_state = FC_PKT_TRAN_BSY;
4998 5352 pkt->pkt_reason = FC_REASON_XCHG_BSY;
4999 5353 EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
5000 5354 rval = FC_TRAN_BUSY;
5001 5355 } else {
5002 5356 DEVICE_QUEUE_LOCK(tq);
5003 5357 tq->logout_sent = 0;
5004 5358 tq->flags &= ~TQF_NEED_AUTHENTICATION;
5005 - if (CFG_IST(ha, CFG_CTRL_242581)) {
5359 + if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
5006 5360 tq->flags |= TQF_IIDMA_NEEDED;
5007 5361 }
5008 5362 DEVICE_QUEUE_UNLOCK(tq);
5009 5363
5010 - if (CFG_IST(ha, CFG_CTRL_242581)) {
5364 + if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
5011 5365 TASK_DAEMON_LOCK(ha);
5012 5366 ha->task_daemon_flags |= TD_IIDMA_NEEDED;
5013 5367 TASK_DAEMON_UNLOCK(ha);
5014 5368 }
5015 5369
5016 5370 pkt->pkt_state = FC_PKT_SUCCESS;
5017 5371 }
5018 5372 } else {
5019 5373 /* Build RJT. */
5020 5374 acc.ls_code.ls_code = LA_ELS_RJT;
5021 5375
5022 5376 switch (ret) {
5023 5377 case QL_FUNCTION_TIMEOUT:
5024 5378 pkt->pkt_state = FC_PKT_TIMEOUT;
5025 5379 pkt->pkt_reason = FC_REASON_HW_ERROR;
5026 5380 break;
5027 5381
5028 5382 case QL_MEMORY_ALLOC_FAILED:
5029 5383 pkt->pkt_state = FC_PKT_LOCAL_BSY;
5030 5384 pkt->pkt_reason = FC_REASON_NOMEM;
5031 5385 rval = FC_TRAN_BUSY;
5032 5386 break;
5033 5387
5034 5388 case QL_FABRIC_NOT_INITIALIZED:
5035 5389 pkt->pkt_state = FC_PKT_FABRIC_BSY;
5036 5390 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5037 5391 rval = FC_TRAN_BUSY;
5038 5392 break;
5039 5393
5040 5394 default:
5041 5395 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5042 5396 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5043 5397 break;
5044 5398 }
5045 5399
5046 5400 EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
5047 5401 "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
5048 5402 pkt->pkt_reason, ret, rval);
5049 5403 }
5050 5404
5051 5405 if (tq != NULL) {
5052 5406 DEVICE_QUEUE_LOCK(tq);
5053 5407 tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
5054 5408 if (rval == FC_TRAN_BUSY) {
5055 5409 if (tq->d_id.b24 != BROADCAST_ADDR) {
5056 5410 tq->flags |= TQF_NEED_AUTHENTICATION;
5057 5411 }
5058 5412 }
|
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
5059 5413 DEVICE_QUEUE_UNLOCK(tq);
5060 5414 }
5061 5415
5062 5416 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5063 5417 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5064 5418
5065 5419 if (rval != FC_SUCCESS) {
5066 5420 EL(ha, "failed, rval = %xh\n", rval);
5067 5421 } else {
5068 5422 /*EMPTY*/
5069 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5423 + QL_PRINT_3(ha, "done\n");
5070 5424 }
5071 5425 return (rval);
5072 5426 }
5073 5427
5074 5428 /*
5075 5429 * ql_p2p_plogi
5076 5430 * Start an extended link service port login request using
5077 5431 * an ELS Passthru iocb.
5078 5432 *
5079 5433 * Input:
5080 5434 * ha = adapter state pointer.
5081 5435 * pkt = pointer to fc_packet.
5082 5436 *
5083 5437 * Returns:
 5084 5438 	 *	QL_CONSUMED - the iocb was queued for transport.
5085 5439 *
5086 5440 * Context:
5087 5441 * Kernel context.
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
5088 5442 */
5089 5443 static int
5090 5444 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5091 5445 {
5092 5446 uint16_t id;
5093 5447 ql_tgt_t tmp;
5094 5448 ql_tgt_t *tq = &tmp;
5095 5449 int rval;
5096 5450 port_id_t d_id;
5097 5451 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
5452 + uint16_t loop_id;
5098 5453
5099 5454 tq->d_id.b.al_pa = 0;
5100 5455 tq->d_id.b.area = 0;
5101 5456 tq->d_id.b.domain = 0;
5102 5457
5103 5458 /*
5104 5459 * Verify that the port database hasn't moved beneath our feet by
5105 5460 * switching to the appropriate n_port_handle if necessary. This is
 5106 5461 	 * less unpleasant than the error recovery if the wrong one is used.
5107 5462 */
5108 5463 for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5109 5464 tq->loop_id = id;
5110 5465 rval = ql_get_port_database(ha, tq, PDF_NONE);
5111 - EL(ha, "rval=%xh\n", rval);
5466 + EL(ha, "rval=%xh, id=%x\n", rval, id);
5112 5467 /* check all the ones not logged in for possible use */
5113 5468 if (rval == QL_NOT_LOGGED_IN) {
5114 5469 if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5115 5470 ha->n_port->n_port_handle = tq->loop_id;
5116 - EL(ha, "n_port_handle =%xh, master state=%x\n",
5471 + EL(ha, "loop_id=%xh, master state=%x\n",
5117 5472 tq->loop_id, tq->master_state);
5118 5473 break;
5119 5474 }
5120 5475 /*
5121 5476 * Use a 'port unavailable' entry only
5122 5477 * if we used it before.
5123 5478 */
5124 5479 if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5125 5480 /* if the port_id matches, reuse it */
5126 5481 if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5127 - EL(ha, "n_port_handle =%xh,"
5482 + EL(ha, "n_port_handle loop_id=%xh, "
5128 5483 "master state=%xh\n",
5129 5484 tq->loop_id, tq->master_state);
5130 5485 break;
5131 5486 } else if (tq->loop_id ==
5132 5487 ha->n_port->n_port_handle) {
5133 - // avoid a lint error
5488 + /* avoid a lint error */
5134 5489 uint16_t *hndl;
5135 5490 uint16_t val;
5136 5491
5137 5492 hndl = &ha->n_port->n_port_handle;
5138 5493 val = *hndl;
5139 5494 val++;
5140 5495 val++;
5141 5496 *hndl = val;
5142 5497 }
5143 - EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5498 + EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
5144 5499 "master state=%x\n", rval, id, tq->loop_id,
5145 5500 tq->master_state);
5146 5501 }
5147 5502
5148 5503 }
5149 5504 if (rval == QL_SUCCESS) {
5150 5505 if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5151 5506 ha->n_port->n_port_handle = tq->loop_id;
5152 5507 EL(ha, "n_port_handle =%xh, master state=%x\n",
5153 5508 tq->loop_id, tq->master_state);
5154 5509 break;
5155 5510 }
5156 - EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5511 + EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
5157 5512 "master state=%x\n", rval, id, tq->loop_id,
5158 5513 tq->master_state);
5159 5514 }
5160 5515 }
5161 5516 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5162 5517
5163 5518 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5519 +
5520 + /*
 5521 +	 * In case fw does not have the loop id ready, the driver assumes 0 is
5522 + * used since this is p2p and there is only one remote port.
5523 + */
5524 + if (id == LAST_LOCAL_LOOP_ID + 1) {
5525 + EL(ha, "out of range loop id; rval=%xh, id=%xh, d_id=%xh\n",
5526 + rval, id, d_id.b24);
5527 + } else {
5528 + EL(ha, "remote port loop_id '%x' has been logged in, d_id=%x\n",
5529 + id, d_id.b24);
5530 + }
5531 +
5164 5532 tq = ql_d_id_to_queue(ha, d_id);
5533 +
5534 + /*
5535 + * LV could use any d_id it likes.
5536 + * tq may not be available yet.
5537 + */
5538 + if (tq == NULL) {
5539 + if (id != LAST_LOCAL_LOOP_ID + 1) {
5540 + loop_id = id;
5541 + } else {
5542 + loop_id = 0;
5543 + }
5544 + /* Acquire adapter state lock. */
5545 + ADAPTER_STATE_LOCK(ha);
5546 +
5547 + tq = ql_dev_init(ha, d_id, loop_id);
5548 +
5549 + ADAPTER_STATE_UNLOCK(ha);
5550 + }
5551 +
5552 + /*
 5553 +	 * Lun0 should always be allocated since tq is
5554 + * derived from lun queue in ql_els_passthru_entry
5555 + * in the interrupt handler.
5556 + */
5557 + sp->lun_queue = ql_lun_queue(ha, tq, 0);
5558 +
5559 + DEVICE_QUEUE_LOCK(tq);
5165 5560 ql_timeout_insert(ha, tq, sp);
5561 + DEVICE_QUEUE_UNLOCK(tq);
5562 +
5166 5563 ql_start_iocb(ha, sp);
5167 5564
5168 5565 return (QL_CONSUMED);
5169 5566 }
5170 5567
5171 5568
5172 5569 /*
5173 5570 * ql_els_flogi
5174 5571 * Issue a extended link service fabric login request.
5175 5572 *
5176 5573 * Input:
5177 5574 * ha = adapter state pointer.
5178 5575 * pkt = pointer to fc_packet.
5179 5576 *
5180 5577 * Returns:
5181 5578 * FC_SUCCESS - the packet was accepted for transport.
5182 5579 * FC_TRANSPORT_ERROR - a transport error occurred.
5183 5580 *
5184 5581 * Context:
5185 5582 * Kernel context.
5186 5583 */
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
5187 5584 static int
5188 5585 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5189 5586 {
5190 5587 ql_tgt_t *tq = NULL;
5191 5588 port_id_t d_id;
5192 5589 la_els_logi_t acc;
5193 5590 class_svc_param_t *class3_param;
5194 5591 int rval = FC_SUCCESS;
5195 5592 int accept = 0;
5196 5593
5197 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5198 - pkt->pkt_cmd_fhdr.d_id);
5594 + QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5199 5595
5200 5596 bzero(&acc, sizeof (acc));
5201 5597 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5202 5598
5203 - if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5599 + if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
5204 5600 /*
5205 5601 * d_id of zero in a FLOGI accept response in a point to point
5206 5602 * topology triggers evaluation of N Port login initiative.
5207 5603 */
5208 5604 pkt->pkt_resp_fhdr.d_id = 0;
5209 5605 /*
5210 5606 * An N_Port already logged in with the firmware
5211 5607 * will have the only database entry.
5212 5608 */
5213 5609 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5214 5610 tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5215 5611 }
5216 5612
5217 5613 if (tq != NULL) {
5218 5614 /*
5219 5615 * If the target port has initiative send
5220 5616 * up a PLOGI about the new device.
5221 5617 */
5222 - if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5223 - (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5224 - &ha->init_ctrl_blk.cb24.port_name[0] :
5225 - &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5618 + if (ql_wwn_cmp(ha, (la_wwn_t *)tq->port_name,
5619 + (la_wwn_t *)ha->loginparams.nport_ww_name.raw_wwn)
5620 + == 1) {
5226 5621 ha->send_plogi_timer = 3;
5227 5622 } else {
5228 5623 ha->send_plogi_timer = 0;
5229 5624 }
5230 5625 pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5231 5626 } else {
5232 5627 /*
5233 5628 * An N_Port not logged in with the firmware will not
5234 5629 * have a database entry. We accept anyway and rely
5235 5630 * on a PLOGI from the upper layers to set the d_id
5236 5631 * and s_id.
5237 5632 */
5238 5633 accept = 1;
5239 5634 }
5240 5635 } else {
5241 5636 tq = ql_d_id_to_queue(ha, d_id);
5242 5637 }
5243 5638 if ((tq != NULL) || (accept != NULL)) {
5244 5639 /* Build ACC. */
5245 5640 pkt->pkt_state = FC_PKT_SUCCESS;
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
5246 5641 class3_param = (class_svc_param_t *)&acc.class_3;
5247 5642
5248 5643 acc.ls_code.ls_code = LA_ELS_ACC;
5249 5644 acc.common_service.fcph_version = 0x2006;
5250 5645 if (ha->topology & QL_N_PORT) {
5251 5646 /* clear F_Port indicator */
5252 5647 acc.common_service.cmn_features = 0x0800;
5253 5648 } else {
5254 5649 acc.common_service.cmn_features = 0x1b00;
5255 5650 }
5256 - CFG_IST(ha, CFG_CTRL_24258081) ?
5257 - (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5258 - ha->init_ctrl_blk.cb24.max_frame_length[0],
5259 - ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5260 - (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5261 - ha->init_ctrl_blk.cb.max_frame_length[0],
5262 - ha->init_ctrl_blk.cb.max_frame_length[1]));
5651 + acc.common_service.rx_bufsize =
5652 + ha->loginparams.common_service.rx_bufsize;
5263 5653 acc.common_service.conc_sequences = 0xff;
5264 5654 acc.common_service.relative_offset = 0x03;
5265 5655 acc.common_service.e_d_tov = 0x7d0;
5266 5656 if (accept) {
5267 5657 /* Use the saved N_Port WWNN and WWPN */
5268 5658 if (ha->n_port != NULL) {
5269 5659 bcopy((void *)&ha->n_port->port_name[0],
5270 5660 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5271 5661 bcopy((void *)&ha->n_port->node_name[0],
5272 5662 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5273 5663 /* mark service options invalid */
5274 5664 class3_param->class_valid_svc_opt = 0x0800;
5275 5665 } else {
5276 5666 EL(ha, "ha->n_port is NULL\n");
5277 5667 /* Build RJT. */
5278 5668 acc.ls_code.ls_code = LA_ELS_RJT;
5279 5669
5280 5670 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5281 5671 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5282 5672 }
5283 5673 } else {
5284 5674 bcopy((void *)&tq->port_name[0],
5285 5675 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5286 5676 bcopy((void *)&tq->node_name[0],
5287 5677 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5288 5678
5289 5679 class3_param = (class_svc_param_t *)&acc.class_3;
5290 5680 class3_param->class_valid_svc_opt = 0x8800;
5291 5681 class3_param->recipient_ctl = tq->class3_recipient_ctl;
5292 5682 class3_param->rcv_data_size = tq->class3_rcv_data_size;
5293 5683 class3_param->conc_sequences =
5294 5684 tq->class3_conc_sequences;
5295 5685 class3_param->open_sequences_per_exch =
5296 5686 tq->class3_open_sequences_per_exch;
5297 5687 }
5298 5688 } else {
5299 5689 /* Build RJT. */
5300 5690 acc.ls_code.ls_code = LA_ELS_RJT;
5301 5691
5302 5692 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5303 5693 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
|
↓ open down ↓ |
31 lines elided |
↑ open up ↑ |
5304 5694 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5305 5695 }
5306 5696
5307 5697 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5308 5698 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5309 5699
5310 5700 if (rval != FC_SUCCESS) {
5311 5701 EL(ha, "failed, rval = %xh\n", rval);
5312 5702 } else {
5313 5703 /*EMPTY*/
5314 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5704 + QL_PRINT_3(ha, "done\n");
5315 5705 }
5316 5706 return (rval);
5317 5707 }
5318 5708
5319 5709 /*
5320 5710 * ql_els_logo
5321 5711 * Issue a extended link service logout request.
5322 5712 *
5323 5713 * Input:
5324 5714 * ha = adapter state pointer.
5325 5715 * pkt = pointer to fc_packet.
5326 5716 *
5327 5717 * Returns:
5328 5718 * FC_SUCCESS - the packet was accepted for transport.
5329 5719 * FC_TRANSPORT_ERROR - a transport error occurred.
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
5330 5720 *
5331 5721 * Context:
5332 5722 * Kernel context.
5333 5723 */
5334 5724 static int
5335 5725 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5336 5726 {
5337 5727 port_id_t d_id;
5338 5728 ql_tgt_t *tq;
5339 5729 la_els_logo_t acc;
5340 - int rval = FC_SUCCESS;
5341 5730
5342 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5343 - pkt->pkt_cmd_fhdr.d_id);
5731 + QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5344 5732
5345 5733 bzero(&acc, sizeof (acc));
5346 5734 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5347 5735
5348 5736 tq = ql_d_id_to_queue(ha, d_id);
5349 5737 if (tq) {
5350 5738 DEVICE_QUEUE_LOCK(tq);
5351 5739 if (tq->d_id.b24 == BROADCAST_ADDR) {
5352 5740 DEVICE_QUEUE_UNLOCK(tq);
5353 5741 return (FC_SUCCESS);
5354 5742 }
5355 5743
5356 5744 tq->flags |= TQF_NEED_AUTHENTICATION;
5357 5745
5358 5746 do {
5359 5747 DEVICE_QUEUE_UNLOCK(tq);
5360 5748 (void) ql_abort_device(ha, tq, 1);
5361 5749
5362 5750 /*
5363 5751 * Wait for commands to drain in F/W (doesn't
5364 5752 * take more than a few milliseconds)
5365 5753 */
5366 5754 ql_delay(ha, 10000);
5367 5755
5368 5756 DEVICE_QUEUE_LOCK(tq);
5369 5757 } while (tq->outcnt);
5370 5758
5371 5759 DEVICE_QUEUE_UNLOCK(tq);
5372 5760 }
5373 5761
5374 5762 if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5375 5763 /* Build ACC. */
5376 5764 acc.ls_code.ls_code = LA_ELS_ACC;
5377 5765
5378 5766 pkt->pkt_state = FC_PKT_SUCCESS;
5379 5767 } else {
5380 5768 /* Build RJT. */
|
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
5381 5769 acc.ls_code.ls_code = LA_ELS_RJT;
5382 5770
5383 5771 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5384 5772 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5385 5773 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5386 5774 }
5387 5775
5388 5776 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5389 5777 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5390 5778
5391 - if (rval != FC_SUCCESS) {
5392 - EL(ha, "failed, rval = %xh\n", rval);
5393 - } else {
5394 - /*EMPTY*/
5395 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5396 - }
5397 - return (rval);
5779 + QL_PRINT_3(ha, "done\n");
5780 +
5781 + return (FC_SUCCESS);
5398 5782 }
5399 5783
5400 5784 /*
5401 5785 * ql_els_prli
5402 5786  * Issue an extended link service process login request.
5403 5787 *
5404 5788 * Input:
5405 5789 * ha = adapter state pointer.
5406 5790 * pkt = pointer to fc_packet.
5407 5791 *
5408 5792 * Returns:
5409 5793 * FC_SUCCESS - the packet was accepted for transport.
5410 5794 * FC_TRANSPORT_ERROR - a transport error occurred.
5411 5795 *
5412 5796 * Context:
5413 5797 * Kernel context.
5414 5798 */
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
5415 5799 static int
5416 5800 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5417 5801 {
5418 5802 ql_tgt_t *tq;
5419 5803 port_id_t d_id;
5420 5804 la_els_prli_t acc;
5421 5805 prli_svc_param_t *param;
5422 5806 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
5423 5807 int rval = FC_SUCCESS;
5424 5808
5425 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5426 - pkt->pkt_cmd_fhdr.d_id);
5809 + QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5427 5810
5428 5811 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5429 5812
5430 5813 tq = ql_d_id_to_queue(ha, d_id);
5431 5814 if (tq != NULL) {
5432 5815 (void) ql_get_port_database(ha, tq, PDF_NONE);
5433 5816
5434 5817 if ((ha->topology & QL_N_PORT) &&
5435 5818 (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5819 +
5820 + /* always set lun_queue */
5821 + sp->lun_queue = ql_lun_queue(ha, tq, 0);
5822 +
5823 + DEVICE_QUEUE_LOCK(tq);
5436 5824 ql_timeout_insert(ha, tq, sp);
5825 + DEVICE_QUEUE_UNLOCK(tq);
5437 5826 ql_start_iocb(ha, sp);
5438 5827 rval = QL_CONSUMED;
5439 5828 } else {
5440 5829 /* Build ACC. */
5441 5830 bzero(&acc, sizeof (acc));
5442 5831 acc.ls_code = LA_ELS_ACC;
5443 5832 acc.page_length = 0x10;
5444 5833 acc.payload_length = tq->prli_payload_length;
5445 5834
5446 5835 param = (prli_svc_param_t *)&acc.service_params[0];
5447 5836 param->type = 0x08;
5448 5837 param->rsvd = 0x00;
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
5449 5838 param->process_assoc_flags = tq->prli_svc_param_word_0;
5450 5839 param->process_flags = tq->prli_svc_param_word_3;
5451 5840
5452 5841 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5453 5842 (uint8_t *)pkt->pkt_resp, sizeof (acc),
5454 5843 DDI_DEV_AUTOINCR);
5455 5844
5456 5845 pkt->pkt_state = FC_PKT_SUCCESS;
5457 5846 }
5458 5847 } else {
5459 - la_els_rjt_t rjt;
5848 + /* in case of P2P, tq might not have been created yet */
5849 + if (ha->topology & QL_N_PORT) {
5460 5850
5461 - /* Build RJT. */
5462 - bzero(&rjt, sizeof (rjt));
5463 - rjt.ls_code.ls_code = LA_ELS_RJT;
5851 + /* Acquire adapter state lock. */
5852 + ADAPTER_STATE_LOCK(ha);
5853 + tq = ql_dev_init(ha, d_id, ha->n_port->n_port_handle);
5854 + ADAPTER_STATE_UNLOCK(ha);
5464 5855
5465 - ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5466 - (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5856 + /* always alloc lun #0 */
5857 + sp->lun_queue = ql_lun_queue(ha, tq, 0);
5858 + bcopy((void *)&ha->n_port->port_name[0],
5859 + (void *) &tq->port_name[0], 8);
5860 + bcopy((void *)&ha->n_port->node_name[0],
5861 + (void *) &tq->node_name[0], 8);
5467 5862
5468 - pkt->pkt_state = FC_PKT_TRAN_ERROR;
5469 - pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5470 - EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5863 + DEVICE_QUEUE_LOCK(tq);
5864 + ql_timeout_insert(ha, tq, sp);
5865 + DEVICE_QUEUE_UNLOCK(tq);
5866 +
5867 + ql_start_iocb(ha, sp);
5868 + rval = QL_CONSUMED;
5869 +
5870 + } else {
5871 +
5872 + la_els_rjt_t rjt;
5873 +
5874 + /* Build RJT. */
5875 + bzero(&rjt, sizeof (rjt));
5876 + rjt.ls_code.ls_code = LA_ELS_RJT;
5877 +
5878 + ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5879 + (uint8_t *)pkt->pkt_resp, sizeof (rjt),
5880 + DDI_DEV_AUTOINCR);
5881 +
5882 + pkt->pkt_state = FC_PKT_TRAN_ERROR;
5883 + pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5884 + EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5885 + }
5471 5886 }
5472 5887
5473 5888 if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5474 5889 EL(ha, "failed, rval = %xh\n", rval);
5475 5890 } else {
5476 5891 /*EMPTY*/
5477 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5892 + QL_PRINT_3(ha, "done\n");
5478 5893 }
5479 5894 return (rval);
5480 5895 }
5481 5896
5482 5897 /*
5483 5898 * ql_els_prlo
5484 5899 * Issue a extended link service process logout request.
5485 5900 *
5486 5901 * Input:
5487 5902 * ha = adapter state pointer.
5488 5903 * pkt = pointer to fc_packet.
5489 5904 *
5490 5905 * Returns:
5491 5906 * FC_SUCCESS - the packet was accepted for transport.
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
5492 5907 * FC_TRANSPORT_ERROR - a transport error occurred.
5493 5908 *
5494 5909 * Context:
5495 5910 * Kernel context.
5496 5911 */
5497 5912 /* ARGSUSED */
5498 5913 static int
5499 5914 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5500 5915 {
5501 5916 la_els_prli_t acc;
5502 - int rval = FC_SUCCESS;
5503 5917
5504 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5505 - pkt->pkt_cmd_fhdr.d_id);
5918 + QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5506 5919
5507 5920 /* Build ACC. */
5508 5921 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5509 5922 (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5510 5923
5511 5924 acc.ls_code = LA_ELS_ACC;
5512 5925 acc.service_params[2] = 1;
5513 5926
5514 5927 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5515 5928 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5516 5929
5517 5930 pkt->pkt_state = FC_PKT_SUCCESS;
5518 5931
5519 - if (rval != FC_SUCCESS) {
5520 - EL(ha, "failed, rval = %xh\n", rval);
5521 - } else {
5522 - /*EMPTY*/
5523 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5524 - }
5525 - return (rval);
5932 + QL_PRINT_3(ha, "done\n");
5933 +
5934 + return (FC_SUCCESS);
5526 5935 }
5527 5936
5528 5937 /*
5529 5938 * ql_els_adisc
5530 5939  * Issue an extended link service address discovery request.
5531 5940 *
5532 5941 * Input:
5533 5942 * ha = adapter state pointer.
5534 5943 * pkt = pointer to fc_packet.
5535 5944 *
5536 5945 * Returns:
5537 5946 * FC_SUCCESS - the packet was accepted for transport.
5538 5947 * FC_TRANSPORT_ERROR - a transport error occurred.
5539 5948 *
5540 5949 * Context:
5541 5950 * Kernel context.
5542 5951 */
5543 5952 static int
5544 5953 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
5545 5954 {
5546 5955 ql_dev_id_list_t *list;
5547 5956 uint32_t list_size;
5548 5957 ql_link_t *link;
5549 5958 ql_tgt_t *tq;
5550 5959 ql_lun_t *lq;
5551 5960 port_id_t d_id;
5552 5961 la_els_adisc_t acc;
5553 5962 uint16_t index, loop_id;
5554 5963 ql_mbx_data_t mr;
5555 - int rval = FC_SUCCESS;
5556 5964
5557 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5965 + QL_PRINT_3(ha, "started\n");
5558 5966
5559 5967 bzero(&acc, sizeof (acc));
5560 5968 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5561 5969
5562 5970 /*
5563 5971 * MBC_GET_PORT_DATABASE causes ADISC to go out to
5564 5972 * the device from the firmware
5565 5973 */
5566 5974 index = ql_alpa_to_index[d_id.b.al_pa];
5567 5975 tq = NULL;
5568 5976 for (link = ha->dev[index].first; link != NULL; link = link->next) {
5569 5977 tq = link->base_address;
5570 5978 if (tq->d_id.b24 == d_id.b24) {
5571 5979 break;
5572 5980 } else {
5573 5981 tq = NULL;
5574 5982 }
5575 5983 }
5576 5984
5577 5985 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5578 5986 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
5579 5987 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
5580 5988
5581 5989 if (list != NULL &&
5582 5990 ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
5583 5991 QL_SUCCESS) {
5584 5992
5585 5993 for (index = 0; index < mr.mb[1]; index++) {
5586 5994 ql_dev_list(ha, list, index, &d_id, &loop_id);
5587 5995
5588 5996 if (tq->d_id.b24 == d_id.b24) {
5589 5997 tq->loop_id = loop_id;
5590 5998 break;
5591 5999 }
5592 6000 }
5593 6001 } else {
5594 6002 cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
5595 6003 QL_NAME, ha->instance, d_id.b24);
5596 6004 tq = NULL;
5597 6005 }
5598 6006 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5599 6007 cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
5600 6008 QL_NAME, ha->instance, tq->d_id.b24);
5601 6009 tq = NULL;
5602 6010 }
5603 6011
5604 6012 if (list != NULL) {
5605 6013 kmem_free(list, list_size);
5606 6014 }
5607 6015 }
5608 6016
5609 6017 if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
5610 6018 ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {
5611 6019
5612 6020 /* Build ACC. */
5613 6021
5614 6022 DEVICE_QUEUE_LOCK(tq);
5615 6023 tq->flags &= ~TQF_NEED_AUTHENTICATION;
5616 6024 if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
5617 6025 for (link = tq->lun_queues.first; link != NULL;
5618 6026 link = link->next) {
5619 6027 lq = link->base_address;
5620 6028
5621 6029 if (lq->cmd.first != NULL) {
5622 6030 ql_next(ha, lq);
5623 6031 DEVICE_QUEUE_LOCK(tq);
5624 6032 }
5625 6033 }
5626 6034 }
5627 6035 DEVICE_QUEUE_UNLOCK(tq);
5628 6036
5629 6037 acc.ls_code.ls_code = LA_ELS_ACC;
5630 6038 acc.hard_addr.hard_addr = tq->hard_addr.b24;
5631 6039
5632 6040 bcopy((void *)&tq->port_name[0],
5633 6041 (void *)&acc.port_wwn.raw_wwn[0], 8);
5634 6042 bcopy((void *)&tq->node_name[0],
5635 6043 (void *)&acc.node_wwn.raw_wwn[0], 8);
5636 6044
5637 6045 acc.nport_id.port_id = tq->d_id.b24;
5638 6046
5639 6047 pkt->pkt_state = FC_PKT_SUCCESS;
5640 6048 } else {
5641 6049 /* Build RJT. */
|
↓ open down ↓ |
74 lines elided |
↑ open up ↑ |
5642 6050 acc.ls_code.ls_code = LA_ELS_RJT;
5643 6051
5644 6052 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5645 6053 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5646 6054 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5647 6055 }
5648 6056
5649 6057 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5650 6058 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5651 6059
5652 - if (rval != FC_SUCCESS) {
5653 - EL(ha, "failed, rval = %xh\n", rval);
5654 - } else {
5655 - /*EMPTY*/
5656 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5657 - }
5658 - return (rval);
6060 + QL_PRINT_3(ha, "done\n");
6061 +
6062 + return (FC_SUCCESS);
5659 6063 }
5660 6064
5661 6065 /*
5662 6066 * ql_els_linit
5664 6068  * Issue an extended link service loop initialize request.
5664 6068 *
5665 6069 * Input:
5666 6070 * ha = adapter state pointer.
5667 6071 * pkt = pointer to fc_packet.
5668 6072 *
5669 6073 * Returns:
5670 6074 * FC_SUCCESS - the packet was accepted for transport.
5671 6075 * FC_TRANSPORT_ERROR - a transport error occurred.
5672 6076 *
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
5673 6077 * Context:
5674 6078 * Kernel context.
5675 6079 */
5676 6080 static int
5677 6081 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5678 6082 {
5679 6083 ddi_dma_cookie_t *cp;
5680 6084 uint32_t cnt;
5681 6085 conv_num_t n;
5682 6086 port_id_t d_id;
5683 - int rval = FC_SUCCESS;
5684 6087
5685 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6088 + QL_PRINT_3(ha, "started\n");
5686 6089
5687 6090 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5688 - if (ha->topology & QL_SNS_CONNECTION) {
6091 + if (ha->topology & QL_FABRIC_CONNECTION) {
5689 6092 fc_linit_req_t els;
5690 6093 lfa_cmd_t lfa;
5691 6094
5692 6095 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5693 6096 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5694 6097
5695 6098 /* Setup LFA mailbox command data. */
5696 6099 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5697 6100
5698 6101 lfa.resp_buffer_length[0] = 4;
5699 6102
5700 6103 cp = pkt->pkt_resp_cookie;
5701 6104 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5702 6105 n.size64 = (uint64_t)cp->dmac_laddress;
5703 6106 LITTLE_ENDIAN_64(&n.size64);
5704 6107 } else {
5705 6108 n.size32[0] = LSD(cp->dmac_laddress);
5706 6109 LITTLE_ENDIAN_32(&n.size32[0]);
5707 6110 n.size32[1] = MSD(cp->dmac_laddress);
5708 6111 LITTLE_ENDIAN_32(&n.size32[1]);
5709 6112 }
5710 6113
5711 6114 /* Set buffer address. */
5712 6115 for (cnt = 0; cnt < 8; cnt++) {
5713 6116 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5714 6117 }
5715 6118
5716 6119 lfa.subcommand_length[0] = 4;
5717 6120 n.size32[0] = d_id.b24;
5718 6121 LITTLE_ENDIAN_32(&n.size32[0]);
5719 6122 lfa.addr[0] = n.size8[0];
5720 6123 lfa.addr[1] = n.size8[1];
5721 6124 lfa.addr[2] = n.size8[2];
5722 6125 lfa.subcommand[1] = 0x70;
5723 6126 lfa.payload[2] = els.func;
5724 6127 lfa.payload[4] = els.lip_b3;
5725 6128 lfa.payload[5] = els.lip_b4;
5726 6129
5727 6130 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5728 6131 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5729 6132 } else {
5730 6133 pkt->pkt_state = FC_PKT_SUCCESS;
5731 6134 }
5732 6135 } else {
5733 6136 fc_linit_resp_t rjt;
5734 6137
5735 6138 /* Build RJT. */
5736 6139 bzero(&rjt, sizeof (rjt));
|
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
5737 6140 rjt.ls_code.ls_code = LA_ELS_RJT;
5738 6141
5739 6142 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5740 6143 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5741 6144
5742 6145 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5743 6146 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5744 6147 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5745 6148 }
5746 6149
5747 - if (rval != FC_SUCCESS) {
5748 - EL(ha, "failed, rval = %xh\n", rval);
5749 - } else {
5750 - /*EMPTY*/
5751 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5752 - }
5753 - return (rval);
6150 + QL_PRINT_3(ha, "done\n");
6151 +
6152 + return (FC_SUCCESS);
5754 6153 }
5755 6154
5756 6155 /*
5757 6156 * ql_els_lpc
5759 6158  * Issue an extended link service loop control request.
5759 6158 *
5760 6159 * Input:
5761 6160 * ha = adapter state pointer.
5762 6161 * pkt = pointer to fc_packet.
5763 6162 *
5764 6163 * Returns:
5765 6164 * FC_SUCCESS - the packet was accepted for transport.
5766 6165 * FC_TRANSPORT_ERROR - a transport error occurred.
5767 6166 *
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
5768 6167 * Context:
5769 6168 * Kernel context.
5770 6169 */
5771 6170 static int
5772 6171 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5773 6172 {
5774 6173 ddi_dma_cookie_t *cp;
5775 6174 uint32_t cnt;
5776 6175 conv_num_t n;
5777 6176 port_id_t d_id;
5778 - int rval = FC_SUCCESS;
5779 6177
5780 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6178 + QL_PRINT_3(ha, "started\n");
5781 6179
5782 6180 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5783 - if (ha->topology & QL_SNS_CONNECTION) {
6181 + if (ha->topology & QL_FABRIC_CONNECTION) {
5784 6182 ql_lpc_t els;
5785 6183 lfa_cmd_t lfa;
5786 6184
5787 6185 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5788 6186 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5789 6187
5790 6188 /* Setup LFA mailbox command data. */
5791 6189 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5792 6190
5793 6191 lfa.resp_buffer_length[0] = 4;
5794 6192
5795 6193 cp = pkt->pkt_resp_cookie;
5796 6194 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5797 6195 n.size64 = (uint64_t)(cp->dmac_laddress);
5798 6196 LITTLE_ENDIAN_64(&n.size64);
5799 6197 } else {
5800 6198 n.size32[0] = cp->dmac_address;
5801 6199 LITTLE_ENDIAN_32(&n.size32[0]);
5802 6200 n.size32[1] = 0;
5803 6201 }
5804 6202
5805 6203 /* Set buffer address. */
5806 6204 for (cnt = 0; cnt < 8; cnt++) {
5807 6205 lfa.resp_buffer_address[cnt] = n.size8[cnt];
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
5808 6206 }
5809 6207
5810 6208 lfa.subcommand_length[0] = 20;
5811 6209 n.size32[0] = d_id.b24;
5812 6210 LITTLE_ENDIAN_32(&n.size32[0]);
5813 6211 lfa.addr[0] = n.size8[0];
5814 6212 lfa.addr[1] = n.size8[1];
5815 6213 lfa.addr[2] = n.size8[2];
5816 6214 lfa.subcommand[1] = 0x71;
5817 6215 lfa.payload[4] = els.port_control;
5818 - bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
6216 + bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 16);
5819 6217
5820 6218 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5821 6219 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5822 6220 } else {
5823 6221 pkt->pkt_state = FC_PKT_SUCCESS;
5824 6222 }
5825 6223 } else {
5826 6224 ql_lpc_resp_t rjt;
5827 6225
5828 6226 /* Build RJT. */
5829 6227 bzero(&rjt, sizeof (rjt));
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
5830 6228 rjt.ls_code.ls_code = LA_ELS_RJT;
5831 6229
5832 6230 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5833 6231 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5834 6232
5835 6233 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5836 6234 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5837 6235 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5838 6236 }
5839 6237
5840 - if (rval != FC_SUCCESS) {
5841 - EL(ha, "failed, rval = %xh\n", rval);
5842 - } else {
5843 - /*EMPTY*/
5844 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5845 - }
5846 - return (rval);
6238 + QL_PRINT_3(ha, "done\n");
6239 +
6240 + return (FC_SUCCESS);
5847 6241 }
5848 6242
5849 6243 /*
5850 6244 * ql_els_lsts
5852 6246  * Issue an extended link service loop status request.
5852 6246 *
5853 6247 * Input:
5854 6248 * ha = adapter state pointer.
5855 6249 * pkt = pointer to fc_packet.
5856 6250 *
5857 6251 * Returns:
5858 6252 * FC_SUCCESS - the packet was accepted for transport.
5859 6253 * FC_TRANSPORT_ERROR - a transport error occurred.
5860 6254 *
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
5861 6255 * Context:
5862 6256 * Kernel context.
5863 6257 */
5864 6258 static int
5865 6259 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5866 6260 {
5867 6261 ddi_dma_cookie_t *cp;
5868 6262 uint32_t cnt;
5869 6263 conv_num_t n;
5870 6264 port_id_t d_id;
5871 - int rval = FC_SUCCESS;
5872 6265
5873 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6266 + QL_PRINT_3(ha, "started\n");
5874 6267
5875 6268 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5876 - if (ha->topology & QL_SNS_CONNECTION) {
6269 + if (ha->topology & QL_FABRIC_CONNECTION) {
5877 6270 fc_lsts_req_t els;
5878 6271 lfa_cmd_t lfa;
5879 6272
5880 6273 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5881 6274 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5882 6275
5883 6276 /* Setup LFA mailbox command data. */
5884 6277 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5885 6278
5886 6279 lfa.resp_buffer_length[0] = 84;
5887 6280
5888 6281 cp = pkt->pkt_resp_cookie;
5889 6282 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5890 6283 n.size64 = cp->dmac_laddress;
5891 6284 LITTLE_ENDIAN_64(&n.size64);
5892 6285 } else {
5893 6286 n.size32[0] = cp->dmac_address;
5894 6287 LITTLE_ENDIAN_32(&n.size32[0]);
5895 6288 n.size32[1] = 0;
5896 6289 }
5897 6290
5898 6291 /* Set buffer address. */
5899 6292 for (cnt = 0; cnt < 8; cnt++) {
5900 6293 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5901 6294 }
5902 6295
5903 6296 lfa.subcommand_length[0] = 2;
5904 6297 n.size32[0] = d_id.b24;
5905 6298 LITTLE_ENDIAN_32(&n.size32[0]);
5906 6299 lfa.addr[0] = n.size8[0];
5907 6300 lfa.addr[1] = n.size8[1];
5908 6301 lfa.addr[2] = n.size8[2];
5909 6302 lfa.subcommand[1] = 0x72;
5910 6303
5911 6304 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5912 6305 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5913 6306 } else {
5914 6307 pkt->pkt_state = FC_PKT_SUCCESS;
5915 6308 }
5916 6309 } else {
5917 6310 fc_lsts_resp_t rjt;
5918 6311
5919 6312 /* Build RJT. */
5920 6313 bzero(&rjt, sizeof (rjt));
|
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
5921 6314 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5922 6315
5923 6316 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5924 6317 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5925 6318
5926 6319 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5927 6320 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5928 6321 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5929 6322 }
5930 6323
5931 - if (rval != FC_SUCCESS) {
5932 - EL(ha, "failed=%xh\n", rval);
5933 - } else {
5934 - /*EMPTY*/
5935 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5936 - }
5937 - return (rval);
6324 + QL_PRINT_3(ha, "done\n");
6325 +
6326 + return (FC_SUCCESS);
5938 6327 }
5939 6328
5940 6329 /*
5941 6330 * ql_els_scr
5943 6332  * Issue an extended link service state change registration request.
5943 6332 *
5944 6333 * Input:
5945 6334 * ha = adapter state pointer.
5946 6335 * pkt = pointer to fc_packet.
5947 6336 *
5948 6337 * Returns:
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
5949 6338 * FC_SUCCESS - the packet was accepted for transport.
5950 6339 * FC_TRANSPORT_ERROR - a transport error occurred.
5951 6340 *
5952 6341 * Context:
5953 6342 * Kernel context.
5954 6343 */
5955 6344 static int
5956 6345 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5957 6346 {
5958 6347 fc_scr_resp_t acc;
5959 - int rval = FC_SUCCESS;
5960 6348
5961 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6349 + QL_PRINT_3(ha, "started\n");
5962 6350
5963 6351 bzero(&acc, sizeof (acc));
5964 - if (ha->topology & QL_SNS_CONNECTION) {
6352 + if (ha->topology & QL_FABRIC_CONNECTION) {
5965 6353 fc_scr_req_t els;
5966 6354
5967 6355 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5968 6356 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5969 6357
5970 6358 if (ql_send_change_request(ha, els.scr_func) ==
5971 6359 QL_SUCCESS) {
5972 6360 /* Build ACC. */
5973 6361 acc.scr_acc = LA_ELS_ACC;
5974 6362
5975 6363 pkt->pkt_state = FC_PKT_SUCCESS;
5976 6364 } else {
5977 6365 /* Build RJT. */
5978 6366 acc.scr_acc = LA_ELS_RJT;
5979 6367
5980 6368 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5981 6369 pkt->pkt_reason = FC_REASON_HW_ERROR;
5982 6370 EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5983 6371 }
5984 6372 } else {
5985 6373 /* Build RJT. */
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
5986 6374 acc.scr_acc = LA_ELS_RJT;
5987 6375
5988 6376 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5989 6377 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5990 6378 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5991 6379 }
5992 6380
5993 6381 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5994 6382 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5995 6383
5996 - if (rval != FC_SUCCESS) {
5997 - EL(ha, "failed, rval = %xh\n", rval);
5998 - } else {
5999 - /*EMPTY*/
6000 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6001 - }
6002 - return (rval);
6384 + QL_PRINT_3(ha, "done\n");
6385 +
6386 + return (FC_SUCCESS);
6003 6387 }
6004 6388
6005 6389 /*
6006 6390 * ql_els_rscn
6008 6392  * Issue an extended link service register state
6008 6392 * change notification request.
6009 6393 *
6010 6394 * Input:
6011 6395 * ha = adapter state pointer.
6012 6396 * pkt = pointer to fc_packet.
6013 6397 *
6014 6398 * Returns:
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
6015 6399 * FC_SUCCESS - the packet was accepted for transport.
6016 6400 * FC_TRANSPORT_ERROR - a transport error occurred.
6017 6401 *
6018 6402 * Context:
6019 6403 * Kernel context.
6020 6404 */
6021 6405 static int
6022 6406 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6023 6407 {
6024 6408 ql_rscn_resp_t acc;
6025 - int rval = FC_SUCCESS;
6026 6409
6027 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6410 + QL_PRINT_3(ha, "started\n");
6028 6411
6029 6412 bzero(&acc, sizeof (acc));
6030 - if (ha->topology & QL_SNS_CONNECTION) {
6413 + if (ha->topology & QL_FABRIC_CONNECTION) {
6031 6414 /* Build ACC. */
6032 6415 acc.scr_acc = LA_ELS_ACC;
6033 6416
6034 6417 pkt->pkt_state = FC_PKT_SUCCESS;
6035 6418 } else {
6036 6419 /* Build RJT. */
6037 6420 acc.scr_acc = LA_ELS_RJT;
6038 6421
6039 6422 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6040 6423 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6041 6424 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6042 6425 }
6043 6426
6044 6427 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6045 6428 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6046 6429
6047 - if (rval != FC_SUCCESS) {
6048 - EL(ha, "failed, rval = %xh\n", rval);
6049 - } else {
6050 - /*EMPTY*/
6051 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6052 - }
6053 - return (rval);
6430 + QL_PRINT_3(ha, "done\n");
6431 +
6432 + return (FC_SUCCESS);
6054 6433 }
6055 6434
6056 6435 /*
6057 6436 * ql_els_farp_req
6058 6437 * Issue FC Address Resolution Protocol (FARP)
6059 6438 * extended link service request.
6060 6439 *
6061 6440 * Note: not supported.
6062 6441 *
6063 6442 * Input:
6064 6443 * ha = adapter state pointer.
6065 6444 * pkt = pointer to fc_packet.
6066 6445 *
6067 6446 * Returns:
6068 6447 * FC_SUCCESS - the packet was accepted for transport.
6069 6448 * FC_TRANSPORT_ERROR - a transport error occurred.
6070 6449 *
6071 6450 * Context:
6072 6451 * Kernel context.
6073 6452 */
6453 +/* ARGSUSED */
6074 6454 static int
6075 6455 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6076 6456 {
6077 6457 ql_acc_rjt_t acc;
6078 - int rval = FC_SUCCESS;
6079 6458
6080 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6459 + QL_PRINT_3(ha, "started\n");
6081 6460
6082 6461 bzero(&acc, sizeof (acc));
6083 6462
6084 6463 /* Build ACC. */
6085 6464 acc.ls_code.ls_code = LA_ELS_ACC;
6086 6465
6087 6466 pkt->pkt_state = FC_PKT_SUCCESS;
6088 6467
6089 6468 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6090 6469 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6091 6470
6092 - if (rval != FC_SUCCESS) {
6093 - EL(ha, "failed, rval = %xh\n", rval);
6094 - } else {
6095 - /*EMPTY*/
6096 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6097 - }
6098 - return (rval);
6471 + QL_PRINT_3(ha, "done\n");
6472 +
6473 + return (FC_SUCCESS);
6099 6474 }
6100 6475
6101 6476 /*
6102 6477 * ql_els_farp_reply
6103 6478 * Issue FC Address Resolution Protocol (FARP)
6104 6479 * extended link service reply.
6105 6480 *
6106 6481 * Note: not supported.
6107 6482 *
6108 6483 * Input:
6109 6484 * ha = adapter state pointer.
6110 6485 * pkt = pointer to fc_packet.
6111 6486 *
6112 6487 * Returns:
6113 6488 * FC_SUCCESS - the packet was accepted for transport.
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
6114 6489 * FC_TRANSPORT_ERROR - a transport error occurred.
6115 6490 *
6116 6491 * Context:
6117 6492 * Kernel context.
6118 6493 */
6119 6494 /* ARGSUSED */
6120 6495 static int
6121 6496 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6122 6497 {
6123 6498 ql_acc_rjt_t acc;
6124 - int rval = FC_SUCCESS;
6125 6499
6126 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6500 + QL_PRINT_3(ha, "started\n");
6127 6501
6128 6502 bzero(&acc, sizeof (acc));
6129 6503
6130 6504 /* Build ACC. */
6131 6505 acc.ls_code.ls_code = LA_ELS_ACC;
6132 6506
6133 6507 pkt->pkt_state = FC_PKT_SUCCESS;
6134 6508
6135 6509 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6136 6510 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6137 6511
6138 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6512 + QL_PRINT_3(ha, "done\n");
6139 6513
6140 - return (rval);
6514 + return (FC_SUCCESS);
6141 6515 }
6142 6516
6143 6517 static int
6144 6518 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
6145 6519 {
6146 6520 uchar_t *rnid_acc;
6147 6521 port_id_t d_id;
6148 6522 ql_link_t *link;
6149 6523 ql_tgt_t *tq;
6150 6524 uint16_t index;
6151 6525 la_els_rnid_acc_t acc;
6152 6526 la_els_rnid_t *req;
6153 6527 size_t req_len;
6154 6528
6155 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6529 + QL_PRINT_3(ha, "started\n");
6156 6530
6157 - req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6531 + req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6158 6532 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6159 6533 index = ql_alpa_to_index[d_id.b.al_pa];
6160 6534
6161 6535 tq = NULL;
6162 6536 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6163 6537 tq = link->base_address;
6164 6538 if (tq->d_id.b24 == d_id.b24) {
6165 6539 break;
6166 6540 } else {
6167 6541 tq = NULL;
6168 6542 }
6169 6543 }
6170 6544
6171 6545 /* Allocate memory for rnid status block */
6172 6546 rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
6173 6547
6174 6548 bzero(&acc, sizeof (acc));
6175 6549
6176 6550 req = (la_els_rnid_t *)pkt->pkt_cmd;
6177 6551 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6178 6552 (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
6179 6553 (caddr_t)rnid_acc) != QL_SUCCESS)) {
6180 6554
6181 6555 kmem_free(rnid_acc, req_len);
6182 6556 acc.ls_code.ls_code = LA_ELS_RJT;
6183 6557
6184 6558 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
6185 6559 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6186 6560
6187 6561 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6188 6562 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6189 6563 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6190 6564
6191 6565 return (FC_FAILURE);
6192 6566 }
6193 6567
6194 6568 acc.ls_code.ls_code = LA_ELS_ACC;
6195 - bcopy(rnid_acc, &acc.hdr, req_len);
6569 + bcopy(rnid_acc, &acc.hdr, sizeof (fc_rnid_hdr_t));
6196 6570 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6197 6571 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6198 6572
6199 6573 kmem_free(rnid_acc, req_len);
6200 6574 pkt->pkt_state = FC_PKT_SUCCESS;
6201 6575
6202 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6576 + QL_PRINT_3(ha, "done\n");
6203 6577
6204 6578 return (FC_SUCCESS);
6205 6579 }
6206 6580
6207 6581 static int
6208 6582 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6209 6583 {
6210 6584 fc_rls_acc_t *rls_acc;
6211 6585 port_id_t d_id;
6212 6586 ql_link_t *link;
6213 6587 ql_tgt_t *tq;
6214 6588 uint16_t index;
6215 6589 la_els_rls_acc_t acc;
6216 6590
6217 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6591 + QL_PRINT_3(ha, "started\n");
6218 6592
6219 6593 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6220 6594 index = ql_alpa_to_index[d_id.b.al_pa];
6221 6595
6222 6596 tq = NULL;
6223 6597 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6224 6598 tq = link->base_address;
6225 6599 if (tq->d_id.b24 == d_id.b24) {
6226 6600 break;
6227 6601 } else {
6228 6602 tq = NULL;
6229 6603 }
6230 6604 }
6231 6605
6232 6606 /* Allocate memory for link error status block */
6233 6607 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6234 6608
6235 6609 bzero(&acc, sizeof (la_els_rls_acc_t));
6236 6610
6237 6611 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6238 6612 (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
6239 6613 (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
6240 6614
6241 6615 kmem_free(rls_acc, sizeof (*rls_acc));
6242 6616 acc.ls_code.ls_code = LA_ELS_RJT;
6243 6617
6244 6618 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6245 6619 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6246 6620
6247 6621 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6248 6622 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6249 6623 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6250 6624
6251 6625 return (FC_FAILURE);
6252 6626 }
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
6253 6627
6254 6628 LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6255 6629 LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6256 6630 LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6257 6631 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6258 6632 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6259 6633
6260 6634 acc.ls_code.ls_code = LA_ELS_ACC;
6261 6635 acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6262 6636 acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6263 - acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
6637 + acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
6264 6638 acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6265 6639 acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6266 6640 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6267 6641 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6268 6642
6269 6643 kmem_free(rls_acc, sizeof (*rls_acc));
6270 6644 pkt->pkt_state = FC_PKT_SUCCESS;
6271 6645
6272 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6646 + QL_PRINT_3(ha, "done\n");
6273 6647
6274 6648 return (FC_SUCCESS);
6275 6649 }
6276 6650
6277 6651 static int
6278 6652 ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
6279 6653 {
6280 6654 port_id_t d_id;
6281 6655 ql_srb_t *sp;
6282 - fc_unsol_buf_t *ubp;
6656 + fc_unsol_buf_t *ubp;
6283 6657 ql_link_t *link, *next_link;
6284 6658 int rval = FC_SUCCESS;
6285 6659 int cnt = 5;
6286 6660
6287 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6661 + QL_PRINT_3(ha, "started\n");
6288 6662
6289 6663 /*
6290 6664 * we need to ensure that q->outcnt == 0, otherwise
6291 6665 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
6292 6666 * will confuse ulps.
6293 6667 */
6294 6668
6295 6669 DEVICE_QUEUE_LOCK(tq);
6296 6670 do {
6297 6671 /*
6298 6672 * wait for the cmds to get drained. If they
6299 6673 * don't get drained then the transport will
6300 6674 * retry PLOGI after few secs.
6301 6675 */
6302 6676 if (tq->outcnt != 0) {
6303 6677 rval = FC_TRAN_BUSY;
6304 6678 DEVICE_QUEUE_UNLOCK(tq);
6305 6679 ql_delay(ha, 10000);
6306 6680 DEVICE_QUEUE_LOCK(tq);
6307 6681 cnt--;
6308 6682 if (!cnt) {
6309 6683 cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
6310 6684 " for %xh outcount %xh", QL_NAME,
6311 6685 ha->instance, tq->d_id.b24, tq->outcnt);
6312 6686 }
6313 6687 } else {
6314 6688 rval = FC_SUCCESS;
6315 6689 break;
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
6316 6690 }
6317 6691 } while (cnt > 0);
6318 6692 DEVICE_QUEUE_UNLOCK(tq);
6319 6693
6320 6694 /*
6321 6695 * return, if busy or if the plogi was asynchronous.
6322 6696 */
6323 6697 if ((rval != FC_SUCCESS) ||
6324 6698 (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
6325 6699 pkt->pkt_comp)) {
6326 - QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
6327 - ha->instance);
6700 + QL_PRINT_3(ha, "done, busy or async\n");
6328 6701 return (rval);
6329 6702 }
6330 6703
6331 6704 /*
6332 6705 * Let us give daemon sufficient time and hopefully
6333 6706 * when transport retries PLOGI, it would have flushed
6334 6707 * callback queue.
6335 6708 */
6336 6709 TASK_DAEMON_LOCK(ha);
6337 - for (link = ha->callback_queue.first; link != NULL;
6710 + for (link = ha->unsol_callback_queue.first; link != NULL;
6338 6711 link = next_link) {
6339 6712 next_link = link->next;
6340 6713 sp = link->base_address;
6341 6714 if (sp->flags & SRB_UB_CALLBACK) {
6342 6715 ubp = ha->ub_array[sp->handle];
6343 6716 d_id.b24 = ubp->ub_frame.s_id;
6344 6717 } else {
6345 6718 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
6346 6719 }
6347 6720 if (tq->d_id.b24 == d_id.b24) {
6348 6721 cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
6349 6722 ha->instance, tq->d_id.b24);
6350 6723 rval = FC_TRAN_BUSY;
6351 6724 break;
6352 6725 }
6353 6726 }
6354 6727 TASK_DAEMON_UNLOCK(ha);
6355 6728
6356 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6729 + QL_PRINT_3(ha, "done\n");
6357 6730
6358 6731 return (rval);
6359 6732 }
6360 6733
6361 6734 /*
6362 6735 * ql_login_port
6363 6736 * Logs in a device if not already logged in.
6364 6737 *
6365 6738 * Input:
6366 6739 * ha = adapter state pointer.
6367 6740 * d_id = 24 bit port ID.
6368 6741 * DEVICE_QUEUE_LOCK must be released.
6369 6742 *
6370 6743 * Returns:
6371 6744 * QL local function return status code.
6372 6745 *
6373 6746 * Context:
6374 6747 * Kernel context.
6375 6748 */
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
6376 6749 static int
6377 6750 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6378 6751 {
6379 6752 ql_adapter_state_t *vha;
6380 6753 ql_link_t *link;
6381 6754 uint16_t index;
6382 6755 ql_tgt_t *tq, *tq2;
6383 6756 uint16_t loop_id, first_loop_id, last_loop_id;
6384 6757 int rval = QL_SUCCESS;
6385 6758
6386 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6387 - d_id.b24);
6759 + QL_PRINT_3(ha, "started, d_id=%xh\n", d_id.b24);
6388 6760
6761 + /* Do not login vports */
6762 + for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6763 + if (vha->d_id.b24 == d_id.b24) {
6764 + EL(ha, "failed=%xh, d_id=%xh vp_index=%xh\n",
6765 + QL_FUNCTION_FAILED, d_id.b24, vha->vp_index);
6766 + return (QL_FUNCTION_FAILED);
6767 + }
6768 + }
6769 +
6389 6770 /* Get head queue index. */
6390 6771 index = ql_alpa_to_index[d_id.b.al_pa];
6391 6772
6392 6773 /* Check for device already has a queue. */
6393 6774 tq = NULL;
6394 6775 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6395 6776 tq = link->base_address;
6396 6777 if (tq->d_id.b24 == d_id.b24) {
6397 6778 loop_id = tq->loop_id;
6398 6779 break;
6399 6780 } else {
6400 6781 tq = NULL;
6401 6782 }
6402 6783 }
6403 6784
6404 6785 /* Let's stop issuing any IO and unsolicited logo */
6405 6786 if ((tq != NULL) && (!(ddi_in_panic()))) {
6406 6787 DEVICE_QUEUE_LOCK(tq);
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
6407 6788 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6408 6789 tq->flags &= ~TQF_RSCN_RCVD;
6409 6790 DEVICE_QUEUE_UNLOCK(tq);
6410 6791 }
6411 6792 if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6412 6793 !(tq->flags & TQF_FABRIC_DEVICE)) {
6413 6794 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6414 6795 }
6415 6796
6416 6797 /* Special case for Nameserver */
6417 - if (d_id.b24 == 0xFFFFFC) {
6418 - loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
6798 + if (d_id.b24 == FS_NAME_SERVER) {
6799 + if (!(ha->topology & QL_FABRIC_CONNECTION)) {
6800 + EL(ha, "failed=%xh, d_id=%xh no fabric\n",
6801 + QL_FUNCTION_FAILED, d_id.b24);
6802 + return (QL_FUNCTION_FAILED);
6803 + }
6804 +
6805 + loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
6419 6806 SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6420 6807 if (tq == NULL) {
6421 6808 ADAPTER_STATE_LOCK(ha);
6422 6809 tq = ql_dev_init(ha, d_id, loop_id);
6423 6810 ADAPTER_STATE_UNLOCK(ha);
6424 6811 if (tq == NULL) {
6425 6812 EL(ha, "failed=%xh, d_id=%xh\n",
6426 6813 QL_FUNCTION_FAILED, d_id.b24);
6427 6814 return (QL_FUNCTION_FAILED);
6428 6815 }
6429 6816 }
6430 - if (!(CFG_IST(ha, CFG_CTRL_8021))) {
6817 + if (!(CFG_IST(ha, CFG_CTRL_82XX))) {
6431 6818 rval = ql_login_fabric_port(ha, tq, loop_id);
6432 6819 if (rval == QL_SUCCESS) {
6433 6820 tq->loop_id = loop_id;
6434 6821 tq->flags |= TQF_FABRIC_DEVICE;
6435 6822 (void) ql_get_port_database(ha, tq, PDF_NONE);
6436 6823 }
6437 - } else {
6438 - ha->topology = (uint8_t)
6439 - (ha->topology | QL_SNS_CONNECTION);
6440 6824 }
6441 6825 /* Check for device already logged in. */
6442 6826 } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6443 6827 if (tq->flags & TQF_FABRIC_DEVICE) {
6444 6828 rval = ql_login_fabric_port(ha, tq, loop_id);
6445 6829 if (rval == QL_PORT_ID_USED) {
6446 6830 rval = QL_SUCCESS;
6447 6831 }
6448 6832 } else if (LOCAL_LOOP_ID(loop_id)) {
6449 6833 rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6450 6834 (tq->flags & TQF_INITIATOR_DEVICE ?
6451 6835 LLF_NONE : LLF_PLOGI));
6452 6836 if (rval == QL_SUCCESS) {
6453 6837 DEVICE_QUEUE_LOCK(tq);
6454 6838 tq->loop_id = loop_id;
6455 6839 DEVICE_QUEUE_UNLOCK(tq);
6456 6840 }
6457 6841 }
6458 - } else if (ha->topology & QL_SNS_CONNECTION) {
6842 + } else if (ha->topology & QL_FABRIC_CONNECTION) {
6459 6843 /* Locate unused loop ID. */
6460 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
6844 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6461 6845 first_loop_id = 0;
6462 6846 last_loop_id = LAST_N_PORT_HDL;
6463 6847 } else if (ha->topology & QL_F_PORT) {
6464 6848 first_loop_id = 0;
6465 6849 last_loop_id = SNS_LAST_LOOP_ID;
6466 6850 } else {
6467 6851 first_loop_id = SNS_FIRST_LOOP_ID;
6468 6852 last_loop_id = SNS_LAST_LOOP_ID;
6469 6853 }
6470 6854
6471 6855 /* Acquire adapter state lock. */
6472 6856 ADAPTER_STATE_LOCK(ha);
6473 6857
6474 6858 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6475 6859 if (tq == NULL) {
6476 6860 EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6477 6861 d_id.b24);
6478 6862
6479 6863 ADAPTER_STATE_UNLOCK(ha);
6480 6864
6481 6865 return (QL_FUNCTION_FAILED);
6482 6866 }
6483 6867
6484 6868 rval = QL_FUNCTION_FAILED;
6485 6869 loop_id = ha->pha->free_loop_id++;
6486 6870 for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6487 6871 index--) {
6488 6872 if (loop_id < first_loop_id ||
6489 6873 loop_id > last_loop_id) {
6490 6874 loop_id = first_loop_id;
6491 6875 ha->pha->free_loop_id = (uint16_t)
6492 6876 (loop_id + 1);
6493 6877 }
6494 6878
6495 6879 /* Bypass if loop ID used. */
6496 6880 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6497 6881 tq2 = ql_loop_id_to_queue(vha, loop_id);
6498 6882 if (tq2 != NULL && tq2 != tq) {
6499 6883 break;
6500 6884 }
6501 6885 }
6502 6886 if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6503 6887 loop_id == ha->loop_id) {
6504 6888 loop_id = ha->pha->free_loop_id++;
6505 6889 continue;
6506 6890 }
6507 6891
6508 6892 ADAPTER_STATE_UNLOCK(ha);
6509 6893 rval = ql_login_fabric_port(ha, tq, loop_id);
6510 6894
6511 6895 /*
6512 6896 * If PORT_ID_USED is returned
6513 6897 * the login_fabric_port() updates
6514 6898 * with the correct loop ID
6515 6899 */
6516 6900 switch (rval) {
6517 6901 case QL_PORT_ID_USED:
6518 6902 /*
6519 6903 * use f/w handle and try to
6520 6904 * login again.
6521 6905 */
6522 6906 ADAPTER_STATE_LOCK(ha);
6523 6907 ha->pha->free_loop_id--;
6524 6908 ADAPTER_STATE_UNLOCK(ha);
6525 6909 loop_id = tq->loop_id;
6526 6910 break;
|
↓ open down ↓ |
56 lines elided |
↑ open up ↑ |
6527 6911
6528 6912 case QL_SUCCESS:
6529 6913 tq->flags |= TQF_FABRIC_DEVICE;
6530 6914 (void) ql_get_port_database(ha,
6531 6915 tq, PDF_NONE);
6532 6916 index = 1;
6533 6917 break;
6534 6918
6535 6919 case QL_LOOP_ID_USED:
6536 6920 tq->loop_id = PORT_NO_LOOP_ID;
6921 + ADAPTER_STATE_LOCK(ha);
6537 6922 loop_id = ha->pha->free_loop_id++;
6923 + ADAPTER_STATE_UNLOCK(ha);
6538 6924 break;
6539 6925
6540 6926 case QL_ALL_IDS_IN_USE:
6541 6927 tq->loop_id = PORT_NO_LOOP_ID;
6542 6928 index = 1;
6543 6929 break;
6544 6930
6545 6931 default:
6546 6932 tq->loop_id = PORT_NO_LOOP_ID;
6547 6933 index = 1;
6548 6934 break;
6549 6935 }
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
6550 6936
6551 6937 ADAPTER_STATE_LOCK(ha);
6552 6938 }
6553 6939
6554 6940 ADAPTER_STATE_UNLOCK(ha);
6555 6941 } else {
6556 6942 rval = QL_FUNCTION_FAILED;
6557 6943 }
6558 6944
6559 6945 if (rval != QL_SUCCESS) {
6560 - EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6946 + EL(ha, "failed, rval=%xh, d_id=%xh\n",
6947 + rval, d_id.b24);
6561 6948 } else {
6562 6949 EL(ha, "d_id=%xh, loop_id=%xh, "
6563 6950 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6564 6951 tq->loop_id, tq->port_name[0], tq->port_name[1],
6565 6952 tq->port_name[2], tq->port_name[3], tq->port_name[4],
6566 6953 tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6567 6954 }
6568 6955 return (rval);
6569 6956 }
6570 6957
6571 6958 /*
6572 6959 * ql_login_fabric_port
6573 6960 * Issue login fabric port mailbox command.
6574 6961 *
6575 6962 * Input:
6576 6963 * ha: adapter state pointer.
6577 6964 * tq: target queue pointer.
6578 6965 * loop_id: FC Loop ID.
6579 6966 *
6580 6967 * Returns:
6581 6968 * ql local function return status code.
6582 6969 *
6583 6970 * Context:
6584 6971 * Kernel context.
6585 6972 */
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
6586 6973 static int
6587 6974 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6588 6975 {
6589 6976 int rval;
6590 6977 int index;
6591 6978 int retry = 0;
6592 6979 port_id_t d_id;
6593 6980 ql_tgt_t *newq;
6594 6981 ql_mbx_data_t mr;
6595 6982
6596 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6597 - tq->d_id.b24);
6983 + QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
6598 6984
6599 6985 /*
6600 - * QL_PARAMETER_ERROR also means the firmware is
6601 - * not able to allocate PCB entry due to resource
6602 - * issues, or collision.
6986 + * QL_PARAMETER_ERROR also means the firmware is not able to allocate
6987 + * PCB entry due to resource issues, or collision.
6603 6988 */
6604 6989 do {
6605 6990 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6606 6991 if ((rval == QL_PARAMETER_ERROR) ||
6607 6992 ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6608 6993 mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6609 6994 retry++;
6610 - drv_usecwait(10 * MILLISEC);
6995 + drv_usecwait(ha->plogi_params->retry_dly_usec);
6611 6996 } else {
6612 6997 break;
6613 6998 }
6614 - } while (retry < 5);
6999 + } while (retry < ha->plogi_params->retry_cnt);
6615 7000
6616 7001 switch (rval) {
6617 7002 case QL_SUCCESS:
6618 7003 tq->loop_id = loop_id;
6619 7004 break;
6620 7005
6621 7006 case QL_PORT_ID_USED:
6622 7007 /*
6623 7008 * This Loop ID should NOT be in use in drivers
6624 7009 */
6625 7010 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6626 7011
6627 7012 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6628 7013 cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6629 7014 "dup loop_id=%xh, d_id=%xh", ha->instance,
6630 7015 newq->loop_id, newq->d_id.b24);
6631 7016 ql_send_logo(ha, newq, NULL);
6632 7017 }
6633 7018
6634 7019 tq->loop_id = mr.mb[1];
6635 7020 break;
6636 7021
6637 7022 case QL_LOOP_ID_USED:
6638 7023 d_id.b.al_pa = LSB(mr.mb[2]);
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
6639 7024 d_id.b.area = MSB(mr.mb[2]);
6640 7025 d_id.b.domain = LSB(mr.mb[1]);
6641 7026
6642 7027 newq = ql_d_id_to_queue(ha, d_id);
6643 7028 if (newq && (newq->loop_id != loop_id)) {
6644 7029 /*
6645 7030 * This should NEVER ever happen; but this
6646 7031 * code is needed to bail out when the worst
6647 7032 * case happens - or as used to happen before
6648 7033 */
6649 - QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
7034 + QL_PRINT_2(ha, "Loop ID is now "
6650 7035 "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6651 7036 "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6652 - ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
7037 + tq->d_id.b24, loop_id,
6653 7038 newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6654 7039 newq->d_id.b24, loop_id);
6655 7040
6656 7041 if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6657 7042 ADAPTER_STATE_LOCK(ha);
6658 7043
6659 7044 index = ql_alpa_to_index[newq->d_id.b.al_pa];
6660 7045 ql_add_link_b(&ha->dev[index], &newq->device);
6661 7046
6662 7047 newq->d_id.b24 = d_id.b24;
6663 7048
6664 7049 index = ql_alpa_to_index[d_id.b.al_pa];
6665 7050 ql_add_link_b(&ha->dev[index], &newq->device);
6666 7051
6667 7052 ADAPTER_STATE_UNLOCK(ha);
6668 7053 }
6669 7054
6670 7055 (void) ql_get_port_database(ha, newq, PDF_NONE);
6671 7056
6672 7057 }
6673 7058
6674 7059 /*
6675 7060 	 * Invalidate the loop ID so that
6676 7061 	 * we can obtain a new one.
6677 7062 */
6678 7063 tq->loop_id = PORT_NO_LOOP_ID;
6679 7064 break;
6680 7065
6681 7066 case QL_ALL_IDS_IN_USE:
6682 7067 rval = QL_FUNCTION_FAILED;
6683 7068 EL(ha, "no loop id's available\n");
|
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
6684 7069 break;
6685 7070
6686 7071 default:
6687 7072 if (rval == QL_COMMAND_ERROR) {
6688 7073 switch (mr.mb[1]) {
6689 7074 case 2:
6690 7075 case 3:
6691 7076 rval = QL_MEMORY_ALLOC_FAILED;
6692 7077 break;
6693 7078
7079 + case 0xd:
6694 7080 case 4:
6695 7081 rval = QL_FUNCTION_TIMEOUT;
6696 7082 break;
7083 + case 1:
7084 + case 5:
6697 7085 case 7:
6698 7086 rval = QL_FABRIC_NOT_INITIALIZED;
6699 7087 break;
6700 7088 default:
6701 7089 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6702 7090 break;
6703 7091 }
6704 7092 } else {
6705 7093 cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6706 7094 " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6707 7095 ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6708 7096 }
6709 7097 break;
6710 7098 }
6711 7099
6712 7100 if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6713 7101 rval != QL_LOOP_ID_USED) {
6714 7102 EL(ha, "failed=%xh\n", rval);
6715 7103 } else {
6716 7104 /*EMPTY*/
6717 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7105 + QL_PRINT_3(ha, "done\n");
6718 7106 }
6719 7107 return (rval);
6720 7108 }
6721 7109
6722 7110 /*
6723 7111 * ql_logout_port
6724 7112 * Logs out a device if possible.
6725 7113 *
6726 7114 * Input:
6727 7115 * ha: adapter state pointer.
6728 7116 * d_id: 24 bit port ID.
6729 7117 *
6730 7118 * Returns:
6731 7119 * QL local function return status code.
6732 7120 *
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
6733 7121 * Context:
6734 7122 * Kernel context.
6735 7123 */
6736 7124 static int
6737 7125 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6738 7126 {
6739 7127 ql_link_t *link;
6740 7128 ql_tgt_t *tq;
6741 7129 uint16_t index;
6742 7130
6743 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7131 + QL_PRINT_3(ha, "started\n");
6744 7132
6745 7133 /* Get head queue index. */
6746 7134 index = ql_alpa_to_index[d_id.b.al_pa];
6747 7135
6748 7136 /* Get device queue. */
6749 7137 tq = NULL;
6750 7138 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6751 7139 tq = link->base_address;
6752 7140 if (tq->d_id.b24 == d_id.b24) {
6753 7141 break;
6754 7142 } else {
6755 7143 tq = NULL;
6756 7144 }
6757 7145 }
6758 7146
6759 7147 if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6760 7148 (void) ql_logout_fabric_port(ha, tq);
6761 7149 tq->loop_id = PORT_NO_LOOP_ID;
6762 7150 }
6763 7151
6764 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7152 + QL_PRINT_3(ha, "done\n");
6765 7153
6766 7154 return (QL_SUCCESS);
6767 7155 }
6768 7156
6769 7157 /*
6770 7158 * ql_dev_init
6771 7159 * Initialize/allocate device queue.
6772 7160 *
6773 7161 * Input:
6774 7162 * ha: adapter state pointer.
6775 7163 * d_id: device destination ID
6776 7164 * loop_id: device loop ID
6777 7165 * ADAPTER_STATE_LOCK must be already obtained.
6778 7166 *
6779 7167 * Returns:
6780 7168 * NULL = failure
6781 7169 *
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
6782 7170 * Context:
6783 7171 * Kernel context.
6784 7172 */
6785 7173 ql_tgt_t *
6786 7174 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6787 7175 {
6788 7176 ql_link_t *link;
6789 7177 uint16_t index;
6790 7178 ql_tgt_t *tq;
6791 7179
6792 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6793 - ha->instance, d_id.b24, loop_id);
7180 + QL_PRINT_3(ha, "started, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6794 7181
6795 7182 index = ql_alpa_to_index[d_id.b.al_pa];
6796 7183
6797 7184 /* If device queue exists, set proper loop ID. */
6798 - tq = NULL;
6799 7185 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6800 7186 tq = link->base_address;
6801 7187 if (tq->d_id.b24 == d_id.b24) {
6802 7188 tq->loop_id = loop_id;
6803 7189
6804 7190 /* Reset port down retry count. */
6805 7191 tq->port_down_retry_count = ha->port_down_retry_count;
6806 7192 tq->qfull_retry_count = ha->qfull_retry_count;
6807 7193
6808 7194 break;
6809 - } else {
6810 - tq = NULL;
6811 7195 }
6812 7196 }
6813 7197
6814 7198 /* If device does not have queue. */
6815 - if (tq == NULL) {
7199 + if (link == NULL) {
6816 7200 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6817 7201 if (tq != NULL) {
6818 7202 /*
6819 7203 * mutex to protect the device queue,
6820 7204 * does not block interrupts.
6821 7205 */
6822 7206 mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6823 - (ha->iflags & IFLG_INTR_AIF) ?
6824 - (void *)(uintptr_t)ha->intr_pri :
6825 - (void *)(uintptr_t)ha->iblock_cookie);
7207 + ha->intr_pri);
6826 7208
6827 7209 tq->d_id.b24 = d_id.b24;
6828 7210 tq->loop_id = loop_id;
6829 7211 tq->device.base_address = tq;
6830 7212 tq->iidma_rate = IIDMA_RATE_INIT;
6831 7213
6832 7214 /* Reset port down retry count. */
6833 7215 tq->port_down_retry_count = ha->port_down_retry_count;
6834 7216 tq->qfull_retry_count = ha->qfull_retry_count;
6835 7217
6836 7218 /* Add device to device queue. */
6837 7219 ql_add_link_b(&ha->dev[index], &tq->device);
6838 7220 }
6839 7221 }
6840 7222
6841 7223 if (tq == NULL) {
6842 7224 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6843 7225 } else {
6844 7226 /*EMPTY*/
6845 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7227 + QL_PRINT_3(ha, "done\n");
6846 7228 }
6847 7229 return (tq);
6848 7230 }
6849 7231
6850 7232 /*
6851 7233 * ql_dev_free
6852 7234  *	Removes queue from device list and frees resources used by queue.
6853 7235 *
6854 7236 * Input:
6855 7237 * ha: adapter state pointer.
6856 7238 * tq: target queue pointer.
6857 7239 * ADAPTER_STATE_LOCK must be already obtained.
6858 7240 *
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
6859 7241 * Context:
6860 7242 * Kernel context.
6861 7243 */
6862 7244 void
6863 7245 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6864 7246 {
6865 7247 ql_link_t *link;
6866 7248 uint16_t index;
6867 7249 ql_lun_t *lq;
6868 7250
6869 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7251 + QL_PRINT_3(ha, "started\n");
6870 7252
6871 7253 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6872 7254 lq = link->base_address;
6873 7255 if (lq->cmd.first != NULL) {
7256 + EL(ha, "cmd %ph pending in lq=%ph, lun=%xh\n",
7257 + lq->cmd.first, lq, lq->lun_no);
6874 7258 return;
6875 7259 }
6876 7260 }
6877 7261
6878 7262 if (tq->outcnt == 0) {
6879 7263 /* Get head queue index. */
6880 7264 index = ql_alpa_to_index[tq->d_id.b.al_pa];
6881 7265 for (link = ha->dev[index].first; link != NULL;
6882 7266 link = link->next) {
6883 7267 if (link->base_address == tq) {
6884 7268 ql_remove_link(&ha->dev[index], link);
6885 7269
6886 7270 link = tq->lun_queues.first;
6887 7271 while (link != NULL) {
6888 7272 lq = link->base_address;
6889 7273 link = link->next;
6890 7274
6891 7275 ql_remove_link(&tq->lun_queues,
6892 7276 &lq->link);
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
6893 7277 kmem_free(lq, sizeof (ql_lun_t));
6894 7278 }
6895 7279
6896 7280 mutex_destroy(&tq->mutex);
6897 7281 kmem_free(tq, sizeof (ql_tgt_t));
6898 7282 break;
6899 7283 }
6900 7284 }
6901 7285 }
6902 7286
6903 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7287 + QL_PRINT_3(ha, "done\n");
6904 7288 }
6905 7289
6906 7290 /*
6907 7291 * ql_lun_queue
6908 7292  *	Allocate LUN queue if it does not exist.
6909 7293 *
6910 7294 * Input:
6911 7295 * ha: adapter state pointer.
6912 - * tq: target queue.
6913 - * lun: LUN number.
7296 + * tq: target queue.
7297 + * lun_addr: LUN number.
6914 7298 *
6915 7299 * Returns:
6916 7300 * NULL = failure
6917 7301 *
6918 7302 * Context:
6919 7303 * Kernel context.
6920 7304 */
6921 7305 static ql_lun_t *
6922 -ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
7306 +ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint64_t lun_addr)
6923 7307 {
6924 7308 ql_lun_t *lq;
6925 7309 ql_link_t *link;
7310 + uint16_t lun_no, lun_no_tmp;
7311 + fcp_ent_addr_t *fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
6926 7312
6927 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7313 + QL_PRINT_3(ha, "started\n");
6928 7314
6929 7315 /* Fast path. */
6930 - if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6931 - QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
7316 + if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_addr ==
7317 + lun_addr) {
7318 + QL_PRINT_3(ha, "fast done\n");
6932 7319 return (tq->last_lun_queue);
6933 7320 }
6934 7321
6935 - if (lun >= MAX_LUNS) {
6936 - EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6937 - return (NULL);
6938 - }
6939 7322 	/* If LUN queue already exists, return it. */
6940 - lq = NULL;
6941 7323 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6942 7324 lq = link->base_address;
6943 - if (lq->lun_no == lun) {
6944 - QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
7325 + if (lq->lun_addr == lun_addr) {
7326 + QL_PRINT_3(ha, "found done\n");
6945 7327 tq->last_lun_queue = lq;
6946 7328 return (lq);
6947 7329 }
6948 7330 }
6949 7331
6950 - /* If queue does exist. */
6951 - lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
7332 + /* Check the LUN addressing levels. */
7333 + if (fcp_ent_addr->ent_addr_1 != 0 || fcp_ent_addr->ent_addr_2 != 0 ||
7334 + fcp_ent_addr->ent_addr_3 != 0) {
7335 + EL(ha, "Unsupported LUN Addressing level=0x%llxh", lun_addr);
7336 + }
6952 7337
6953 - /* Initialize LUN queue. */
7338 + lun_no_tmp = CHAR_TO_SHORT(lobyte(fcp_ent_addr->ent_addr_0),
7339 + hibyte(fcp_ent_addr->ent_addr_0));
7340 +
7341 + lun_no = lun_no_tmp & ~(QL_LUN_AM_MASK << 8);
7342 +
7343 + if (lun_no_tmp & (QL_LUN_AM_LUN << 8)) {
7344 + EL(ha, "Unsupported first level LUN Addressing method=%xh, "
7345 + "lun=%d(%xh)\n", lun_no_tmp & (QL_LUN_AM_MASK << 8),
7346 + lun_no, lun_no_tmp);
7347 + }
7348 +
7349 + /* Create and initialize LUN queue. */
7350 + lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6954 7351 if (lq != NULL) {
6955 7352 lq->link.base_address = lq;
6956 -
6957 - lq->lun_no = lun;
6958 7353 lq->target_queue = tq;
7354 + lq->lun_addr = lun_addr;
7355 + lq->lun_no = lun_no;
6959 7356
6960 7357 DEVICE_QUEUE_LOCK(tq);
6961 7358 ql_add_link_b(&tq->lun_queues, &lq->link);
6962 7359 DEVICE_QUEUE_UNLOCK(tq);
6963 7360 tq->last_lun_queue = lq;
6964 7361 }
6965 7362
6966 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7363 + QL_PRINT_3(ha, "done\n");
6967 7364
6968 7365 return (lq);
6969 7366 }
6970 7367
6971 7368 /*
6972 7369 * ql_fcp_scsi_cmd
6973 7370 * Process fibre channel (FCP) SCSI protocol commands.
6974 7371 *
6975 7372 * Input:
6976 7373 * ha = adapter state pointer.
6977 7374 * pkt = pointer to fc_packet.
6978 7375 * sp = srb pointer.
6979 7376 *
6980 7377 * Returns:
6981 7378 * FC_SUCCESS - the packet was accepted for transport.
6982 7379 * FC_TRANSPORT_ERROR - a transport error occurred.
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
6983 7380 *
6984 7381 * Context:
6985 7382 * Kernel context.
6986 7383 */
6987 7384 static int
6988 7385 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6989 7386 {
6990 7387 port_id_t d_id;
6991 7388 ql_tgt_t *tq;
6992 7389 uint64_t *ptr;
6993 - uint16_t lun;
7390 + uint64_t fcp_ent_addr = 0;
6994 7391
6995 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7392 + QL_PRINT_3(ha, "started\n");
6996 7393
6997 7394 tq = (ql_tgt_t *)pkt->pkt_fca_device;
6998 7395 if (tq == NULL) {
6999 7396 d_id.r.rsvd_1 = 0;
7000 7397 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7001 7398 tq = ql_d_id_to_queue(ha, d_id);
7002 7399 }
7003 7400
7004 7401 sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
7005 - lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7006 - hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7007 -
7402 + fcp_ent_addr = *(uint64_t *)(&sp->fcp->fcp_ent_addr);
7008 7403 if (tq != NULL &&
7009 - (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
7404 + (sp->lun_queue = ql_lun_queue(ha, tq, fcp_ent_addr)) != NULL) {
7010 7405
7011 7406 /*
7012 7407 * zero out FCP response; 24 Bytes
7013 7408 */
7014 7409 ptr = (uint64_t *)pkt->pkt_resp;
7015 7410 *ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
7016 7411
7017 7412 /* Handle task management function. */
7018 7413 if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
7019 7414 sp->fcp->fcp_cntl.cntl_clr_aca |
7020 7415 sp->fcp->fcp_cntl.cntl_reset_tgt |
7021 7416 sp->fcp->fcp_cntl.cntl_reset_lun |
7022 7417 sp->fcp->fcp_cntl.cntl_clr_tsk |
7023 7418 sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
7024 7419 ql_task_mgmt(ha, tq, pkt, sp);
7025 7420 } else {
7026 7421 ha->pha->xioctl->IosRequested++;
7027 7422 ha->pha->xioctl->BytesRequested += (uint32_t)
7028 7423 sp->fcp->fcp_data_len;
7029 7424
7030 7425 /*
7031 7426 * Setup for commands with data transfer
7032 7427 */
7033 7428 sp->iocb = ha->fcp_cmd;
7034 7429 sp->req_cnt = 1;
7035 7430 if (sp->fcp->fcp_data_len != 0) {
|
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
7036 7431 /*
7037 7432 * FCP data is bound to pkt_data_dma
7038 7433 */
7039 7434 if (sp->fcp->fcp_cntl.cntl_write_data) {
7040 7435 (void) ddi_dma_sync(pkt->pkt_data_dma,
7041 7436 0, 0, DDI_DMA_SYNC_FORDEV);
7042 7437 }
7043 7438
7044 7439 /* Setup IOCB count. */
7045 7440 if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
7046 - (!CFG_IST(ha, CFG_CTRL_8021) ||
7441 + (!CFG_IST(ha, CFG_CTRL_82XX) ||
7047 7442 sp->sg_dma.dma_handle == NULL)) {
7048 7443 uint32_t cnt;
7049 7444
7050 7445 cnt = pkt->pkt_data_cookie_cnt -
7051 7446 ha->cmd_segs;
7052 7447 sp->req_cnt = (uint16_t)
7053 7448 (cnt / ha->cmd_cont_segs);
7054 7449 if (cnt % ha->cmd_cont_segs) {
7055 7450 sp->req_cnt = (uint16_t)
7056 7451 (sp->req_cnt + 2);
7057 7452 } else {
7058 7453 sp->req_cnt++;
7059 7454 }
7060 7455 }
7061 7456 }
7062 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7457 + QL_PRINT_3(ha, "done\n");
7063 7458
7064 7459 return (ql_start_cmd(ha, tq, pkt, sp));
7065 7460 }
7066 7461 } else {
7067 7462 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7068 7463 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7069 7464
7070 - if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7071 - ql_awaken_task_daemon(ha, sp, 0, 0);
7465 + if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7466 + ql_io_comp(sp);
7467 + }
7072 7468 }
7073 7469
7074 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7470 + QL_PRINT_3(ha, "done\n");
7075 7471
7076 7472 return (FC_SUCCESS);
7077 7473 }
7078 7474
7079 7475 /*
7080 7476 * ql_task_mgmt
7081 7477 * Task management function processor.
7082 7478 *
7083 7479 * Input:
7084 7480 * ha: adapter state pointer.
7085 7481 * tq: target queue pointer.
7086 7482 * pkt: pointer to fc_packet.
7087 7483 * sp: SRB pointer.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
7088 7484 *
7089 7485 * Context:
7090 7486 * Kernel context.
7091 7487 */
7092 7488 static void
7093 7489 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7094 7490 ql_srb_t *sp)
7095 7491 {
7096 7492 fcp_rsp_t *fcpr;
7097 7493 struct fcp_rsp_info *rsp;
7098 - uint16_t lun;
7494 + ql_lun_t *lq = sp->lun_queue;
7099 7495
7100 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7496 + QL_PRINT_3(ha, "started\n");
7101 7497
7102 7498 fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7103 - rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
7499 + rsp = (struct fcp_rsp_info *)(pkt->pkt_resp + sizeof (fcp_rsp_t));
7104 7500
7105 7501 bzero(fcpr, pkt->pkt_rsplen);
7106 7502
7107 7503 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7108 7504 fcpr->fcp_response_len = 8;
7109 - lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7110 - hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7111 7505
7112 7506 if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7113 - if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7507 + if (ql_clear_aca(ha, tq, lq) != QL_SUCCESS) {
7114 7508 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7115 7509 }
7116 7510 } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7117 - if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7511 + if (ql_lun_reset(ha, tq, lq) != QL_SUCCESS) {
7118 7512 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7119 7513 }
7120 7514 } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7121 7515 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7122 7516 QL_SUCCESS) {
7123 7517 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7124 7518 }
7125 7519 } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7126 - if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7520 + if (ql_clear_task_set(ha, tq, lq) != QL_SUCCESS) {
7127 7521 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7128 7522 }
7129 7523 } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7130 - if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7524 + if (ql_abort_task_set(ha, tq, lq) != QL_SUCCESS) {
7131 7525 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7132 7526 }
7133 7527 } else {
7134 7528 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7135 7529 }
7136 7530
7137 7531 pkt->pkt_state = FC_PKT_SUCCESS;
7138 7532
7139 7533 /* Do command callback. */
7140 7534 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7141 - ql_awaken_task_daemon(ha, sp, 0, 0);
7535 + ql_io_comp(sp);
7142 7536 }
7143 7537
7144 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7538 + QL_PRINT_3(ha, "done\n");
7145 7539 }
7146 7540
7147 7541 /*
7148 7542 * ql_fcp_ip_cmd
7149 7543 * Process fibre channel (FCP) Internet (IP) protocols commands.
7150 7544 *
7151 7545 * Input:
7152 7546 * ha: adapter state pointer.
7153 7547 * pkt: pointer to fc_packet.
7154 7548 * sp: SRB pointer.
7155 7549 *
7156 7550 * Returns:
7157 7551 * FC_SUCCESS - the packet was accepted for transport.
7158 7552 * FC_TRANSPORT_ERROR - a transport error occurred.
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
7159 7553 *
7160 7554 * Context:
7161 7555 * Kernel context.
7162 7556 */
7163 7557 static int
7164 7558 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7165 7559 {
7166 7560 port_id_t d_id;
7167 7561 ql_tgt_t *tq;
7168 7562
7169 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7563 + QL_PRINT_3(ha, "started\n");
7170 7564
7171 7565 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7172 7566 if (tq == NULL) {
7173 7567 d_id.r.rsvd_1 = 0;
7174 7568 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7175 7569 tq = ql_d_id_to_queue(ha, d_id);
7176 7570 }
7177 7571
7178 7572 if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7179 7573 /*
7180 7574 * IP data is bound to pkt_cmd_dma
7181 7575 */
7182 7576 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7183 7577 0, 0, DDI_DMA_SYNC_FORDEV);
7184 7578
7185 7579 /* Setup IOCB count. */
7186 7580 sp->iocb = ha->ip_cmd;
7187 7581 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7188 7582 uint32_t cnt;
7189 7583
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
7190 7584 cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7191 7585 sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7192 7586 if (cnt % ha->cmd_cont_segs) {
7193 7587 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7194 7588 } else {
7195 7589 sp->req_cnt++;
7196 7590 }
7197 7591 } else {
7198 7592 sp->req_cnt = 1;
7199 7593 }
7200 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7594 + QL_PRINT_3(ha, "done\n");
7201 7595
7202 7596 return (ql_start_cmd(ha, tq, pkt, sp));
7203 7597 } else {
7204 7598 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7205 7599 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7206 7600
7207 7601 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7208 - ql_awaken_task_daemon(ha, sp, 0, 0);
7602 + ql_io_comp(sp);
7209 7603 }
7210 7604
7211 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7605 + QL_PRINT_3(ha, "done\n");
7212 7606
7213 7607 return (FC_SUCCESS);
7214 7608 }
7215 7609
7216 7610 /*
7217 7611 * ql_fc_services
7218 7612 * Process fibre channel services (name server).
7219 7613 *
7220 7614 * Input:
7221 7615 * ha: adapter state pointer.
7222 7616 * pkt: pointer to fc_packet.
7223 7617 *
7224 7618 * Returns:
7225 7619 * FC_SUCCESS - the packet was accepted for transport.
7226 7620 * FC_TRANSPORT_ERROR - a transport error occurred.
7227 7621 *
7228 7622 * Context:
7229 7623 * Kernel context.
7230 7624 */
7231 7625 static int
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
7232 7626 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7233 7627 {
7234 7628 uint32_t cnt;
7235 7629 fc_ct_header_t hdr;
7236 7630 la_els_rjt_t rjt;
7237 7631 port_id_t d_id;
7238 7632 ql_tgt_t *tq;
7239 7633 ql_srb_t *sp;
7240 7634 int rval;
7241 7635
7242 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7636 + QL_PRINT_3(ha, "started\n");
7243 7637
7244 7638 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7245 7639 (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7246 7640
7247 7641 bzero(&rjt, sizeof (rjt));
7248 7642
7249 7643 /* Do some sanity checks */
7250 7644 cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7251 7645 sizeof (fc_ct_header_t));
7252 7646 if (cnt > (uint32_t)pkt->pkt_rsplen) {
7253 7647 EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7254 7648 pkt->pkt_rsplen);
7255 7649 return (FC_ELS_MALFORMED);
7256 7650 }
7257 7651
7258 7652 switch (hdr.ct_fcstype) {
7259 7653 case FCSTYPE_DIRECTORY:
7260 7654 case FCSTYPE_MGMTSERVICE:
7655 +
7261 7656 /* An FCA must make sure that the header is in big endian */
7262 7657 ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7263 7658
7264 7659 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7265 7660 tq = ql_d_id_to_queue(ha, d_id);
7266 7661 sp = (ql_srb_t *)pkt->pkt_fca_private;
7662 +
7267 7663 if (tq == NULL ||
7268 7664 (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7269 7665 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7270 7666 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7271 7667 rval = QL_SUCCESS;
7272 7668 break;
7273 7669 }
7274 7670
7671 + if (tq->flags & TQF_LOGIN_NEEDED) {
7672 + DEVICE_QUEUE_LOCK(tq);
7673 + tq->flags &= ~TQF_LOGIN_NEEDED;
7674 + DEVICE_QUEUE_UNLOCK(tq);
7675 + (void) ql_login_fport(ha, tq, tq->loop_id, LFF_NONE,
7676 + NULL);
7677 + }
7275 7678 /*
7276 7679 * Services data is bound to pkt_cmd_dma
7277 7680 */
7278 7681 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7279 7682 DDI_DMA_SYNC_FORDEV);
7280 7683
7281 7684 sp->flags |= SRB_MS_PKT;
7282 7685 sp->retry_count = 32;
7283 7686
7284 7687 /* Setup IOCB count. */
7285 7688 sp->iocb = ha->ms_cmd;
7286 7689 if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7287 7690 cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7288 7691 sp->req_cnt =
7289 - (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7290 - if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7692 + (uint16_t)(cnt / ha->cmd_cont_segs);
7693 + if (cnt % ha->cmd_cont_segs) {
7291 7694 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7292 7695 } else {
7293 7696 sp->req_cnt++;
7294 7697 }
7295 7698 } else {
7296 7699 sp->req_cnt = 1;
7297 7700 }
7298 7701 rval = ql_start_cmd(ha, tq, pkt, sp);
7299 7702
7300 - QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7301 - ha->instance, rval);
7703 + QL_PRINT_3(ha, "done, ql_start_cmd=%xh\n", rval);
7302 7704
7303 7705 return (rval);
7304 7706
7305 7707 default:
7306 7708 EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7307 7709 rval = QL_FUNCTION_PARAMETER_ERROR;
7308 7710 break;
7309 7711 }
7310 7712
7311 7713 if (rval != QL_SUCCESS) {
7312 7714 /* Build RJT. */
7313 7715 rjt.ls_code.ls_code = LA_ELS_RJT;
7314 7716 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7315 7717
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
7316 7718 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7317 7719 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7318 7720
7319 7721 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7320 7722 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7321 7723 EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7322 7724 }
7323 7725
7324 7726 /* Do command callback. */
7325 7727 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7326 - ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7327 - 0, 0);
7728 + ql_io_comp((ql_srb_t *)pkt->pkt_fca_private);
7328 7729 }
7329 7730
7330 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7731 + QL_PRINT_3(ha, "done\n");
7331 7732
7332 7733 return (FC_SUCCESS);
7333 7734 }
7334 7735
7335 7736 /*
7336 7737 * ql_cthdr_endian
7337 7738 * Change endianess of ct passthrough header and payload.
7338 7739 *
7339 7740 * Input:
7340 7741 * acc_handle: DMA buffer access handle.
7341 7742 * ct_hdr: Pointer to header.
7342 7743 * restore: Restore first flag.
7343 7744 *
7344 7745 * Context:
7345 7746 * Interrupt or Kernel context, no mailbox commands allowed.
7346 7747 */
7347 7748 void
7348 7749 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7349 7750 boolean_t restore)
7350 7751 {
7351 7752 uint8_t i, *bp;
7352 7753 fc_ct_header_t hdr;
7353 7754 uint32_t *hdrp = (uint32_t *)&hdr;
7354 7755
7355 7756 ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7356 7757 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7357 7758
7358 7759 if (restore) {
7359 7760 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7360 7761 *hdrp = BE_32(*hdrp);
7361 7762 hdrp++;
7362 7763 }
7363 7764 }
7364 7765
7365 7766 if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7366 7767 bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7367 7768
7368 7769 switch (hdr.ct_cmdrsp) {
7369 7770 case NS_GA_NXT:
7370 7771 case NS_GPN_ID:
7371 7772 case NS_GNN_ID:
7372 7773 case NS_GCS_ID:
7373 7774 case NS_GFT_ID:
7374 7775 case NS_GSPN_ID:
7375 7776 case NS_GPT_ID:
7376 7777 case NS_GID_FT:
7377 7778 case NS_GID_PT:
7378 7779 case NS_RPN_ID:
7379 7780 case NS_RNN_ID:
7380 7781 case NS_RSPN_ID:
7381 7782 case NS_DA_ID:
7382 7783 BIG_ENDIAN_32(bp);
7383 7784 break;
7384 7785 case NS_RFT_ID:
7385 7786 case NS_RCS_ID:
7386 7787 case NS_RPT_ID:
7387 7788 BIG_ENDIAN_32(bp);
7388 7789 bp += 4;
7389 7790 BIG_ENDIAN_32(bp);
7390 7791 break;
7391 7792 case NS_GNN_IP:
7392 7793 case NS_GIPA_IP:
7393 7794 BIG_ENDIAN(bp, 16);
7394 7795 break;
7395 7796 case NS_RIP_NN:
7396 7797 bp += 8;
7397 7798 BIG_ENDIAN(bp, 16);
7398 7799 break;
7399 7800 case NS_RIPA_NN:
7400 7801 bp += 8;
7401 7802 BIG_ENDIAN_64(bp);
7402 7803 break;
7403 7804 default:
7404 7805 break;
7405 7806 }
7406 7807 }
7407 7808
7408 7809 if (restore == B_FALSE) {
7409 7810 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7410 7811 *hdrp = BE_32(*hdrp);
7411 7812 hdrp++;
7412 7813 }
7413 7814 }
7414 7815
7415 7816 ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7416 7817 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7417 7818 }
7418 7819
7419 7820 /*
7420 7821 * ql_start_cmd
7421 7822 * Finishes starting fibre channel protocol (FCP) command.
7422 7823 *
7423 7824 * Input:
7424 7825 * ha: adapter state pointer.
7425 7826 * tq: target queue pointer.
7426 7827 * pkt: pointer to fc_packet.
7427 7828 * sp: SRB pointer.
7428 7829 *
7429 7830 * Context:
|
↓ open down ↓ |
89 lines elided |
↑ open up ↑ |
7430 7831 * Kernel context.
7431 7832 */
7432 7833 static int
7433 7834 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7434 7835 ql_srb_t *sp)
7435 7836 {
7436 7837 int rval = FC_SUCCESS;
7437 7838 time_t poll_wait = 0;
7438 7839 ql_lun_t *lq = sp->lun_queue;
7439 7840
7440 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7841 + QL_PRINT_3(ha, "started\n");
7441 7842
7442 7843 sp->handle = 0;
7443 7844
7444 7845 /* Set poll for finish. */
7445 7846 if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7446 7847 sp->flags |= SRB_POLL;
7447 7848 if (pkt->pkt_timeout == 0) {
7448 7849 pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7449 7850 }
7450 7851 }
7451 7852
7452 7853 /* Acquire device queue lock. */
7453 7854 DEVICE_QUEUE_LOCK(tq);
7454 7855
7455 7856 /*
7456 7857 * If we need authentication, report device busy to
7457 7858 * upper layers to retry later
7458 7859 */
7459 7860 if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7460 7861 DEVICE_QUEUE_UNLOCK(tq);
7461 7862 EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
7462 7863 tq->d_id.b24);
7463 7864 return (FC_DEVICE_BUSY);
7464 7865 }
7465 7866
7466 7867 /* Insert command onto watchdog queue. */
7467 7868 if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
7468 7869 ql_timeout_insert(ha, tq, sp);
7469 7870 } else {
7470 7871 /*
7471 7872 * Run dump requests in polled mode as kernel threads
7472 7873 * and interrupts may have been disabled.
7473 7874 */
7474 7875 sp->flags |= SRB_POLL;
7475 7876 sp->init_wdg_q_time = 0;
7476 7877 sp->isp_timeout = 0;
7477 7878 }
7478 7879
7479 7880 /* If a polling command setup wait time. */
7480 7881 if (sp->flags & SRB_POLL) {
7481 7882 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7482 7883 poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7483 7884 } else {
7484 7885 poll_wait = pkt->pkt_timeout;
7485 7886 }
|
↓ open down ↓ |
35 lines elided |
↑ open up ↑ |
7486 7887 }
7487 7888
7488 7889 if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7489 7890 (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7490 7891 /* Set ending status. */
7491 7892 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7492 7893
7493 7894 /* Call done routine to handle completions. */
7494 7895 sp->cmd.next = NULL;
7495 7896 DEVICE_QUEUE_UNLOCK(tq);
7496 - ql_done(&sp->cmd);
7897 + ql_done(&sp->cmd, B_FALSE);
7497 7898 } else {
7498 7899 if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7499 7900 int do_lip = 0;
7500 7901
7501 7902 DEVICE_QUEUE_UNLOCK(tq);
7502 7903
7503 7904 ADAPTER_STATE_LOCK(ha);
7504 7905 if ((do_lip = ha->pha->lip_on_panic) == 0) {
7505 7906 ha->pha->lip_on_panic++;
7506 7907 }
7507 7908 ADAPTER_STATE_UNLOCK(ha);
7508 7909
7509 7910 if (!do_lip) {
7510 7911
7511 7912 /*
7512 7913 * That Qlogic F/W performs PLOGI, PRLI, etc
7513 7914 * is helpful here. If a PLOGI fails for some
7514 7915 * reason, you would get CS_PORT_LOGGED_OUT
7515 7916 * or some such error; and we should get a
7516 7917 * careful polled mode login kicked off inside
7517 7918 * of this driver itself. You don't have FC
7518 7919 * transport's services as all threads are
7519 7920 * suspended, interrupts disabled, and so
7520 7921 * on. Right now we do re-login if the packet
7521 7922 * state isn't FC_PKT_SUCCESS.
7522 7923 */
7523 7924 (void) ql_abort_isp(ha);
7524 7925 }
7525 7926
7526 7927 ql_start_iocb(ha, sp);
7527 7928 } else {
7528 7929 /* Add the command to the device queue */
7529 7930 if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7530 7931 ql_add_link_t(&lq->cmd, &sp->cmd);
7531 7932 } else {
7532 7933 ql_add_link_b(&lq->cmd, &sp->cmd);
7533 7934 }
|
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
7534 7935
7535 7936 sp->flags |= SRB_IN_DEVICE_QUEUE;
7536 7937
7537 7938 /* Check whether next message can be processed */
7538 7939 ql_next(ha, lq);
7539 7940 }
7540 7941 }
7541 7942
7542 7943 /* If polling, wait for finish. */
7543 7944 if (poll_wait) {
7544 - if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
7545 - int res;
7546 -
7547 - res = ql_abort((opaque_t)ha, pkt, 0);
7548 - if (res != FC_SUCCESS && res != FC_ABORTED) {
7549 - DEVICE_QUEUE_LOCK(tq);
7550 - ql_remove_link(&lq->cmd, &sp->cmd);
7551 - sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7552 - DEVICE_QUEUE_UNLOCK(tq);
7553 - }
7945 + if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS &&
7946 + pkt->pkt_state == FC_PKT_SUCCESS) {
7947 + pkt->pkt_state = FC_PKT_TIMEOUT;
7948 + pkt->pkt_reason = FC_REASON_HW_ERROR;
7554 7949 }
7555 7950
7556 7951 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7557 7952 EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7558 7953 rval = FC_TRANSPORT_ERROR;
7559 7954 }
7560 7955
7561 7956 if (ddi_in_panic()) {
7562 7957 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7563 7958 port_id_t d_id;
7564 7959
7565 7960 /*
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
7566 7961 * successful LOGIN implies by design
7567 7962 * that PRLI also succeeded for disks
7568 7963 * Note also that there is no special
7569 7964 * mailbox command to send PRLI.
7570 7965 */
7571 7966 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7572 7967 (void) ql_login_port(ha, d_id);
7573 7968 }
7574 7969 }
7575 7970
7971 + (void) qlc_fm_check_pkt_dma_handle(ha, sp);
7576 7972 /*
7577 7973 * This should only happen during CPR dumping
7578 7974 */
7579 7975 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7580 7976 pkt->pkt_comp) {
7581 7977 sp->flags &= ~SRB_POLL;
7582 7978 (*pkt->pkt_comp)(pkt);
7583 7979 }
7584 7980 }
7585 7981
7586 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7982 + QL_PRINT_3(ha, "done\n");
7587 7983
7588 7984 return (rval);
7589 7985 }
7590 7986
7591 7987 /*
7592 7988 * ql_poll_cmd
7593 7989 * Polls commands for completion.
7594 7990 *
7595 7991 * Input:
7596 7992 * ha = adapter state pointer.
7597 7993 * sp = SRB command pointer.
7598 7994 * poll_wait = poll wait time in seconds.
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
7599 7995 *
7600 7996 * Returns:
7601 7997 * QL local function return status code.
7602 7998 *
7603 7999 * Context:
7604 8000 * Kernel context.
7605 8001 */
7606 8002 static int
7607 8003 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7608 8004 {
8005 + uint32_t index;
7609 8006 int rval = QL_SUCCESS;
7610 8007 time_t msecs_left = poll_wait * 100; /* 10ms inc */
7611 8008 ql_adapter_state_t *ha = vha->pha;
7612 8009
7613 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8010 + QL_PRINT_3(ha, "started\n");
7614 8011
7615 8012 while (sp->flags & SRB_POLL) {
7616 8013
7617 8014 if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7618 - ha->idle_timer >= 15 || ddi_in_panic()) {
8015 + ha->idle_timer >= 15 || ddi_in_panic() ||
8016 + curthread->t_flag & T_INTR_THREAD) {
7619 8017
7620 8018 /* If waiting for restart, do it now. */
7621 8019 if (ha->port_retry_timer != 0) {
7622 8020 ADAPTER_STATE_LOCK(ha);
7623 8021 ha->port_retry_timer = 0;
7624 8022 ADAPTER_STATE_UNLOCK(ha);
7625 8023
7626 8024 TASK_DAEMON_LOCK(ha);
7627 8025 ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7628 8026 TASK_DAEMON_UNLOCK(ha);
7629 8027 }
7630 8028
8029 + ADAPTER_STATE_LOCK(ha);
8030 + ha->flags |= POLL_INTR;
8031 + ADAPTER_STATE_UNLOCK(ha);
8032 +
7631 8033 if (INTERRUPT_PENDING(ha)) {
7632 - (void) ql_isr((caddr_t)ha);
8034 + (void) ql_isr_aif((caddr_t)ha, 0);
7633 8035 INTR_LOCK(ha);
7634 8036 ha->intr_claimed = TRUE;
7635 8037 INTR_UNLOCK(ha);
7636 8038 }
8039 + if (ha->flags & NO_INTR_HANDSHAKE) {
8040 + for (index = 0; index < ha->rsp_queues_cnt;
8041 + index++) {
8042 + (void) ql_isr_aif((caddr_t)ha,
8043 + (caddr_t)((uintptr_t)(index + 1)));
8044 + }
8045 + }
7637 8046
8047 + ADAPTER_STATE_LOCK(ha);
8048 + ha->flags &= ~POLL_INTR;
8049 + ADAPTER_STATE_UNLOCK(ha);
8050 +
7638 8051 /*
7639 8052 * Call task thread function in case the
7640 8053 * daemon is not running.
7641 8054 */
7642 8055 TASK_DAEMON_LOCK(ha);
7643 8056
7644 8057 if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7645 8058 QL_TASK_PENDING(ha)) {
7646 - ha->task_daemon_flags |= TASK_THREAD_CALLED;
7647 8059 ql_task_thread(ha);
7648 - ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7649 8060 }
7650 8061
7651 8062 TASK_DAEMON_UNLOCK(ha);
7652 8063 }
7653 8064
7654 - if (msecs_left < 10) {
7655 - rval = QL_FUNCTION_TIMEOUT;
7656 - break;
8065 + if (msecs_left == 0) {
8066 + if (rval == QL_SUCCESS) {
8067 + EL(ha, "timeout\n");
8068 + rval = QL_FUNCTION_TIMEOUT;
8069 + if (ql_abort_io(ha, sp) == QL_SUCCESS) {
8070 + sp->pkt->pkt_reason = CS_ABORTED;
8071 + sp->cmd.next = NULL;
8072 + ql_done(&sp->cmd, B_FALSE);
8073 + break;
8074 + }
8075 + sp->flags |= SRB_COMMAND_TIMEOUT;
8076 + EL(ha, "abort failed, isp_abort_needed\n");
8077 + ql_awaken_task_daemon(ha, NULL,
8078 + ISP_ABORT_NEEDED, 0);
8079 + msecs_left = 30 * 100;
8080 + } else {
8081 + break;
8082 + }
7657 8083 }
7658 8084
7659 8085 /*
7660 8086 * Polling interval is 10 milli seconds; Increasing
7661 8087 * the polling interval to seconds since disk IO
7662 8088 * timeout values are ~60 seconds is tempting enough,
7663 8089 * but CPR dump time increases, and so will the crash
7664 8090 * dump time; Don't toy with the settings without due
7665 8091 * consideration for all the scenarios that will be
7666 8092 * impacted.
7667 8093 */
7668 8094 ql_delay(ha, 10000);
7669 8095 msecs_left -= 10;
7670 8096 }
7671 8097
7672 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8098 + QL_PRINT_3(ha, "done\n");
7673 8099
7674 8100 return (rval);
7675 8101 }
7676 8102
7677 8103 /*
7678 8104 * ql_next
7679 8105 * Retrieve and process next job in the device queue.
7680 8106 *
7681 8107 * Input:
7682 8108 * ha: adapter state pointer.
7683 8109 * lq: LUN queue pointer.
7684 8110 * DEVICE_QUEUE_LOCK must be already obtained.
7685 8111 *
7686 8112 * Output:
7687 8113 * Releases DEVICE_QUEUE_LOCK upon exit.
7688 8114 *
7689 8115 * Context:
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
7690 8116 * Interrupt or Kernel context, no mailbox commands allowed.
7691 8117 */
7692 8118 void
7693 8119 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7694 8120 {
7695 8121 ql_srb_t *sp;
7696 8122 ql_link_t *link;
7697 8123 ql_tgt_t *tq = lq->target_queue;
7698 8124 ql_adapter_state_t *ha = vha->pha;
7699 8125
7700 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8126 + QL_PRINT_3(ha, "started\n");
7701 8127
7702 8128 if (ddi_in_panic()) {
7703 8129 DEVICE_QUEUE_UNLOCK(tq);
7704 - QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7705 - ha->instance);
8130 + QL_PRINT_3(ha, "panic/active exit\n");
7706 8131 return;
7707 8132 }
7708 8133
7709 8134 while ((link = lq->cmd.first) != NULL) {
7710 8135 sp = link->base_address;
7711 8136
7712 8137 /* Exit if can not start commands. */
7713 8138 if (DRIVER_SUSPENDED(ha) ||
7714 8139 (ha->flags & ONLINE) == 0 ||
7715 8140 !VALID_DEVICE_ID(ha, tq->loop_id) ||
7716 - sp->flags & SRB_ABORT ||
7717 8141 tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7718 8142 TQF_QUEUE_SUSPENDED)) {
7719 8143 EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7720 - "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
8144 + "haf=%xh, loop_id=%xh sp=%ph\n", tq->d_id.b24,
7721 8145 ha->task_daemon_flags, tq->flags, sp->flags,
7722 - ha->flags, tq->loop_id);
8146 + ha->flags, tq->loop_id, sp);
7723 8147 break;
7724 8148 }
7725 8149
7726 8150 /*
7727 8151 * Find out the LUN number for untagged command use.
7728 8152 * If there is an untagged command pending for the LUN,
7729 8153 * we would not submit another untagged command
7730 8154 * or if reached LUN execution throttle.
7731 8155 */
7732 8156 if (sp->flags & SRB_FCP_CMD_PKT) {
7733 8157 if (lq->flags & LQF_UNTAGGED_PENDING ||
7734 8158 lq->lun_outcnt >= ha->execution_throttle) {
7735 - QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7736 - "lf=%xh, lun_outcnt=%xh\n", ha->instance,
8159 + QL_PRINT_8(ha, "break, d_id=%xh, "
8160 + "lf=%xh, lun_outcnt=%xh\n",
7737 8161 tq->d_id.b24, lq->flags, lq->lun_outcnt);
7738 8162 break;
7739 8163 }
7740 8164 if (sp->fcp->fcp_cntl.cntl_qtype ==
7741 8165 FCP_QTYPE_UNTAGGED) {
7742 8166 /*
7743 8167 * Set the untagged-flag for the LUN
7744 8168 * so that no more untagged commands
7745 8169 * can be submitted for this LUN.
7746 8170 */
7747 8171 lq->flags |= LQF_UNTAGGED_PENDING;
7748 8172 }
7749 8173
7750 8174 /* Count command as sent. */
7751 8175 lq->lun_outcnt++;
7752 8176 }
7753 8177
7754 8178 /* Remove srb from device queue. */
7755 8179 ql_remove_link(&lq->cmd, &sp->cmd);
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
7756 8180 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7757 8181
7758 8182 tq->outcnt++;
7759 8183
7760 8184 ql_start_iocb(vha, sp);
7761 8185 }
7762 8186
7763 8187 /* Release device queue lock. */
7764 8188 DEVICE_QUEUE_UNLOCK(tq);
7765 8189
7766 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8190 + QL_PRINT_3(ha, "done\n");
7767 8191 }
7768 8192
7769 8193 /*
7770 8194 * ql_done
7771 8195 * Process completed commands.
7772 8196 *
7773 8197 * Input:
7774 8198 * link: first command link in chain.
8199 + * cmplt: do command complete call back.
7775 8200 *
7776 8201 * Context:
7777 8202 * Interrupt or Kernel context, no mailbox commands allowed.
7778 8203 */
7779 8204 void
7780 -ql_done(ql_link_t *link)
8205 +ql_done(ql_link_t *link, boolean_t cmplt)
7781 8206 {
7782 8207 ql_adapter_state_t *ha;
7783 8208 ql_link_t *next_link;
7784 8209 ql_srb_t *sp;
7785 8210 ql_tgt_t *tq;
7786 8211 ql_lun_t *lq;
8212 + uint64_t set_flags;
7787 8213
7788 - QL_PRINT_3(CE_CONT, "started\n");
8214 + QL_PRINT_3(NULL, "started\n");
7789 8215
7790 8216 for (; link != NULL; link = next_link) {
7791 8217 next_link = link->next;
7792 8218 sp = link->base_address;
8219 + link->prev = link->next = NULL;
8220 + link->head = NULL;
7793 8221 ha = sp->ha;
8222 + set_flags = 0;
7794 8223
7795 8224 if (sp->flags & SRB_UB_CALLBACK) {
7796 8225 QL_UB_LOCK(ha);
7797 8226 if (sp->flags & SRB_UB_IN_ISP) {
7798 8227 if (ha->ub_outcnt != 0) {
7799 8228 ha->ub_outcnt--;
7800 8229 }
7801 - QL_UB_UNLOCK(ha);
7802 - ql_isp_rcvbuf(ha);
7803 - QL_UB_LOCK(ha);
8230 + if (ha->flags & IP_ENABLED) {
8231 + set_flags |= NEED_UNSOLICITED_BUFFERS;
8232 + }
7804 8233 }
7805 8234 QL_UB_UNLOCK(ha);
7806 - ql_awaken_task_daemon(ha, sp, 0, 0);
8235 + ql_awaken_task_daemon(ha, sp, set_flags, 0);
7807 8236 } else {
7808 8237 /* Free outstanding command slot. */
8238 + INTR_LOCK(ha);
7809 8239 if (sp->handle != 0) {
7810 - ha->outstanding_cmds[
8240 + EL(ha, "free sp=%ph, sp->hdl=%xh\n",
8241 + (void *)sp, sp->handle);
8242 + ha->pha->outstanding_cmds[
7811 8243 sp->handle & OSC_INDEX_MASK] = NULL;
7812 8244 sp->handle = 0;
7813 8245 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7814 8246 }
8247 + INTR_UNLOCK(ha);
7815 8248
7816 8249 /* Acquire device queue lock. */
7817 8250 lq = sp->lun_queue;
7818 8251 tq = lq->target_queue;
7819 8252 DEVICE_QUEUE_LOCK(tq);
7820 8253
7821 8254 /* Decrement outstanding commands on device. */
7822 8255 if (tq->outcnt != 0) {
7823 8256 tq->outcnt--;
7824 8257 }
7825 8258
7826 8259 if (sp->flags & SRB_FCP_CMD_PKT) {
7827 8260 if (sp->fcp->fcp_cntl.cntl_qtype ==
7828 8261 FCP_QTYPE_UNTAGGED) {
7829 8262 /*
7830 8263 * Clear the flag for this LUN so that
7831 8264 * untagged commands can be submitted
7832 8265 * for it.
7833 8266 */
7834 8267 lq->flags &= ~LQF_UNTAGGED_PENDING;
7835 8268 }
7836 8269
7837 8270 if (lq->lun_outcnt != 0) {
7838 8271 lq->lun_outcnt--;
7839 8272 }
7840 8273 }
7841 8274
7842 8275 /* Reset port down retry count on good completion. */
7843 8276 if (sp->pkt->pkt_reason == CS_COMPLETE) {
7844 8277 tq->port_down_retry_count =
7845 8278 ha->port_down_retry_count;
7846 8279 tq->qfull_retry_count = ha->qfull_retry_count;
7847 8280 }
7848 8281
7849 8282
7850 8283 /* Alter aborted status for fast timeout feature */
7851 8284 if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
7852 8285 (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7853 8286 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7854 8287 sp->flags & SRB_RETRY &&
7855 8288 (sp->flags & SRB_WATCHDOG_ENABLED &&
7856 8289 sp->wdg_q_time > 1)) {
7857 8290 EL(ha, "fast abort modify change\n");
7858 8291 sp->flags &= ~(SRB_RETRY);
7859 8292 sp->pkt->pkt_reason = CS_TIMEOUT;
7860 8293 }
7861 8294
7862 8295 /* Place request back on top of target command queue */
7863 8296 if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7864 8297 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7865 8298 sp->flags & SRB_RETRY &&
7866 8299 (sp->flags & SRB_WATCHDOG_ENABLED &&
|
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
7867 8300 sp->wdg_q_time > 1)) {
7868 8301 sp->flags &= ~(SRB_ISP_STARTED |
7869 8302 SRB_ISP_COMPLETED | SRB_RETRY);
7870 8303
7871 8304 /* Reset watchdog timer */
7872 8305 sp->wdg_q_time = sp->init_wdg_q_time;
7873 8306
7874 8307 /* Issue marker command on reset status. */
7875 8308 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7876 8309 (sp->pkt->pkt_reason == CS_RESET ||
7877 - (CFG_IST(ha, CFG_CTRL_24258081) &&
8310 + (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
7878 8311 sp->pkt->pkt_reason == CS_ABORTED))) {
7879 8312 (void) ql_marker(ha, tq->loop_id, 0,
7880 8313 MK_SYNC_ID);
7881 8314 }
7882 8315
7883 8316 ql_add_link_t(&lq->cmd, &sp->cmd);
7884 8317 sp->flags |= SRB_IN_DEVICE_QUEUE;
7885 8318 ql_next(ha, lq);
7886 8319 } else {
7887 8320 /* Remove command from watchdog queue. */
7888 8321 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7889 8322 ql_remove_link(&tq->wdg, &sp->wdg);
7890 8323 sp->flags &= ~SRB_WATCHDOG_ENABLED;
7891 8324 }
7892 8325
7893 8326 if (lq->cmd.first != NULL) {
7894 8327 ql_next(ha, lq);
7895 8328 } else {
7896 8329 /* Release LU queue specific lock. */
7897 8330 DEVICE_QUEUE_UNLOCK(tq);
7898 8331 if (ha->pha->pending_cmds.first !=
7899 8332 NULL) {
7900 8333 ql_start_iocb(ha, NULL);
7901 8334 }
7902 8335 }
7903 8336
7904 8337 /* Sync buffers if required. */
7905 8338 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7906 8339 (void) ddi_dma_sync(
7907 8340 sp->pkt->pkt_resp_dma,
7908 8341 0, 0, DDI_DMA_SYNC_FORCPU);
|
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
7909 8342 }
7910 8343
7911 8344 /* Map ISP completion codes. */
7912 8345 sp->pkt->pkt_expln = FC_EXPLN_NONE;
7913 8346 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7914 8347 switch (sp->pkt->pkt_reason) {
7915 8348 case CS_COMPLETE:
7916 8349 sp->pkt->pkt_state = FC_PKT_SUCCESS;
7917 8350 break;
7918 8351 case CS_RESET:
7919 - /* Issue marker command. */
7920 - if (!(ha->task_daemon_flags &
7921 - LOOP_DOWN)) {
7922 - (void) ql_marker(ha,
7923 - tq->loop_id, 0,
7924 - MK_SYNC_ID);
7925 - }
7926 8352 sp->pkt->pkt_state =
7927 8353 FC_PKT_PORT_OFFLINE;
7928 8354 sp->pkt->pkt_reason =
7929 8355 FC_REASON_ABORTED;
7930 8356 break;
7931 8357 case CS_RESOUCE_UNAVAILABLE:
7932 8358 sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7933 8359 sp->pkt->pkt_reason =
7934 8360 FC_REASON_PKT_BUSY;
7935 8361 break;
7936 8362
7937 8363 case CS_TIMEOUT:
7938 8364 sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7939 8365 sp->pkt->pkt_reason =
7940 8366 FC_REASON_HW_ERROR;
7941 8367 break;
7942 8368 case CS_DATA_OVERRUN:
7943 8369 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7944 8370 sp->pkt->pkt_reason =
7945 8371 FC_REASON_OVERRUN;
7946 8372 break;
7947 8373 case CS_PORT_UNAVAILABLE:
7948 8374 case CS_PORT_LOGGED_OUT:
7949 8375 sp->pkt->pkt_state =
7950 8376 FC_PKT_PORT_OFFLINE;
7951 8377 sp->pkt->pkt_reason =
7952 8378 FC_REASON_LOGIN_REQUIRED;
7953 8379 ql_send_logo(ha, tq, NULL);
7954 8380 break;
7955 8381 case CS_PORT_CONFIG_CHG:
7956 8382 sp->pkt->pkt_state =
7957 8383 FC_PKT_PORT_OFFLINE;
7958 8384 sp->pkt->pkt_reason =
7959 8385 FC_REASON_OFFLINE;
7960 8386 break;
7961 8387 case CS_QUEUE_FULL:
7962 8388 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7963 8389 sp->pkt->pkt_reason = FC_REASON_QFULL;
7964 8390 break;
7965 8391
7966 8392 case CS_ABORTED:
7967 8393 DEVICE_QUEUE_LOCK(tq);
7968 8394 if (tq->flags & (TQF_RSCN_RCVD |
7969 8395 TQF_NEED_AUTHENTICATION)) {
7970 8396 sp->pkt->pkt_state =
7971 8397 FC_PKT_PORT_OFFLINE;
7972 8398 sp->pkt->pkt_reason =
7973 8399 FC_REASON_LOGIN_REQUIRED;
|
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
7974 8400 } else {
7975 8401 sp->pkt->pkt_state =
7976 8402 FC_PKT_LOCAL_RJT;
7977 8403 sp->pkt->pkt_reason =
7978 8404 FC_REASON_ABORTED;
7979 8405 }
7980 8406 DEVICE_QUEUE_UNLOCK(tq);
7981 8407 break;
7982 8408
7983 8409 case CS_TRANSPORT:
8410 + case CS_DEV_NOT_READY:
7984 8411 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7985 8412 sp->pkt->pkt_reason =
7986 8413 FC_PKT_TRAN_ERROR;
7987 8414 break;
7988 8415
7989 8416 case CS_DATA_UNDERRUN:
7990 8417 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7991 8418 sp->pkt->pkt_reason =
7992 8419 FC_REASON_UNDERRUN;
7993 8420 break;
7994 8421 case CS_DMA_ERROR:
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
7995 8422 case CS_BAD_PAYLOAD:
7996 8423 case CS_UNKNOWN:
7997 8424 case CS_CMD_FAILED:
7998 8425 default:
7999 8426 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8000 8427 sp->pkt->pkt_reason =
8001 8428 FC_REASON_HW_ERROR;
8002 8429 break;
8003 8430 }
8004 8431
8432 + (void) qlc_fm_check_pkt_dma_handle(ha, sp);
8433 +
8005 8434 /* Now call the pkt completion callback */
8006 8435 if (sp->flags & SRB_POLL) {
8007 8436 sp->flags &= ~SRB_POLL;
8008 - } else if (sp->pkt->pkt_comp) {
8009 - if (sp->pkt->pkt_tran_flags &
8010 - FC_TRAN_IMMEDIATE_CB) {
8011 - (*sp->pkt->pkt_comp)(sp->pkt);
8012 - } else {
8013 - ql_awaken_task_daemon(ha, sp,
8014 - 0, 0);
8015 - }
8437 + } else if (cmplt == B_TRUE &&
8438 + sp->pkt->pkt_comp) {
8439 + (sp->pkt->pkt_comp)(sp->pkt);
8440 + } else {
8441 + ql_io_comp(sp);
8016 8442 }
8017 8443 }
8018 8444 }
8019 8445 }
8020 8446
8021 - QL_PRINT_3(CE_CONT, "done\n");
8447 + QL_PRINT_3(ha, "done\n");
8022 8448 }
8023 8449
8024 8450 /*
8025 8451 * ql_awaken_task_daemon
8026 8452 * Adds command completion callback to callback queue and/or
8027 8453 * awakens task daemon thread.
8028 8454 *
8029 8455 * Input:
8030 8456 * ha: adapter state pointer.
8031 8457 * sp: srb pointer.
8032 8458 * set_flags: task daemon flags to set.
8033 8459 * reset_flags: task daemon flags to reset.
8034 8460 *
8035 8461 * Context:
8036 8462 * Interrupt or Kernel context, no mailbox commands allowed.
8037 8463 */
8038 8464 void
8039 8465 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8040 - uint32_t set_flags, uint32_t reset_flags)
8466 + uint64_t set_flags, uint64_t reset_flags)
8041 8467 {
8042 8468 ql_adapter_state_t *ha = vha->pha;
8043 8469
8044 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8470 + QL_PRINT_3(ha, "started, sp=%p set_flags=%llx reset_flags=%llx\n",
8471 + sp, set_flags, reset_flags);
8045 8472
8046 8473 /* Acquire task daemon lock. */
8047 8474 TASK_DAEMON_LOCK(ha);
8048 8475
8049 - if (set_flags & ISP_ABORT_NEEDED) {
8050 - if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
8051 - set_flags &= ~ISP_ABORT_NEEDED;
8052 - }
8476 + if (set_flags) {
8477 + ha->task_daemon_flags |= set_flags;
8053 8478 }
8479 + if (reset_flags) {
8480 + ha->task_daemon_flags &= ~reset_flags;
8481 + }
8054 8482
8055 - ha->task_daemon_flags |= set_flags;
8056 - ha->task_daemon_flags &= ~reset_flags;
8483 + if (!(ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG)) {
8484 + EL(ha, "done, not alive dtf=%xh\n", ha->task_daemon_flags);
8485 + TASK_DAEMON_UNLOCK(ha);
8486 + return;
8487 + }
8057 8488
8058 - if (QL_DAEMON_SUSPENDED(ha)) {
8059 - if (sp != NULL) {
8060 - TASK_DAEMON_UNLOCK(ha);
8061 -
8062 - /* Do callback. */
8063 - if (sp->flags & SRB_UB_CALLBACK) {
8064 - ql_unsol_callback(sp);
8065 - } else {
8066 - (*sp->pkt->pkt_comp)(sp->pkt);
8067 - }
8489 + if (sp != NULL) {
8490 + if (sp->flags & SRB_UB_CALLBACK) {
8491 + ql_add_link_b(&ha->unsol_callback_queue, &sp->cmd);
8068 8492 } else {
8069 - if (!(curthread->t_flag & T_INTR_THREAD) &&
8070 - !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
8071 - ha->task_daemon_flags |= TASK_THREAD_CALLED;
8072 - ql_task_thread(ha);
8073 - ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
8074 - }
8075 -
8076 - TASK_DAEMON_UNLOCK(ha);
8493 + EL(ha, "sp=%p, spf=%xh is not SRB_UB_CALLBACK",
8494 + sp->flags);
8077 8495 }
8078 - } else {
8079 - if (sp != NULL) {
8080 - ql_add_link_b(&ha->callback_queue, &sp->cmd);
8081 - }
8496 + }
8082 8497
8083 - if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
8084 - cv_broadcast(&ha->cv_task_daemon);
8085 - }
8086 - TASK_DAEMON_UNLOCK(ha);
8498 + if (!ha->driver_thread_awake) {
8499 + QL_PRINT_3(ha, "driver_thread_awake\n");
8500 + cv_broadcast(&ha->cv_task_daemon);
8087 8501 }
8088 8502
8089 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8503 + TASK_DAEMON_UNLOCK(ha);
8504 +
8505 + QL_PRINT_3(ha, "done\n");
8090 8506 }
8091 8507
8092 8508 /*
8093 8509 * ql_task_daemon
8094 8510 * Thread that is awakened by the driver when
8095 8511 * background work needs to be done.
8096 8512 *
8097 8513 * Input:
8098 8514 * arg = adapter state pointer.
8099 8515 *
8100 8516 * Context:
8101 8517 * Kernel context.
8102 8518 */
8103 8519 static void
8104 8520 ql_task_daemon(void *arg)
8105 8521 {
8106 8522 ql_adapter_state_t *ha = (void *)arg;
8107 8523
8108 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8524 + QL_PRINT_3(ha, "started\n");
8109 8525
8110 - CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
8111 - "ql_task_daemon");
8112 -
8113 8526 /* Acquire task daemon lock. */
8114 8527 TASK_DAEMON_LOCK(ha);
8115 8528
8116 - ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
8117 -
8118 8529 while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8119 8530 ql_task_thread(ha);
8120 8531
8121 - QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
8122 -
8123 8532 /*
8124 8533 * Before we wait on the conditional variable, we
8125 8534 * need to check if STOP_FLG is set for us to terminate
8126 8535 */
8127 8536 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8128 8537 break;
8129 8538 }
8130 8539
8131 - /*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
8132 - CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
8133 -
8540 + QL_PRINT_3(ha, "Going to sleep\n");
8134 8541 ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8135 8542
8136 8543 /* If killed, stop task daemon */
8137 8544 if (cv_wait_sig(&ha->cv_task_daemon,
8138 8545 &ha->task_daemon_mutex) == 0) {
8139 - ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
8546 + QL_PRINT_10(ha, "killed\n");
8547 + break;
8140 8548 }
8141 8549
8550 + QL_PRINT_3(ha, "Awakened\n");
8142 8551 ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8143 -
8144 - /*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
8145 - CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
8146 -
8147 - QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
8148 8552 }
8149 8553
8150 - ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
8554 + ha->task_daemon_flags &= ~(TASK_DAEMON_SLEEPING_FLG |
8151 8555 TASK_DAEMON_ALIVE_FLG);
8152 8556
8153 - /*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
8154 - CALLB_CPR_EXIT(&ha->cprinfo);
8557 + TASK_DAEMON_UNLOCK(ha);
8155 8558
8156 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8157 -
8158 - thread_exit();
8559 + QL_PRINT_3(ha, "done\n");
8159 8560 }
8160 8561
8161 8562 /*
8162 8563 * ql_task_thread
8163 8564 * Thread run by daemon.
8164 8565 *
8165 8566 * Input:
8166 8567 * ha = adapter state pointer.
8167 8568 * TASK_DAEMON_LOCK must be acquired prior to call.
8168 8569 *
8169 8570 * Context:
8170 8571 * Kernel context.
8171 8572 */
8172 8573 static void
8173 8574 ql_task_thread(ql_adapter_state_t *ha)
8174 8575 {
8175 - int loop_again;
8576 + boolean_t loop_again;
8176 8577 ql_srb_t *sp;
8177 - ql_head_t *head;
8178 8578 ql_link_t *link;
8179 8579 caddr_t msg;
8180 8580 ql_adapter_state_t *vha;
8181 8581
8582 + ha->driver_thread_awake++;
8182 8583 do {
8183 - QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8184 - ha->instance, ha->task_daemon_flags);
8584 + loop_again = B_FALSE;
8185 8585
8186 - loop_again = FALSE;
8586 + if (ha->sf != ha->flags ||
8587 + (ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS) != ha->df ||
8588 + ha->cf != ha->cfg_flags) {
8589 + ha->sf = ha->flags;
8590 + ha->df = ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS;
8591 + ha->cf = ha->cfg_flags;
8592 + EL(ha, "df=%xh, sf=%xh, cf=%xh\n",
8593 + ha->df, ha->sf, ha->cf);
8594 + }
8187 8595
8188 8596 QL_PM_LOCK(ha);
8189 8597 if (ha->power_level != PM_LEVEL_D0) {
8190 8598 QL_PM_UNLOCK(ha);
8191 - ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8599 + ha->task_daemon_flags |= DRIVER_STALL |
8600 + TASK_DAEMON_STALLED_FLG;
8192 8601 break;
8193 8602 }
8194 8603 QL_PM_UNLOCK(ha);
8195 8604
8196 - /* IDC event. */
8197 - if (ha->task_daemon_flags & IDC_EVENT) {
8198 - ha->task_daemon_flags &= ~IDC_EVENT;
8605 + if (ha->flags & ADAPTER_SUSPENDED) {
8606 + ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8607 + break;
8608 + }
8609 +
8610 + /* Handle FW IDC events. */
8611 + while (ha->flags & (IDC_STALL_NEEDED | IDC_RESTART_NEEDED |
8612 + IDC_ACK_NEEDED)) {
8199 8613 TASK_DAEMON_UNLOCK(ha);
8200 - ql_process_idc_event(ha);
8614 + ql_idc(ha);
8201 8615 TASK_DAEMON_LOCK(ha);
8202 - loop_again = TRUE;
8616 + loop_again = B_TRUE;
8203 8617 }
8204 8618
8205 - if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8619 + if (ha->task_daemon_flags &
8206 8620 (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8207 - (ha->flags & ONLINE) == 0) {
8621 + !(ha->flags & ONLINE)) {
8208 8622 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8209 8623 break;
8210 8624 }
8211 8625 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8212 8626
8213 - if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8627 + /* Store error log. */
8628 + if (ha->errlog[0] != 0 &&
8629 + !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
8214 8630 TASK_DAEMON_UNLOCK(ha);
8215 - if (ha->log_parity_pause == B_TRUE) {
8216 - (void) ql_flash_errlog(ha,
8217 - FLASH_ERRLOG_PARITY_ERR, 0,
8218 - MSW(ha->parity_stat_err),
8219 - LSW(ha->parity_stat_err));
8220 - ha->log_parity_pause = B_FALSE;
8221 - }
8222 - ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8631 + (void) ql_flash_errlog(ha, ha->errlog[0],
8632 + ha->errlog[1], ha->errlog[2], ha->errlog[3]);
8633 + ha->errlog[0] = 0;
8223 8634 TASK_DAEMON_LOCK(ha);
8224 - loop_again = TRUE;
8635 + loop_again = B_TRUE;
8225 8636 }
8226 8637
8227 8638 /* Idle Check. */
8228 8639 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8229 8640 ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8230 - if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8641 + if (!DRIVER_SUSPENDED(ha)) {
8231 8642 TASK_DAEMON_UNLOCK(ha);
8232 8643 ql_idle_check(ha);
8233 8644 TASK_DAEMON_LOCK(ha);
8234 - loop_again = TRUE;
8645 + loop_again = B_TRUE;
8235 8646 }
8236 8647 }
8237 8648
8238 8649 /* Crystal+ port#0 bypass transition */
8239 8650 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8240 8651 ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8241 8652 TASK_DAEMON_UNLOCK(ha);
8242 8653 (void) ql_initiate_lip(ha);
8243 8654 TASK_DAEMON_LOCK(ha);
8244 - loop_again = TRUE;
8655 + loop_again = B_TRUE;
8245 8656 }
8246 8657
8247 8658 /* Abort queues needed. */
8248 8659 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8249 8660 ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8250 - TASK_DAEMON_UNLOCK(ha);
8251 - ql_abort_queues(ha);
8252 - TASK_DAEMON_LOCK(ha);
8661 + if (ha->flags & ABORT_CMDS_LOOP_DOWN_TMO) {
8662 + TASK_DAEMON_UNLOCK(ha);
8663 + ql_abort_queues(ha);
8664 + TASK_DAEMON_LOCK(ha);
8665 + loop_again = B_TRUE;
8666 + }
8253 8667 }
8254 8668
8255 8669 /* Not suspended, awaken waiting routines. */
8256 - if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8670 + if (!DRIVER_SUSPENDED(ha) &&
8257 8671 ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8258 8672 ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8259 8673 cv_broadcast(&ha->cv_dr_suspended);
8260 - loop_again = TRUE;
8674 + loop_again = B_TRUE;
8261 8675 }
8262 8676
8263 8677 /* Handle RSCN changes. */
8264 8678 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8265 8679 if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8266 8680 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8267 8681 TASK_DAEMON_UNLOCK(ha);
8268 8682 (void) ql_handle_rscn_update(vha);
8269 8683 TASK_DAEMON_LOCK(ha);
8270 - loop_again = TRUE;
8684 + loop_again = B_TRUE;
8271 8685 }
8272 8686 }
8273 8687
8274 8688 /* Handle state changes. */
8275 8689 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8276 8690 if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8277 8691 !(ha->task_daemon_flags &
8278 8692 TASK_DAEMON_POWERING_DOWN)) {
8279 8693 /* Report state change. */
8280 8694 EL(vha, "state change = %xh\n", vha->state);
8281 8695 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8282 8696
8283 8697 if (vha->task_daemon_flags &
8284 8698 COMMAND_WAIT_NEEDED) {
8285 8699 vha->task_daemon_flags &=
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
8286 8700 ~COMMAND_WAIT_NEEDED;
8287 8701 if (!(ha->task_daemon_flags &
8288 8702 COMMAND_WAIT_ACTIVE)) {
8289 8703 ha->task_daemon_flags |=
8290 8704 COMMAND_WAIT_ACTIVE;
8291 8705 TASK_DAEMON_UNLOCK(ha);
8292 8706 ql_cmd_wait(ha);
8293 8707 TASK_DAEMON_LOCK(ha);
8294 8708 ha->task_daemon_flags &=
8295 8709 ~COMMAND_WAIT_ACTIVE;
8710 + loop_again = B_TRUE;
8296 8711 }
8297 8712 }
8298 8713
8299 8714 msg = NULL;
8300 8715 if (FC_PORT_STATE_MASK(vha->state) ==
8301 8716 FC_STATE_OFFLINE) {
8302 8717 if (vha->task_daemon_flags &
8303 8718 STATE_ONLINE) {
8304 8719 if (ha->topology &
8305 8720 QL_LOOP_CONNECTION) {
8306 8721 msg = "Loop OFFLINE";
8307 8722 } else {
8308 8723 msg = "Link OFFLINE";
8309 8724 }
8310 8725 }
8311 8726 vha->task_daemon_flags &=
8312 8727 ~STATE_ONLINE;
8313 8728 } else if (FC_PORT_STATE_MASK(vha->state) ==
8314 8729 FC_STATE_LOOP) {
8315 8730 if (!(vha->task_daemon_flags &
8316 8731 STATE_ONLINE)) {
8317 8732 msg = "Loop ONLINE";
8318 8733 }
8319 8734 vha->task_daemon_flags |= STATE_ONLINE;
8320 8735 } else if (FC_PORT_STATE_MASK(vha->state) ==
8321 8736 FC_STATE_ONLINE) {
8322 8737 if (!(vha->task_daemon_flags &
8323 8738 STATE_ONLINE)) {
8324 8739 msg = "Link ONLINE";
8325 8740 }
8326 8741 vha->task_daemon_flags |= STATE_ONLINE;
8327 8742 } else {
|
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
8328 8743 msg = "Unknown Link state";
8329 8744 }
8330 8745
8331 8746 if (msg != NULL) {
8332 8747 cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8333 8748 "%s", QL_NAME, ha->instance,
8334 8749 vha->vp_index, msg);
8335 8750 }
8336 8751
8337 8752 if (vha->flags & FCA_BOUND) {
8338 - QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8339 - "cb state=%xh\n", ha->instance,
8340 - vha->vp_index, vha->state);
8753 + QL_PRINT_10(vha, "statec_"
8754 + "cb state=%xh\n",
8755 + vha->state);
8341 8756 TASK_DAEMON_UNLOCK(ha);
8342 8757 (vha->bind_info.port_statec_cb)
8343 8758 (vha->bind_info.port_handle,
8344 8759 vha->state);
8345 8760 TASK_DAEMON_LOCK(ha);
8761 + loop_again = B_TRUE;
8346 8762 }
8347 - loop_again = TRUE;
8348 8763 }
8349 8764 }
8350 8765
8351 - if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8352 - !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8353 - EL(ha, "processing LIP reset\n");
8354 - ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8355 - TASK_DAEMON_UNLOCK(ha);
8356 - for (vha = ha; vha != NULL; vha = vha->vp_next) {
8357 - if (vha->flags & FCA_BOUND) {
8358 - QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8359 - "cb reset\n", ha->instance,
8360 - vha->vp_index);
8361 - (vha->bind_info.port_statec_cb)
8362 - (vha->bind_info.port_handle,
8363 - FC_STATE_TARGET_PORT_RESET);
8364 - }
8365 - }
8366 - TASK_DAEMON_LOCK(ha);
8367 - loop_again = TRUE;
8368 - }
8369 -
8370 - if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8371 - FIRMWARE_UP)) {
8766 + if (ha->task_daemon_flags & NEED_UNSOLICITED_BUFFERS &&
8767 + ha->task_daemon_flags & FIRMWARE_UP) {
8372 8768 /*
8373 8769 * The firmware needs more unsolicited
8374 8770 * buffers. We cannot allocate any new
8375 8771 * buffers unless the ULP module requests
8376 8772 * for new buffers. All we can do here is
8377 8773 * to give received buffers from the pool
8378 8774 * that is already allocated
8379 8775 */
8380 8776 ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8381 8777 TASK_DAEMON_UNLOCK(ha);
8382 8778 ql_isp_rcvbuf(ha);
8383 8779 TASK_DAEMON_LOCK(ha);
8384 - loop_again = TRUE;
8780 + loop_again = B_TRUE;
8385 8781 }
8386 8782
8783 + if (ha->task_daemon_flags & WATCHDOG_NEEDED) {
8784 + ha->task_daemon_flags &= ~WATCHDOG_NEEDED;
8785 + TASK_DAEMON_UNLOCK(ha);
8786 + ql_watchdog(ha);
8787 + TASK_DAEMON_LOCK(ha);
8788 + loop_again = B_TRUE;
8789 + }
8790 +
8387 8791 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8388 8792 TASK_DAEMON_UNLOCK(ha);
8389 8793 (void) ql_abort_isp(ha);
8390 8794 TASK_DAEMON_LOCK(ha);
8391 - loop_again = TRUE;
8795 + loop_again = B_TRUE;
8392 8796 }
8393 8797
8394 - if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8395 - COMMAND_WAIT_NEEDED))) {
8396 - if (QL_IS_SET(ha->task_daemon_flags,
8397 - RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8398 - ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8399 - if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8400 - ha->task_daemon_flags |= RESET_ACTIVE;
8798 + if (!(ha->task_daemon_flags & (COMMAND_WAIT_NEEDED |
8799 + ABORT_QUEUES_NEEDED | ISP_ABORT_NEEDED | LOOP_DOWN)) &&
8800 + ha->task_daemon_flags & FIRMWARE_UP) {
8801 + if (ha->task_daemon_flags & MARKER_NEEDED) {
8802 + if (!(ha->task_daemon_flags & MARKER_ACTIVE)) {
8803 + ha->task_daemon_flags |= MARKER_ACTIVE;
8804 + ha->task_daemon_flags &= ~MARKER_NEEDED;
8401 8805 TASK_DAEMON_UNLOCK(ha);
8402 8806 for (vha = ha; vha != NULL;
8403 8807 vha = vha->vp_next) {
8404 - ql_rst_aen(vha);
8808 + (void) ql_marker(vha, 0, 0,
8809 + MK_SYNC_ALL);
8405 8810 }
8406 8811 TASK_DAEMON_LOCK(ha);
8407 - ha->task_daemon_flags &= ~RESET_ACTIVE;
8408 - loop_again = TRUE;
8812 + ha->task_daemon_flags &= ~MARKER_ACTIVE;
8813 + TASK_DAEMON_UNLOCK(ha);
8814 + ql_restart_queues(ha);
8815 + TASK_DAEMON_LOCK(ha);
8816 + loop_again = B_TRUE;
8817 + } else {
8818 + ha->task_daemon_flags &= ~MARKER_NEEDED;
8409 8819 }
8410 8820 }
8411 8821
8412 - if (QL_IS_SET(ha->task_daemon_flags,
8413 - LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8822 + if (ha->task_daemon_flags & LOOP_RESYNC_NEEDED) {
8414 8823 if (!(ha->task_daemon_flags &
8415 8824 LOOP_RESYNC_ACTIVE)) {
8416 8825 ha->task_daemon_flags |=
8417 8826 LOOP_RESYNC_ACTIVE;
8418 8827 TASK_DAEMON_UNLOCK(ha);
8419 - (void) ql_loop_resync(ha);
8828 + ql_loop_resync(ha);
8420 8829 TASK_DAEMON_LOCK(ha);
8421 - loop_again = TRUE;
8830 + loop_again = B_TRUE;
8422 8831 }
8423 8832 }
8424 8833 }
8425 8834
8426 8835 /* Port retry needed. */
8427 8836 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8428 8837 ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8429 8838 ADAPTER_STATE_LOCK(ha);
8430 8839 ha->port_retry_timer = 0;
8431 8840 ADAPTER_STATE_UNLOCK(ha);
8432 8841
8433 8842 TASK_DAEMON_UNLOCK(ha);
8434 8843 ql_restart_queues(ha);
8435 8844 TASK_DAEMON_LOCK(ha);
8436 8845 loop_again = B_TRUE;
8437 8846 }
8438 8847
8439 8848 /* iiDMA setting needed? */
8440 8849 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8441 8850 ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8442 -
8443 8851 TASK_DAEMON_UNLOCK(ha);
8444 8852 ql_iidma(ha);
8445 8853 TASK_DAEMON_LOCK(ha);
8446 8854 loop_again = B_TRUE;
8447 8855 }
8448 8856
8449 8857 if (ha->task_daemon_flags & SEND_PLOGI) {
8450 8858 ha->task_daemon_flags &= ~SEND_PLOGI;
8451 8859 TASK_DAEMON_UNLOCK(ha);
8452 8860 (void) ql_n_port_plogi(ha);
8453 8861 TASK_DAEMON_LOCK(ha);
8862 + loop_again = B_TRUE;
8454 8863 }
8455 8864
8456 - head = &ha->callback_queue;
8457 - if (head->first != NULL) {
8458 - sp = head->first->base_address;
8865 + if (ha->unsol_callback_queue.first != NULL) {
8866 + sp = (ha->unsol_callback_queue.first)->base_address;
8459 8867 link = &sp->cmd;
8460 -
8461 - /* Dequeue command. */
8462 - ql_remove_link(head, link);
8463 -
8464 - /* Release task daemon lock. */
8868 + ql_remove_link(&ha->unsol_callback_queue, link);
8465 8869 TASK_DAEMON_UNLOCK(ha);
8870 + ql_unsol_callback(sp);
8871 + TASK_DAEMON_LOCK(ha);
8872 + loop_again = B_TRUE;
8873 + }
8466 8874
8467 - /* Do callback. */
8468 - if (sp->flags & SRB_UB_CALLBACK) {
8469 - ql_unsol_callback(sp);
8470 - } else {
8471 - (*sp->pkt->pkt_comp)(sp->pkt);
8472 - }
8473 -
8474 - /* Acquire task daemon lock. */
8875 + if (ha->task_daemon_flags & IDC_POLL_NEEDED) {
8876 + ha->task_daemon_flags &= ~IDC_POLL_NEEDED;
8877 + TASK_DAEMON_UNLOCK(ha);
8878 + ql_8021_idc_poll(ha);
8475 8879 TASK_DAEMON_LOCK(ha);
8880 + loop_again = B_TRUE;
8881 + }
8476 8882
8477 - loop_again = TRUE;
8883 + if (ha->task_daemon_flags & LED_BLINK) {
8884 + ha->task_daemon_flags &= ~LED_BLINK;
8885 + TASK_DAEMON_UNLOCK(ha);
8886 + ql_blink_led(ha);
8887 + TASK_DAEMON_LOCK(ha);
8888 + loop_again = B_TRUE;
8478 8889 }
8479 8890
8480 - } while (loop_again);
8891 + } while (loop_again == B_TRUE);
8892 +
8893 + if (ha->driver_thread_awake) {
8894 + ha->driver_thread_awake--;
8895 + }
8896 + QL_PRINT_3(ha, "done\n");
8481 8897 }
8482 8898
8483 8899 /*
8484 8900 * ql_idle_check
8485 8901 * Test whether the adapter is alive and well.
8486 8902 *
8487 8903 * Input:
8488 8904 * ha: adapter state pointer.
8489 8905 *
8490 8906 * Context:
8491 8907 * Kernel context.
8492 8908 */
8493 8909 static void
8494 8910 ql_idle_check(ql_adapter_state_t *ha)
8495 8911 {
8496 - ddi_devstate_t state;
8497 8912 int rval;
8498 8913 ql_mbx_data_t mr;
8499 8914
8500 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8915 + QL_PRINT_3(ha, "started\n");
8501 8916
8502 8917 /* Firmware Ready Test. */
8503 8918 rval = ql_get_firmware_state(ha, &mr);
8504 - if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8919 + if (!DRIVER_SUSPENDED(ha) &&
8505 8920 (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8506 8921 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8507 - state = ddi_get_devstate(ha->dip);
8508 - if (state == DDI_DEVSTATE_UP) {
8509 - /*EMPTY*/
8510 - ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8511 - DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8512 - }
8513 8922 TASK_DAEMON_LOCK(ha);
8514 8923 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8515 8924 EL(ha, "fstate_ready, isp_abort_needed\n");
8516 8925 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8517 8926 }
8518 8927 TASK_DAEMON_UNLOCK(ha);
8519 8928 }
8520 8929
8521 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8930 + QL_PRINT_3(ha, "done\n");
8522 8931 }
8523 8932
8524 8933 /*
8525 8934 * ql_unsol_callback
8526 8935 * Handle unsolicited buffer callbacks.
8527 8936 *
8528 8937 * Input:
8529 8938 * ha = adapter state pointer.
8530 8939 * sp = srb pointer.
8531 8940 *
8532 8941 * Context:
8533 8942 * Kernel context.
8534 8943 */
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
8535 8944 static void
8536 8945 ql_unsol_callback(ql_srb_t *sp)
8537 8946 {
8538 8947 fc_affected_id_t *af;
8539 8948 fc_unsol_buf_t *ubp;
8540 8949 uchar_t r_ctl;
8541 8950 uchar_t ls_code;
8542 8951 ql_tgt_t *tq;
8543 8952 ql_adapter_state_t *ha = sp->ha, *pha = sp->ha->pha;
8544 8953
8545 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8954 + QL_PRINT_3(ha, "started\n");
8546 8955
8547 8956 ubp = ha->ub_array[sp->handle];
8548 8957 r_ctl = ubp->ub_frame.r_ctl;
8549 8958 ls_code = ubp->ub_buffer[0];
8550 8959
8551 8960 if (sp->lun_queue == NULL) {
8552 8961 tq = NULL;
8553 8962 } else {
8554 8963 tq = sp->lun_queue->target_queue;
8555 8964 }
8556 8965
8557 8966 QL_UB_LOCK(ha);
8558 8967 if (sp->flags & SRB_UB_FREE_REQUESTED ||
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
8559 8968 pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8560 8969 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8561 8970 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8562 8971 sp->flags |= SRB_UB_IN_FCA;
8563 8972 QL_UB_UNLOCK(ha);
8564 8973 return;
8565 8974 }
8566 8975
8567 8976 /* Process RSCN */
8568 8977 if (sp->flags & SRB_UB_RSCN) {
8569 - int sendup = 1;
8978 + int sendup;
8570 8979
8571 8980 /*
8572 8981 * Defer RSCN posting until commands return
8573 8982 */
8574 8983 QL_UB_UNLOCK(ha);
8575 8984
8576 8985 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8577 8986
8578 8987 /* Abort outstanding commands */
8579 8988 sendup = ql_process_rscn(ha, af);
8580 8989 if (sendup == 0) {
8581 8990
8582 8991 TASK_DAEMON_LOCK(ha);
8583 - ql_add_link_b(&pha->callback_queue, &sp->cmd);
8992 + ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
8584 8993 TASK_DAEMON_UNLOCK(ha);
8585 8994
8586 8995 /*
8587 8996 * Wait for commands to drain in F/W (doesn't take
8588 8997 * more than a few milliseconds)
8589 8998 */
8590 8999 ql_delay(ha, 10000);
8591 9000
8592 - QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8593 - "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
9001 + QL_PRINT_2(ha, "done rscn_sendup=0, "
9002 + "fmt=%xh, d_id=%xh\n",
8594 9003 af->aff_format, af->aff_d_id);
8595 9004 return;
8596 9005 }
8597 9006
8598 9007 QL_UB_LOCK(ha);
8599 9008
8600 9009 EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8601 9010 af->aff_format, af->aff_d_id);
8602 9011 }
8603 9012
8604 9013 /* Process UNSOL LOGO */
8605 9014 if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8606 9015 QL_UB_UNLOCK(ha);
8607 9016
8608 9017 if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8609 9018 TASK_DAEMON_LOCK(ha);
8610 - ql_add_link_b(&pha->callback_queue, &sp->cmd);
9019 + ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
8611 9020 TASK_DAEMON_UNLOCK(ha);
8612 - QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8613 - "\n", ha->instance, ha->vp_index, tq->d_id.b24);
9021 + QL_PRINT_2(ha, "logo_sendup=0, d_id=%xh"
9022 + "\n", tq->d_id.b24);
8614 9023 return;
8615 9024 }
8616 9025
8617 9026 QL_UB_LOCK(ha);
8618 9027 EL(ha, "sending unsol logout for %xh to transport\n",
8619 9028 ubp->ub_frame.s_id);
8620 9029 }
8621 9030
9031 + if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_PLOGI)) {
9032 + EL(ha, "sending unsol plogi for %xh to transport\n",
9033 + ubp->ub_frame.s_id);
9034 + }
9035 +
8622 9036 sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8623 9037 SRB_UB_FCP);
8624 9038
8625 9039 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8626 9040 (void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8627 9041 ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8628 9042 }
8629 9043 QL_UB_UNLOCK(ha);
8630 9044
8631 9045 (ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8632 9046 ubp, sp->ub_type);
8633 9047
8634 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9048 + QL_PRINT_3(ha, "done\n");
8635 9049 }
8636 9050
8637 9051 /*
8638 9052 * ql_send_logo
8639 9053 *
8640 9054 * Input:
8641 9055 * ha: adapter state pointer.
8642 9056 * tq: target queue pointer.
8643 9057 * done_q: done queue pointer.
8644 9058 *
8645 9059 * Context:
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
8646 9060 * Interrupt or Kernel context, no mailbox commands allowed.
8647 9061 */
8648 9062 void
8649 9063 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
8650 9064 {
8651 9065 fc_unsol_buf_t *ubp;
8652 9066 ql_srb_t *sp;
8653 9067 la_els_logo_t *payload;
8654 9068 ql_adapter_state_t *ha = vha->pha;
8655 9069
8656 - QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8657 - tq->d_id.b24);
9070 + QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
8658 9071
8659 - if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
9072 + if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == FS_BROADCAST)) {
8660 9073 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8661 9074 return;
8662 9075 }
8663 9076
8664 9077 if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8665 9078 tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8666 9079
8667 9080 /* Locate a buffer to use. */
8668 9081 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8669 9082 if (ubp == NULL) {
8670 9083 EL(vha, "Failed, get_unsolicited_buffer\n");
8671 9084 return;
8672 9085 }
8673 9086
8674 9087 DEVICE_QUEUE_LOCK(tq);
8675 9088 tq->flags |= TQF_NEED_AUTHENTICATION;
8676 9089 tq->logout_sent++;
8677 9090 DEVICE_QUEUE_UNLOCK(tq);
8678 9091
8679 - EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);
8680 -
8681 9092 sp = ubp->ub_fca_private;
8682 9093
8683 9094 /* Set header. */
8684 9095 ubp->ub_frame.d_id = vha->d_id.b24;
8685 9096 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8686 9097 ubp->ub_frame.s_id = tq->d_id.b24;
8687 9098 ubp->ub_frame.rsvd = 0;
8688 9099 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8689 9100 F_CTL_SEQ_INITIATIVE;
8690 9101 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8691 9102 ubp->ub_frame.seq_cnt = 0;
8692 9103 ubp->ub_frame.df_ctl = 0;
8693 9104 ubp->ub_frame.seq_id = 0;
8694 9105 ubp->ub_frame.rx_id = 0xffff;
8695 9106 ubp->ub_frame.ox_id = 0xffff;
8696 9107
8697 9108 /* set payload. */
8698 9109 payload = (la_els_logo_t *)ubp->ub_buffer;
8699 9110 bzero(payload, sizeof (la_els_logo_t));
8700 9111 /* Make sure ls_code in payload is always big endian */
8701 9112 ubp->ub_buffer[0] = LA_ELS_LOGO;
8702 9113 ubp->ub_buffer[1] = 0;
8703 9114 ubp->ub_buffer[2] = 0;
8704 9115 ubp->ub_buffer[3] = 0;
8705 9116 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8706 9117 &payload->nport_ww_name.raw_wwn[0], 8);
8707 9118 payload->nport_id.port_id = tq->d_id.b24;
8708 9119
8709 9120 QL_UB_LOCK(ha);
8710 9121 sp->flags |= SRB_UB_CALLBACK;
8711 9122 QL_UB_UNLOCK(ha);
8712 9123 if (tq->lun_queues.first != NULL) {
8713 9124 sp->lun_queue = (tq->lun_queues.first)->base_address;
|
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
8714 9125 } else {
8715 9126 sp->lun_queue = ql_lun_queue(vha, tq, 0);
8716 9127 }
8717 9128 if (done_q) {
8718 9129 ql_add_link_b(done_q, &sp->cmd);
8719 9130 } else {
8720 9131 ql_awaken_task_daemon(ha, sp, 0, 0);
8721 9132 }
8722 9133 }
8723 9134
8724 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9135 + QL_PRINT_3(ha, "done\n");
8725 9136 }
8726 9137
8727 9138 static int
8728 9139 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8729 9140 {
8730 9141 port_id_t d_id;
8731 9142 ql_srb_t *sp;
8732 9143 ql_link_t *link;
8733 9144 int sendup = 1;
8734 9145
8735 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9146 + QL_PRINT_3(ha, "started\n");
8736 9147
8737 9148 DEVICE_QUEUE_LOCK(tq);
8738 9149 if (tq->outcnt) {
8739 9150 DEVICE_QUEUE_UNLOCK(tq);
8740 9151 sendup = 0;
8741 9152 (void) ql_abort_device(ha, tq, 1);
8742 9153 ql_delay(ha, 10000);
8743 9154 } else {
8744 9155 DEVICE_QUEUE_UNLOCK(tq);
8745 9156 TASK_DAEMON_LOCK(ha);
8746 9157
8747 - for (link = ha->pha->callback_queue.first; link != NULL;
9158 + for (link = ha->pha->unsol_callback_queue.first; link != NULL;
8748 9159 link = link->next) {
8749 9160 sp = link->base_address;
8750 9161 if (sp->flags & SRB_UB_CALLBACK) {
8751 9162 continue;
8752 9163 }
8753 9164 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8754 9165
8755 9166 if (tq->d_id.b24 == d_id.b24) {
8756 9167 sendup = 0;
8757 9168 break;
8758 9169 }
8759 9170 }
8760 9171
8761 9172 TASK_DAEMON_UNLOCK(ha);
8762 9173 }
8763 9174
8764 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9175 + QL_PRINT_3(ha, "done\n");
8765 9176
8766 9177 return (sendup);
8767 9178 }
8768 9179
8769 9180 static int
8770 9181 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8771 9182 {
8772 9183 fc_unsol_buf_t *ubp;
8773 9184 ql_srb_t *sp;
8774 9185 la_els_logi_t *payload;
8775 9186 class_svc_param_t *class3_param;
8776 9187
8777 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9188 + QL_PRINT_3(ha, "started\n");
8778 9189
8779 9190 if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8780 9191 LOOP_DOWN)) {
8781 9192 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8782 9193 return (QL_FUNCTION_FAILED);
8783 9194 }
8784 9195
8785 9196 /* Locate a buffer to use. */
8786 9197 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8787 9198 if (ubp == NULL) {
8788 9199 EL(ha, "Failed\n");
8789 9200 return (QL_FUNCTION_FAILED);
8790 9201 }
8791 9202
8792 - QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8793 - ha->instance, tq->d_id.b24);
9203 + QL_PRINT_3(ha, "Received LOGO from = %xh\n", tq->d_id.b24);
8794 9204
8795 9205 EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8796 9206
8797 9207 sp = ubp->ub_fca_private;
8798 9208
8799 9209 /* Set header. */
8800 9210 ubp->ub_frame.d_id = ha->d_id.b24;
8801 9211 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8802 9212 ubp->ub_frame.s_id = tq->d_id.b24;
8803 9213 ubp->ub_frame.rsvd = 0;
8804 9214 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
8805 9215 F_CTL_SEQ_INITIATIVE;
8806 9216 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8807 9217 ubp->ub_frame.seq_cnt = 0;
8808 9218 ubp->ub_frame.df_ctl = 0;
8809 9219 ubp->ub_frame.seq_id = 0;
8810 9220 ubp->ub_frame.rx_id = 0xffff;
8811 9221 ubp->ub_frame.ox_id = 0xffff;
8812 9222
8813 9223 /* set payload. */
8814 9224 payload = (la_els_logi_t *)ubp->ub_buffer;
8815 - bzero(payload, sizeof (payload));
9225 + bzero(payload, sizeof (la_els_logi_t));
8816 9226
8817 9227 payload->ls_code.ls_code = LA_ELS_PLOGI;
8818 9228 payload->common_service.fcph_version = 0x2006;
8819 - payload->common_service.cmn_features = 0x8800;
8820 -
8821 - CFG_IST(ha, CFG_CTRL_24258081) ?
8822 - (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8823 - ha->init_ctrl_blk.cb24.max_frame_length[0],
8824 - ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8825 - (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8826 - ha->init_ctrl_blk.cb.max_frame_length[0],
8827 - ha->init_ctrl_blk.cb.max_frame_length[1]));
8828 -
9229 + payload->common_service.cmn_features =
9230 + ha->topology & QL_N_PORT ? 0x8000 : 0x8800;
9231 + payload->common_service.rx_bufsize =
9232 + ha->loginparams.common_service.rx_bufsize;
8829 9233 payload->common_service.conc_sequences = 0xff;
8830 9234 payload->common_service.relative_offset = 0x03;
8831 9235 payload->common_service.e_d_tov = 0x7d0;
8832 9236
8833 9237 bcopy((void *)&tq->port_name[0],
8834 9238 (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8835 9239
8836 9240 bcopy((void *)&tq->node_name[0],
8837 9241 (void *)&payload->node_ww_name.raw_wwn[0], 8);
8838 9242
8839 9243 class3_param = (class_svc_param_t *)&payload->class_3;
8840 9244 class3_param->class_valid_svc_opt = 0x8000;
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
8841 9245 class3_param->recipient_ctl = tq->class3_recipient_ctl;
8842 9246 class3_param->rcv_data_size = tq->class3_rcv_data_size;
8843 9247 class3_param->conc_sequences = tq->class3_conc_sequences;
8844 9248 class3_param->open_sequences_per_exch =
8845 9249 tq->class3_open_sequences_per_exch;
8846 9250
8847 9251 QL_UB_LOCK(ha);
8848 9252 sp->flags |= SRB_UB_CALLBACK;
8849 9253 QL_UB_UNLOCK(ha);
8850 9254
8851 - ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8852 -
8853 9255 if (done_q) {
8854 9256 ql_add_link_b(done_q, &sp->cmd);
8855 9257 } else {
8856 9258 ql_awaken_task_daemon(ha, sp, 0, 0);
8857 9259 }
8858 9260
8859 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9261 + QL_PRINT_3(ha, "done\n");
8860 9262
8861 9263 return (QL_SUCCESS);
8862 9264 }
8863 9265
8864 9266 /*
8865 9267 * Abort outstanding commands in the Firmware, clear internally
8866 9268 * queued commands in the driver, Synchronize the target with
8867 9269 * the Firmware
8868 9270 */
8869 9271 int
8870 9272 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8871 9273 {
8872 9274 ql_link_t *link, *link2;
8873 9275 ql_lun_t *lq;
8874 9276 int rval = QL_SUCCESS;
8875 9277 ql_srb_t *sp;
8876 9278 ql_head_t done_q = { NULL, NULL };
8877 9279
8878 - QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
9280 + QL_PRINT_10(ha, "started\n");
8879 9281
8880 9282 /*
8881 9283 * First clear, internally queued commands
8882 9284 */
8883 9285 DEVICE_QUEUE_LOCK(tq);
8884 9286 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8885 9287 lq = link->base_address;
8886 9288
8887 9289 link2 = lq->cmd.first;
8888 9290 while (link2 != NULL) {
8889 9291 sp = link2->base_address;
8890 9292 link2 = link2->next;
8891 9293
8892 - if (sp->flags & SRB_ABORT) {
8893 - continue;
8894 - }
8895 -
8896 9294 /* Remove srb from device command queue. */
8897 9295 ql_remove_link(&lq->cmd, &sp->cmd);
8898 9296 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8899 9297
8900 9298 /* Set ending status. */
8901 9299 sp->pkt->pkt_reason = CS_ABORTED;
8902 9300
8903 9301 /* Call done routine to handle completions. */
8904 9302 ql_add_link_b(&done_q, &sp->cmd);
8905 9303 }
8906 9304 }
8907 9305 DEVICE_QUEUE_UNLOCK(tq);
8908 9306
8909 9307 if (done_q.first != NULL) {
8910 - ql_done(done_q.first);
9308 + ql_done(done_q.first, B_FALSE);
8911 9309 }
8912 9310
8913 9311 if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8914 9312 rval = ql_abort_target(ha, tq, 0);
8915 9313 }
8916 9314
8917 9315 if (rval != QL_SUCCESS) {
8918 9316 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8919 9317 } else {
8920 9318 /*EMPTY*/
8921 - QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8922 - ha->vp_index);
9319 + QL_PRINT_10(ha, "done\n");
8923 9320 }
8924 9321
8925 9322 return (rval);
8926 9323 }
8927 9324
8928 9325 /*
8929 9326 * ql_rcv_rscn_els
8930 9327 * Processes received RSCN extended link service.
8931 9328 *
8932 9329 * Input:
8933 9330 * ha: adapter state pointer.
8934 9331 * mb: array containing input mailbox registers.
8935 9332 * done_q: done queue pointer.
8936 9333 *
8937 9334 * Context:
8938 9335 * Interrupt or Kernel context, no mailbox commands allowed.
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
8939 9336 */
8940 9337 void
8941 9338 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8942 9339 {
8943 9340 fc_unsol_buf_t *ubp;
8944 9341 ql_srb_t *sp;
8945 9342 fc_rscn_t *rn;
8946 9343 fc_affected_id_t *af;
8947 9344 port_id_t d_id;
8948 9345
8949 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9346 + QL_PRINT_3(ha, "started\n");
8950 9347
8951 9348 /* Locate a buffer to use. */
8952 9349 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8953 9350 if (ubp != NULL) {
8954 9351 sp = ubp->ub_fca_private;
8955 9352
8956 9353 /* Set header. */
8957 9354 ubp->ub_frame.d_id = ha->d_id.b24;
8958 9355 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8959 9356 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8960 9357 ubp->ub_frame.rsvd = 0;
8961 9358 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8962 9359 F_CTL_SEQ_INITIATIVE;
8963 9360 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8964 9361 ubp->ub_frame.seq_cnt = 0;
8965 9362 ubp->ub_frame.df_ctl = 0;
8966 9363 ubp->ub_frame.seq_id = 0;
8967 9364 ubp->ub_frame.rx_id = 0xffff;
8968 9365 ubp->ub_frame.ox_id = 0xffff;
8969 9366
8970 9367 /* set payload. */
8971 9368 rn = (fc_rscn_t *)ubp->ub_buffer;
8972 9369 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8973 9370
8974 9371 rn->rscn_code = LA_ELS_RSCN;
8975 9372 rn->rscn_len = 4;
8976 9373 rn->rscn_payload_len = 8;
8977 9374 d_id.b.al_pa = LSB(mb[2]);
8978 9375 d_id.b.area = MSB(mb[2]);
8979 9376 d_id.b.domain = LSB(mb[1]);
8980 9377 af->aff_d_id = d_id.b24;
8981 9378 af->aff_format = MSB(mb[1]);
8982 9379
8983 9380 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8984 9381 af->aff_d_id);
8985 9382
8986 9383 ql_update_rscn(ha, af);
8987 9384
|
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
8988 9385 QL_UB_LOCK(ha);
8989 9386 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8990 9387 QL_UB_UNLOCK(ha);
8991 9388 ql_add_link_b(done_q, &sp->cmd);
8992 9389 }
8993 9390
8994 9391 if (ubp == NULL) {
8995 9392 EL(ha, "Failed, get_unsolicited_buffer\n");
8996 9393 } else {
8997 9394 /*EMPTY*/
8998 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9395 + QL_PRINT_3(ha, "done\n");
8999 9396 }
9000 9397 }
9001 9398
9002 9399 /*
9003 9400 * ql_update_rscn
9004 9401 * Update devices from received RSCN.
9005 9402 *
9006 9403 * Input:
9007 9404 * ha: adapter state pointer.
9008 9405 * af: pointer to RSCN data.
9009 9406 *
|
↓ open down ↓ |
1 line elided |
↑ open up ↑ |
9010 9407 * Context:
9011 9408 * Interrupt or Kernel context, no mailbox commands allowed.
9012 9409 */
9013 9410 static void
9014 9411 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9015 9412 {
9016 9413 ql_link_t *link;
9017 9414 uint16_t index;
9018 9415 ql_tgt_t *tq;
9019 9416
9020 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9417 + QL_PRINT_3(ha, "started\n");
9021 9418
9022 9419 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9023 9420 port_id_t d_id;
9024 9421
9025 9422 d_id.r.rsvd_1 = 0;
9026 9423 d_id.b24 = af->aff_d_id;
9027 9424
9028 9425 tq = ql_d_id_to_queue(ha, d_id);
9029 9426 if (tq) {
9030 9427 EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9031 9428 DEVICE_QUEUE_LOCK(tq);
9032 9429 tq->flags |= TQF_RSCN_RCVD;
9430 + ql_requeue_pending_cmds(ha, tq);
9033 9431 DEVICE_QUEUE_UNLOCK(tq);
9034 9432 }
9035 - QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9036 - ha->instance);
9433 + QL_PRINT_3(ha, "FC_RSCN_PORT_ADDRESS done\n");
9037 9434
9038 9435 return;
9039 9436 }
9040 9437
9041 9438 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9042 9439 for (link = ha->dev[index].first; link != NULL;
9043 9440 link = link->next) {
9044 9441 tq = link->base_address;
9045 9442
9046 9443 switch (af->aff_format) {
9047 9444 case FC_RSCN_FABRIC_ADDRESS:
9048 9445 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9049 9446 EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9050 9447 tq->d_id.b24);
9051 9448 DEVICE_QUEUE_LOCK(tq);
9052 9449 tq->flags |= TQF_RSCN_RCVD;
9450 + ql_requeue_pending_cmds(ha, tq);
9053 9451 DEVICE_QUEUE_UNLOCK(tq);
9054 9452 }
9055 9453 break;
9056 9454
9057 9455 case FC_RSCN_AREA_ADDRESS:
9058 9456 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9059 9457 EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9060 9458 tq->d_id.b24);
9061 9459 DEVICE_QUEUE_LOCK(tq);
9062 9460 tq->flags |= TQF_RSCN_RCVD;
9461 + ql_requeue_pending_cmds(ha, tq);
9063 9462 DEVICE_QUEUE_UNLOCK(tq);
9064 9463 }
9065 9464 break;
9066 9465
9067 9466 case FC_RSCN_DOMAIN_ADDRESS:
9068 9467 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9069 9468 EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9070 9469 tq->d_id.b24);
9071 9470 DEVICE_QUEUE_LOCK(tq);
9072 9471 tq->flags |= TQF_RSCN_RCVD;
9472 + ql_requeue_pending_cmds(ha, tq);
9073 9473 DEVICE_QUEUE_UNLOCK(tq);
9074 9474 }
9075 9475 break;
9076 9476
9077 9477 default:
9078 9478 break;
9079 9479 }
9080 9480 }
9081 9481 }
9082 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9482 + QL_PRINT_3(ha, "done\n");
9083 9483 }
9084 9484
9085 9485 /*
9486 + * ql_requeue_pending_cmds
9487 + * Requeue target commands from pending queue to LUN queue
9488 + *
9489 + * Input:
9490 + * ha: adapter state pointer.
9491 + * tq: target queue pointer.
9492 + * DEVICE_QUEUE_LOCK must be already obtained.
9493 + *
9494 + * Context:
9495 + * Interrupt or Kernel context, no mailbox commands allowed.
9496 + */
9497 +void
9498 +ql_requeue_pending_cmds(ql_adapter_state_t *vha, ql_tgt_t *tq)
9499 +{
9500 + ql_link_t *link;
9501 + ql_srb_t *sp;
9502 + ql_lun_t *lq;
9503 + ql_adapter_state_t *ha = vha->pha;
9504 +
9505 + QL_PRINT_3(ha, "started\n");
9506 +
9507 + REQUEST_RING_LOCK(ha);
9508 + for (link = ha->pending_cmds.first; link != NULL; link = link->next) {
9509 + sp = link->base_address;
9510 + if ((lq = sp->lun_queue) == NULL || lq->target_queue != tq) {
9511 + continue;
9512 + }
9513 + ql_remove_link(&ha->pending_cmds, &sp->cmd);
9514 +
9515 + if (tq->outcnt) {
9516 + tq->outcnt--;
9517 + }
9518 + if (sp->flags & SRB_FCP_CMD_PKT) {
9519 + if (sp->fcp->fcp_cntl.cntl_qtype ==
9520 + FCP_QTYPE_UNTAGGED) {
9521 + lq->flags &= ~LQF_UNTAGGED_PENDING;
9522 + }
9523 + if (lq->lun_outcnt != 0) {
9524 + lq->lun_outcnt--;
9525 + }
9526 + }
9527 + ql_add_link_t(&lq->cmd, &sp->cmd);
9528 + sp->flags |= SRB_IN_DEVICE_QUEUE;
9529 + }
9530 + REQUEST_RING_UNLOCK(ha);
9531 +
9532 + QL_PRINT_3(ha, "done\n");
9533 +}
9534 +
9535 +/*
9086 9536 * ql_process_rscn
9087 9537 *
9088 9538 * Input:
9089 9539 * ha: adapter state pointer.
9090 9540 * af: RSCN payload pointer.
9091 9541 *
9092 9542 * Context:
9093 9543 * Kernel context.
9094 9544 */
9095 9545 static int
9096 9546 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9097 9547 {
9098 9548 int sendit;
9099 9549 int sendup = 1;
9100 9550 ql_link_t *link;
9101 9551 uint16_t index;
9102 9552 ql_tgt_t *tq;
9103 9553
9104 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9554 + QL_PRINT_3(ha, "started\n");
9105 9555
9106 9556 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9107 9557 port_id_t d_id;
9108 9558
9109 9559 d_id.r.rsvd_1 = 0;
9110 9560 d_id.b24 = af->aff_d_id;
9111 9561
9112 9562 tq = ql_d_id_to_queue(ha, d_id);
9113 9563 if (tq) {
9114 9564 sendup = ql_process_rscn_for_device(ha, tq);
9115 9565 }
9116 9566
9117 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9567 + QL_PRINT_3(ha, "done\n");
9118 9568
9119 9569 return (sendup);
9120 9570 }
9121 9571
9122 9572 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9123 9573 for (link = ha->dev[index].first; link != NULL;
9124 9574 link = link->next) {
9125 9575
9126 9576 tq = link->base_address;
9127 9577 if (tq == NULL) {
9128 9578 continue;
9129 9579 }
9130 9580
9131 9581 switch (af->aff_format) {
9132 9582 case FC_RSCN_FABRIC_ADDRESS:
9133 9583 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9134 9584 sendit = ql_process_rscn_for_device(
9135 9585 ha, tq);
9136 9586 if (sendup) {
9137 9587 sendup = sendit;
9138 9588 }
9139 9589 }
9140 9590 break;
9141 9591
9142 9592 case FC_RSCN_AREA_ADDRESS:
9143 9593 if ((tq->d_id.b24 & 0xffff00) ==
9144 9594 af->aff_d_id) {
9145 9595 sendit = ql_process_rscn_for_device(
9146 9596 ha, tq);
9147 9597
9148 9598 if (sendup) {
9149 9599 sendup = sendit;
9150 9600 }
9151 9601 }
9152 9602 break;
9153 9603
9154 9604 case FC_RSCN_DOMAIN_ADDRESS:
9155 9605 if ((tq->d_id.b24 & 0xff0000) ==
9156 9606 af->aff_d_id) {
9157 9607 sendit = ql_process_rscn_for_device(
9158 9608 ha, tq);
9159 9609
9160 9610 if (sendup) {
9161 9611 sendup = sendit;
|
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
9162 9612 }
9163 9613 }
9164 9614 break;
9165 9615
9166 9616 default:
9167 9617 break;
9168 9618 }
9169 9619 }
9170 9620 }
9171 9621
9172 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9622 + QL_PRINT_3(ha, "done\n");
9173 9623
9174 9624 return (sendup);
9175 9625 }
9176 9626
9177 9627 /*
9178 9628 * ql_process_rscn_for_device
9179 9629 *
9180 9630 * Input:
9181 9631 * ha: adapter state pointer.
9182 9632 * tq: target queue pointer.
9183 9633 *
9184 9634 * Context:
9185 9635 * Kernel context.
9186 9636 */
9187 9637 static int
9188 9638 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9189 9639 {
9190 9640 int sendup = 1;
9191 9641
9192 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9642 + QL_PRINT_3(ha, "started\n");
9193 9643
9194 9644 DEVICE_QUEUE_LOCK(tq);
9195 9645
9196 9646 /*
9197 9647 * Let FCP-2 compliant devices continue I/Os
9198 9648 * with their low level recoveries.
9199 9649 */
9200 9650 if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9201 9651 (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9202 9652 /*
9203 9653 * Cause ADISC to go out
9204 9654 */
9205 9655 DEVICE_QUEUE_UNLOCK(tq);
9206 9656
9207 9657 (void) ql_get_port_database(ha, tq, PDF_NONE);
9208 9658
9209 9659 DEVICE_QUEUE_LOCK(tq);
9210 9660 tq->flags &= ~TQF_RSCN_RCVD;
9211 9661
9212 9662 } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9213 9663 if (tq->d_id.b24 != BROADCAST_ADDR) {
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
9214 9664 tq->flags |= TQF_NEED_AUTHENTICATION;
9215 9665 }
9216 9666
9217 9667 DEVICE_QUEUE_UNLOCK(tq);
9218 9668
9219 9669 (void) ql_abort_device(ha, tq, 1);
9220 9670
9221 9671 DEVICE_QUEUE_LOCK(tq);
9222 9672
9223 9673 if (tq->outcnt) {
9674 + EL(ha, "busy tq->outcnt=%d\n", tq->outcnt);
9224 9675 sendup = 0;
9225 9676 } else {
9226 9677 tq->flags &= ~TQF_RSCN_RCVD;
9227 9678 }
9228 9679 } else {
9229 9680 tq->flags &= ~TQF_RSCN_RCVD;
9230 9681 }
9231 9682
9232 9683 if (sendup) {
9233 9684 if (tq->d_id.b24 != BROADCAST_ADDR) {
9234 9685 tq->flags |= TQF_NEED_AUTHENTICATION;
9235 9686 }
9236 9687 }
9237 9688
9238 9689 DEVICE_QUEUE_UNLOCK(tq);
9239 9690
9240 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9691 + QL_PRINT_3(ha, "done\n");
9241 9692
9242 9693 return (sendup);
9243 9694 }
9244 9695
9245 9696 static int
9246 9697 ql_handle_rscn_update(ql_adapter_state_t *ha)
9247 9698 {
9248 9699 int rval;
9249 9700 ql_tgt_t *tq;
9250 9701 uint16_t index, loop_id;
9251 9702 ql_dev_id_list_t *list;
9252 9703 uint32_t list_size;
9253 9704 port_id_t d_id;
9254 9705 ql_mbx_data_t mr;
9255 9706 ql_head_t done_q = { NULL, NULL };
9256 9707
9257 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9708 + QL_PRINT_3(ha, "started\n");
9258 9709
9259 9710 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9260 9711 list = kmem_zalloc(list_size, KM_SLEEP);
9261 9712 if (list == NULL) {
9262 9713 rval = QL_MEMORY_ALLOC_FAILED;
9263 9714 EL(ha, "kmem_zalloc failed=%xh\n", rval);
9264 9715 return (rval);
9265 9716 }
9266 9717
9267 9718 /*
9268 9719 * Get data from RISC code d_id list to init each device queue.
9269 9720 */
9270 9721 rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9271 9722 if (rval != QL_SUCCESS) {
9272 9723 kmem_free(list, list_size);
9273 9724 EL(ha, "get_id_list failed=%xh\n", rval);
9274 9725 return (rval);
9275 9726 }
9276 9727
9277 9728 /* Acquire adapter state lock. */
9278 9729 ADAPTER_STATE_LOCK(ha);
9279 9730
9280 9731 /* Check for new devices */
9281 9732 for (index = 0; index < mr.mb[1]; index++) {
9282 9733 ql_dev_list(ha, list, index, &d_id, &loop_id);
9283 9734
9284 9735 if (VALID_DEVICE_ID(ha, loop_id)) {
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
9285 9736 d_id.r.rsvd_1 = 0;
9286 9737
9287 9738 tq = ql_d_id_to_queue(ha, d_id);
9288 9739 if (tq != NULL) {
9289 9740 continue;
9290 9741 }
9291 9742
9292 9743 tq = ql_dev_init(ha, d_id, loop_id);
9293 9744
9294 9745 /* Test for fabric device. */
9295 - if (d_id.b.domain != ha->d_id.b.domain ||
9746 + if (ha->topology & QL_F_PORT ||
9747 + d_id.b.domain != ha->d_id.b.domain ||
9296 9748 d_id.b.area != ha->d_id.b.area) {
9297 9749 tq->flags |= TQF_FABRIC_DEVICE;
9298 9750 }
9299 9751
9300 9752 ADAPTER_STATE_UNLOCK(ha);
9301 9753 if (ql_get_port_database(ha, tq, PDF_NONE) !=
9302 9754 QL_SUCCESS) {
9303 9755 tq->loop_id = PORT_NO_LOOP_ID;
9304 9756 }
9305 9757 ADAPTER_STATE_LOCK(ha);
9306 9758
9307 9759 /*
9308 9760 * Send up a PLOGI about the new device
9309 9761 */
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
9310 9762 if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9311 9763 (void) ql_send_plogi(ha, tq, &done_q);
9312 9764 }
9313 9765 }
9314 9766 }
9315 9767
9316 9768 /* Release adapter state lock. */
9317 9769 ADAPTER_STATE_UNLOCK(ha);
9318 9770
9319 9771 if (done_q.first != NULL) {
9320 - ql_done(done_q.first);
9772 + ql_done(done_q.first, B_FALSE);
9321 9773 }
9322 9774
9323 9775 kmem_free(list, list_size);
9324 9776
9325 9777 if (rval != QL_SUCCESS) {
9326 9778 EL(ha, "failed=%xh\n", rval);
9327 9779 } else {
9328 9780 /*EMPTY*/
9329 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9781 + QL_PRINT_3(ha, "done\n");
9330 9782 }
9331 9783
9332 9784 return (rval);
9333 9785 }
9334 9786
9335 9787 /*
9336 9788 * ql_free_unsolicited_buffer
9337 9789 * Frees allocated buffer.
9338 9790 *
9339 9791 * Input:
9340 9792 * ha = adapter state pointer.
9341 9793 * index = buffer array index.
9342 9794 * ADAPTER_STATE_LOCK must be already obtained.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
9343 9795 *
9344 9796 * Context:
9345 9797 * Kernel context.
9346 9798 */
9347 9799 static void
9348 9800 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9349 9801 {
9350 9802 ql_srb_t *sp;
9351 9803 int status;
9352 9804
9353 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9805 + QL_PRINT_3(ha, "started\n");
9354 9806
9355 9807 sp = ubp->ub_fca_private;
9356 9808 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9357 9809 /* Disconnect IP from system buffers. */
9358 9810 if (ha->flags & IP_INITIALIZED) {
9359 - ADAPTER_STATE_UNLOCK(ha);
9360 9811 status = ql_shutdown_ip(ha);
9361 - ADAPTER_STATE_LOCK(ha);
9362 9812 if (status != QL_SUCCESS) {
9363 9813 cmn_err(CE_WARN,
9364 9814 "!Qlogic %s(%d): Failed to shutdown IP",
9365 9815 QL_NAME, ha->instance);
9366 9816 return;
9367 9817 }
9368 9818
9369 9819 ha->flags &= ~IP_ENABLED;
9370 9820 }
9371 9821
9372 9822 ql_free_phys(ha, &sp->ub_buffer);
9373 9823 } else {
9374 9824 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9375 9825 }
9376 9826
9377 9827 kmem_free(sp, sizeof (ql_srb_t));
9378 9828 kmem_free(ubp, sizeof (fc_unsol_buf_t));
9379 9829
9830 + QL_UB_LOCK(ha);
9380 9831 if (ha->ub_allocated != 0) {
9381 9832 ha->ub_allocated--;
9382 9833 }
9834 + QL_UB_UNLOCK(ha);
9383 9835
9384 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9836 + QL_PRINT_3(ha, "done\n");
9385 9837 }
9386 9838
9387 9839 /*
9388 9840 * ql_get_unsolicited_buffer
9389 9841 * Locates a free unsolicited buffer.
9390 9842 *
9391 9843 * Input:
9392 9844 * ha = adapter state pointer.
9393 9845 * type = buffer type.
9394 9846 *
9395 9847 * Returns:
9396 9848 * Unsolicited buffer pointer.
9397 9849 *
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
9398 9850 * Context:
9399 9851 * Interrupt or Kernel context, no mailbox commands allowed.
9400 9852 */
9401 9853 fc_unsol_buf_t *
9402 9854 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9403 9855 {
9404 9856 fc_unsol_buf_t *ubp;
9405 9857 ql_srb_t *sp;
9406 9858 uint16_t index;
9407 9859
9408 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9860 + QL_PRINT_3(ha, "started\n");
9409 9861
9410 9862 /* Locate a buffer to use. */
9411 9863 ubp = NULL;
9412 9864
9413 9865 QL_UB_LOCK(ha);
9414 9866 for (index = 0; index < QL_UB_LIMIT; index++) {
9415 9867 ubp = ha->ub_array[index];
9416 9868 if (ubp != NULL) {
9417 9869 sp = ubp->ub_fca_private;
9418 9870 if ((sp->ub_type == type) &&
9419 9871 (sp->flags & SRB_UB_IN_FCA) &&
9420 9872 (!(sp->flags & (SRB_UB_CALLBACK |
9421 9873 SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9422 9874 sp->flags |= SRB_UB_ACQUIRED;
9423 9875 ubp->ub_resp_flags = 0;
9424 9876 break;
9425 9877 }
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
9426 9878 ubp = NULL;
9427 9879 }
9428 9880 }
9429 9881 QL_UB_UNLOCK(ha);
9430 9882
9431 9883 if (ubp) {
9432 9884 ubp->ub_resp_token = NULL;
9433 9885 ubp->ub_class = FC_TRAN_CLASS3;
9434 9886 }
9435 9887
9436 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9888 + QL_PRINT_3(ha, "done\n");
9437 9889
9438 9890 return (ubp);
9439 9891 }
9440 9892
9441 9893 /*
9442 9894 * ql_ub_frame_hdr
9443 9895 * Processes received unsolicited buffers from ISP.
9444 9896 *
9445 9897 * Input:
9446 9898 * ha: adapter state pointer.
9447 9899 * tq: target queue pointer.
9448 9900 * index: unsolicited buffer array index.
9449 9901 * done_q: done queue pointer.
9450 9902 *
9451 9903 * Returns:
9452 9904 * ql local function return status code.
9453 9905 *
9454 9906 * Context:
9455 9907 * Interrupt or Kernel context, no mailbox commands allowed.
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
9456 9908 */
9457 9909 int
9458 9910 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9459 9911 ql_head_t *done_q)
9460 9912 {
9461 9913 fc_unsol_buf_t *ubp;
9462 9914 ql_srb_t *sp;
9463 9915 uint16_t loop_id;
9464 9916 int rval = QL_FUNCTION_FAILED;
9465 9917
9466 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9918 + QL_PRINT_3(ha, "started\n");
9467 9919
9468 9920 QL_UB_LOCK(ha);
9469 9921 if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9470 9922 EL(ha, "Invalid buffer index=%xh\n", index);
9471 9923 QL_UB_UNLOCK(ha);
9472 9924 return (rval);
9473 9925 }
9474 9926
9475 9927 sp = ubp->ub_fca_private;
9476 9928 if (sp->flags & SRB_UB_FREE_REQUESTED) {
9477 9929 EL(ha, "buffer freed index=%xh\n", index);
9478 9930 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9479 9931 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9480 9932
9481 9933 sp->flags |= SRB_UB_IN_FCA;
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
9482 9934
9483 9935 QL_UB_UNLOCK(ha);
9484 9936 return (rval);
9485 9937 }
9486 9938
9487 9939 if ((sp->handle == index) &&
9488 9940 (sp->flags & SRB_UB_IN_ISP) &&
9489 9941 (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9490 9942 (!(sp->flags & SRB_UB_ACQUIRED))) {
9491 9943 /* set broadcast D_ID */
9492 - loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
9944 + loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
9493 9945 BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9494 9946 if (tq->ub_loop_id == loop_id) {
9495 9947 if (ha->topology & QL_FL_PORT) {
9496 9948 ubp->ub_frame.d_id = 0x000000;
9497 9949 } else {
9498 - ubp->ub_frame.d_id = 0xffffff;
9950 + ubp->ub_frame.d_id = FS_BROADCAST;
9499 9951 }
9500 9952 } else {
9501 9953 ubp->ub_frame.d_id = ha->d_id.b24;
9502 9954 }
9503 9955 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9504 9956 ubp->ub_frame.rsvd = 0;
9505 9957 ubp->ub_frame.s_id = tq->d_id.b24;
9506 9958 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9507 9959 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9508 9960 ubp->ub_frame.df_ctl = 0;
9509 9961 ubp->ub_frame.seq_id = tq->ub_seq_id;
9510 9962 ubp->ub_frame.rx_id = 0xffff;
9511 9963 ubp->ub_frame.ox_id = 0xffff;
9512 9964 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9513 9965 sp->ub_size : tq->ub_sequence_length;
9514 9966 ubp->ub_frame.ro = tq->ub_frame_ro;
9515 9967
9516 9968 tq->ub_sequence_length = (uint16_t)
9517 9969 (tq->ub_sequence_length - ubp->ub_bufsize);
9518 9970 tq->ub_frame_ro += ubp->ub_bufsize;
9519 9971 tq->ub_seq_cnt++;
9520 9972
9521 9973 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9522 9974 if (tq->ub_seq_cnt == 1) {
9523 9975 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9524 9976 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9525 9977 } else {
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
9526 9978 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9527 9979 F_CTL_END_SEQ;
9528 9980 }
9529 9981 tq->ub_total_seg_cnt = 0;
9530 9982 } else if (tq->ub_seq_cnt == 1) {
9531 9983 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9532 9984 F_CTL_FIRST_SEQ;
9533 9985 ubp->ub_frame.df_ctl = 0x20;
9534 9986 }
9535 9987
9536 - QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9537 - ha->instance, ubp->ub_frame.d_id);
9538 - QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9539 - ha->instance, ubp->ub_frame.s_id);
9540 - QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9541 - ha->instance, ubp->ub_frame.seq_cnt);
9542 - QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9543 - ha->instance, ubp->ub_frame.seq_id);
9544 - QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9545 - ha->instance, ubp->ub_frame.ro);
9546 - QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9547 - ha->instance, ubp->ub_frame.f_ctl);
9548 - QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9549 - ha->instance, ubp->ub_bufsize);
9988 + QL_PRINT_3(ha, "ub_frame.d_id=%xh\n", ubp->ub_frame.d_id);
9989 + QL_PRINT_3(ha, "ub_frame.s_id=%xh\n", ubp->ub_frame.s_id);
9990 + QL_PRINT_3(ha, "ub_frame.seq_cnt=%xh\n", ubp->ub_frame.seq_cnt);
9991 + QL_PRINT_3(ha, "ub_frame.seq_id=%xh\n", ubp->ub_frame.seq_id);
9992 + QL_PRINT_3(ha, "ub_frame.ro=%xh\n", ubp->ub_frame.ro);
9993 + QL_PRINT_3(ha, "ub_frame.f_ctl=%xh\n", ubp->ub_frame.f_ctl);
9994 + QL_PRINT_3(ha, "ub_bufsize=%xh\n", ubp->ub_bufsize);
9550 9995 QL_DUMP_3(ubp->ub_buffer, 8,
9551 9996 ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9552 9997
9553 9998 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9554 9999 ql_add_link_b(done_q, &sp->cmd);
9555 10000 rval = QL_SUCCESS;
9556 10001 } else {
9557 10002 if (sp->handle != index) {
9558 10003 EL(ha, "Bad index=%xh, expect=%xh\n", index,
9559 10004 sp->handle);
9560 10005 }
9561 10006 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9562 10007 EL(ha, "buffer was already in driver, index=%xh\n",
9563 10008 index);
9564 10009 }
9565 10010 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
9566 10011 EL(ha, "buffer was not an IP buffer, index=%xh\n",
9567 10012 index);
9568 10013 }
9569 10014 if (sp->flags & SRB_UB_ACQUIRED) {
9570 10015 EL(ha, "buffer was being used by driver, index=%xh\n",
9571 10016 index);
9572 10017 }
9573 10018 }
9574 10019 QL_UB_UNLOCK(ha);
9575 10020
9576 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10021 + QL_PRINT_3(ha, "done\n");
9577 10022
9578 10023 return (rval);
9579 10024 }
9580 10025
9581 10026 /*
9582 10027 * ql_timer
9583 10028 * One second timer function.
9584 10029 *
9585 10030 * Input:
9586 10031 * ql_hba.first = first link in adapter list.
9587 10032 *
9588 10033 * Context:
9589 10034 * Interrupt context, no mailbox commands allowed.
9590 10035 */
9591 10036 static void
9592 10037 ql_timer(void *arg)
9593 10038 {
9594 10039 ql_link_t *link;
9595 - uint32_t set_flags;
9596 - uint32_t reset_flags;
9597 - ql_adapter_state_t *ha = NULL, *vha;
10040 + uint64_t set_flags;
10041 + ql_adapter_state_t *ha;
10042 + static uint32_t sec_cnt = 0;
9598 10043
9599 - QL_PRINT_6(CE_CONT, "started\n");
10044 + QL_PRINT_6(NULL, "started\n");
9600 10045
9601 10046 /* Acquire global state lock. */
9602 - GLOBAL_STATE_LOCK();
10047 + GLOBAL_TIMER_LOCK();
9603 10048 if (ql_timer_timeout_id == NULL) {
9604 10049 /* Release global state lock. */
9605 - GLOBAL_STATE_UNLOCK();
10050 + GLOBAL_TIMER_UNLOCK();
9606 10051 return;
9607 10052 }
9608 10053
10054 + sec_cnt++;
9609 10055 for (link = ql_hba.first; link != NULL; link = link->next) {
9610 10056 ha = link->base_address;
9611 10057
9612 - /* Skip adapter if suspended of stalled. */
9613 - ADAPTER_STATE_LOCK(ha);
10058 + /* Skip adapter if suspended or stalled. */
9614 10059 if (ha->flags & ADAPTER_SUSPENDED ||
9615 - ha->task_daemon_flags & DRIVER_STALL) {
9616 - ADAPTER_STATE_UNLOCK(ha);
10060 + ha->task_daemon_flags & DRIVER_STALL ||
10061 + !(ha->task_daemon_flags & FIRMWARE_UP)) {
9617 10062 continue;
9618 10063 }
9619 - ha->flags |= ADAPTER_TIMER_BUSY;
9620 - ADAPTER_STATE_UNLOCK(ha);
9621 10064
9622 10065 QL_PM_LOCK(ha);
9623 10066 if (ha->power_level != PM_LEVEL_D0) {
9624 10067 QL_PM_UNLOCK(ha);
9625 -
9626 - ADAPTER_STATE_LOCK(ha);
9627 - ha->flags &= ~ADAPTER_TIMER_BUSY;
9628 - ADAPTER_STATE_UNLOCK(ha);
9629 10068 continue;
9630 10069 }
9631 - ha->busy++;
10070 + ha->pm_busy++;
9632 10071 QL_PM_UNLOCK(ha);
9633 10072
9634 10073 set_flags = 0;
9635 - reset_flags = 0;
9636 10074
10075 + /* All completion treads busy, wake up a helper thread. */
10076 + if (ha->comp_thds_awake == ha->comp_thds_active &&
10077 + ha->comp_q.first != NULL) {
10078 + QL_PRINT_10(ha, "comp queue helper thrd started\n");
10079 + (void) timeout(ql_process_comp_queue, (void *)ha, 1);
10080 + }
10081 +
9637 10082 /* Port retry timer handler. */
9638 10083 if (LOOP_READY(ha)) {
9639 10084 ADAPTER_STATE_LOCK(ha);
9640 10085 if (ha->port_retry_timer != 0) {
9641 10086 ha->port_retry_timer--;
9642 10087 if (ha->port_retry_timer == 0) {
9643 10088 set_flags |= PORT_RETRY_NEEDED;
9644 10089 }
9645 10090 }
9646 10091 ADAPTER_STATE_UNLOCK(ha);
9647 10092 }
9648 10093
9649 10094 /* Loop down timer handler. */
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
9650 10095 if (LOOP_RECONFIGURE(ha) == 0) {
9651 10096 if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9652 10097 ha->loop_down_timer--;
9653 10098 /*
9654 10099 * give the firmware loop down dump flag
9655 10100 * a chance to work.
9656 10101 */
9657 10102 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9658 10103 if (CFG_IST(ha,
9659 10104 CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9660 - (void) ql_binary_fw_dump(ha,
9661 - TRUE);
10105 + ADAPTER_STATE_LOCK(ha);
10106 + ha->flags |= FW_DUMP_NEEDED;
10107 + ADAPTER_STATE_UNLOCK(ha);
9662 10108 }
9663 10109 EL(ha, "loop_down_reset, "
9664 10110 "isp_abort_needed\n");
9665 10111 set_flags |= ISP_ABORT_NEEDED;
9666 10112 }
9667 10113 }
9668 10114 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9669 10115 /* Command abort time handler. */
9670 10116 if (ha->loop_down_timer ==
9671 10117 ha->loop_down_abort_time) {
9672 10118 ADAPTER_STATE_LOCK(ha);
9673 10119 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9674 10120 ADAPTER_STATE_UNLOCK(ha);
9675 10121 set_flags |= ABORT_QUEUES_NEEDED;
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
9676 10122 EL(ha, "loop_down_abort_time, "
9677 10123 "abort_queues_needed\n");
9678 10124 }
9679 10125
9680 10126 /* Watchdog timer handler. */
9681 10127 if (ha->watchdog_timer == 0) {
9682 10128 ha->watchdog_timer = WATCHDOG_TIME;
9683 10129 } else if (LOOP_READY(ha)) {
9684 10130 ha->watchdog_timer--;
9685 10131 if (ha->watchdog_timer == 0) {
9686 - for (vha = ha; vha != NULL;
9687 - vha = vha->vp_next) {
9688 - ql_watchdog(vha,
9689 - &set_flags,
9690 - &reset_flags);
9691 - }
9692 - ha->watchdog_timer =
9693 - WATCHDOG_TIME;
10132 + set_flags |= WATCHDOG_NEEDED;
9694 10133 }
9695 10134 }
9696 10135 }
9697 10136 }
9698 10137
9699 10138 /* Idle timer handler. */
9700 10139 if (!DRIVER_SUSPENDED(ha)) {
9701 10140 if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9702 10141 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9703 10142 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9704 10143 #endif
9705 10144 ha->idle_timer = 0;
9706 10145 }
9707 10146 if (ha->send_plogi_timer != NULL) {
9708 10147 ha->send_plogi_timer--;
9709 10148 if (ha->send_plogi_timer == NULL) {
9710 10149 set_flags |= SEND_PLOGI;
9711 10150 }
9712 10151 }
9713 10152 }
9714 - ADAPTER_STATE_LOCK(ha);
9715 - if (ha->idc_restart_timer != 0) {
9716 - ha->idc_restart_timer--;
9717 - if (ha->idc_restart_timer == 0) {
9718 - ha->idc_restart_cnt = 0;
9719 - reset_flags |= DRIVER_STALL;
9720 - }
10153 +
10154 + if (CFG_IST(ha, CFG_CTRL_82XX) && ha->flags & ONLINE &&
10155 + !(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
10156 + ABORT_ISP_ACTIVE)) &&
10157 + !(sec_cnt % 2)) {
10158 + set_flags |= IDC_POLL_NEEDED;
9721 10159 }
9722 - if (ha->idc_flash_acc_timer != 0) {
9723 - ha->idc_flash_acc_timer--;
9724 - if (ha->idc_flash_acc_timer == 0 &&
9725 - ha->idc_flash_acc != 0) {
9726 - ha->idc_flash_acc = 1;
9727 - ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9728 - ha->idc_mb[1] = 0;
9729 - ha->idc_mb[2] = IDC_OPC_DRV_START;
9730 - set_flags |= IDC_EVENT;
9731 - }
9732 - }
9733 - ADAPTER_STATE_UNLOCK(ha);
9734 10160
9735 - if (set_flags != 0 || reset_flags != 0) {
9736 - ql_awaken_task_daemon(ha, NULL, set_flags,
9737 - reset_flags);
10161 + if (ha->ledstate.BeaconState == BEACON_ON) {
10162 + set_flags |= LED_BLINK;
9738 10163 }
9739 10164
9740 - if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9741 - ql_blink_led(ha);
10165 + if (set_flags != 0) {
10166 + ql_awaken_task_daemon(ha, NULL, set_flags, 0);
9742 10167 }
9743 10168
9744 10169 /* Update the IO stats */
9745 10170 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9746 10171 ha->xioctl->IOInputMByteCnt +=
9747 10172 (ha->xioctl->IOInputByteCnt / 0x100000);
9748 10173 ha->xioctl->IOInputByteCnt %= 0x100000;
9749 10174 }
9750 10175
9751 10176 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9752 10177 ha->xioctl->IOOutputMByteCnt +=
9753 10178 (ha->xioctl->IOOutputByteCnt / 0x100000);
9754 10179 ha->xioctl->IOOutputByteCnt %= 0x100000;
9755 10180 }
9756 10181
9757 - if (CFG_IST(ha, CFG_CTRL_8021)) {
9758 - (void) ql_8021_idc_handler(ha);
9759 - }
9760 -
9761 - ADAPTER_STATE_LOCK(ha);
9762 - ha->flags &= ~ADAPTER_TIMER_BUSY;
9763 - ADAPTER_STATE_UNLOCK(ha);
9764 -
9765 10182 QL_PM_LOCK(ha);
9766 - ha->busy--;
10183 + if (ha->pm_busy) {
10184 + ha->pm_busy--;
10185 + }
9767 10186 QL_PM_UNLOCK(ha);
9768 10187 }
9769 10188
9770 10189 /* Restart timer, if not being stopped. */
9771 10190 if (ql_timer_timeout_id != NULL) {
9772 10191 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9773 10192 }
9774 10193
9775 10194 /* Release global state lock. */
9776 - GLOBAL_STATE_UNLOCK();
10195 + GLOBAL_TIMER_UNLOCK();
9777 10196
9778 - QL_PRINT_6(CE_CONT, "done\n");
10197 + QL_PRINT_6(ha, "done\n");
9779 10198 }
9780 10199
9781 10200 /*
9782 10201 * ql_timeout_insert
9783 10202 * Function used to insert a command block onto the
9784 10203 * watchdog timer queue.
9785 10204 *
9786 10205 * Note: Must insure that pkt_time is not zero
9787 10206 * before calling ql_timeout_insert.
9788 10207 *
9789 10208 * Input:
9790 10209 * ha: adapter state pointer.
9791 10210 * tq: target queue pointer.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
9792 10211 * sp: SRB pointer.
9793 10212 * DEVICE_QUEUE_LOCK must be already obtained.
9794 10213 *
9795 10214 * Context:
9796 10215 * Kernel context.
9797 10216 */
9798 10217 /* ARGSUSED */
9799 10218 static void
9800 10219 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9801 10220 {
9802 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10221 + QL_PRINT_3(ha, "started\n");
9803 10222
9804 10223 if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9805 10224 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9806 10225 /*
9807 10226 * The WATCHDOG_TIME must be rounded up + 1. As an example,
9808 10227 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9809 10228 * will expire in the next watchdog call, which could be in
9810 10229 * 1 microsecond.
9811 10230 *
9812 10231 */
9813 10232 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9814 10233 WATCHDOG_TIME;
9815 10234 /*
9816 10235 * Added an additional 10 to account for the
9817 10236 * firmware timer drift which can occur with
9818 10237 * very long timeout values.
9819 10238 */
9820 10239 sp->wdg_q_time += 10;
9821 10240
9822 10241 /*
9823 10242 * Add 6 more to insure watchdog does not timeout at the same
9824 10243 * time as ISP RISC code timeout.
9825 10244 */
9826 10245 sp->wdg_q_time += 6;
9827 10246
9828 10247 /* Save initial time for resetting watchdog time. */
9829 10248 sp->init_wdg_q_time = sp->wdg_q_time;
9830 10249
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
9831 10250 /* Insert command onto watchdog queue. */
9832 10251 ql_add_link_b(&tq->wdg, &sp->wdg);
9833 10252
9834 10253 sp->flags |= SRB_WATCHDOG_ENABLED;
9835 10254 } else {
9836 10255 sp->isp_timeout = 0;
9837 10256 sp->wdg_q_time = 0;
9838 10257 sp->init_wdg_q_time = 0;
9839 10258 }
9840 10259
9841 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10260 + QL_PRINT_3(ha, "done\n");
9842 10261 }
9843 10262
9844 10263 /*
9845 10264 * ql_watchdog
9846 10265 * Timeout handler that runs in interrupt context. The
9847 10266 * ql_adapter_state_t * argument is the parameter set up when the
9848 10267 * timeout was initialized (state structure pointer).
9849 10268 * Function used to update timeout values and if timeout
9850 10269 * has occurred command will be aborted.
9851 10270 *
9852 10271 * Input:
9853 - * ha: adapter state pointer.
9854 - * set_flags: task daemon flags to set.
9855 - * reset_flags: task daemon flags to reset.
10272 + * ha: adapter state pointer.
9856 10273 *
9857 10274 * Context:
9858 - * Interrupt context, no mailbox commands allowed.
10275 + * Kernel context.
9859 10276 */
static void
ql_watchdog(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_6(ha, "started\n");

	/* Scan the physical port and every virtual port chained off it. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		/* Loop through all targets. */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/*
				 * Try to acquire device queue lock.
				 * NOTE(review): on contention this abandons
				 * the REST of this hash bucket (break), not
				 * just the contended target — confirm that
				 * is intended rather than a continue.
				 */
				if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
					break;
				}

				/*
				 * Without link-down reporting, a target whose
				 * port-down retries are exhausted needs no
				 * watchdog scan.
				 */
				if (!(CFG_IST(ha,
				    CFG_ENABLE_LINK_DOWN_REPORTING)) &&
				    (tq->port_down_retry_count == 0)) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					continue;
				}
				/*
				 * Scans this target's watchdog queue;
				 * releases the DEVICE_QUEUE_LOCK on exit.
				 */
				ql_wdg_tq_list(vha, tq);
			}
		}
	}
	/* Rearm for the next watchdog interval. */
	ha->watchdog_timer = WATCHDOG_TIME;

	QL_PRINT_6(ha, "done\n");
}
9908 10314
9909 - /*
9910 - * For SCSI commands, if everything seems to
9911 - * be going fine and this packet is stuck
9912 - * because of throttling at LUN or target
9913 - * level then do not decrement the
9914 - * sp->wdg_q_time
9915 - */
9916 - if (ha->task_daemon_flags & STATE_ONLINE &&
9917 - (sp->flags & SRB_ISP_STARTED) == 0 &&
9918 - q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9919 - lq->lun_outcnt >= ha->execution_throttle) {
9920 - continue;
9921 - }
10315 +/*
10316 + * ql_wdg_tq_list
10317 + * Timeout handler that runs in interrupt context. The
10318 + * ql_adapter_state_t * argument is the parameter set up when the
10319 + * timeout was initialized (state structure pointer).
10320 + * Function used to update timeout values and if timeout
10321 + * has occurred command will be aborted.
10322 + *
10323 + * Input:
10324 + * ha: adapter state pointer.
10325 + * tq: target queue pointer.
10326 + * DEVICE_QUEUE_LOCK must be already obtained.
10327 + *
10328 + * Output:
10329 + * Releases DEVICE_QUEUE_LOCK upon exit.
10330 + *
10331 + * Context:
10332 + * Kernel context.
10333 + */
static void
ql_wdg_tq_list(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_srb_t	*sp;
	ql_link_t	*link, *next_cmd;
	ql_lun_t	*lq;
	boolean_t	q_sane, timeout = B_FALSE;

	QL_PRINT_6(ha, "started\n");

	/* Find out if this device is in a sane state */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
	    TQF_QUEUE_SUSPENDED)) {
		q_sane = B_FALSE;
	} else {
		q_sane = B_TRUE;
	}
	/*
	 * First pass: decrement each command's watchdog tick count and
	 * mark the ones that have expired.
	 */
	for (link = tq->wdg.first; link != NULL; link = next_cmd) {
		next_cmd = link->next;
		sp = link->base_address;
		lq = sp->lun_queue;

		/*
		 * For SCSI commands, if everything seems to be going
		 * fine and this packet is stuck only because of
		 * throttling at the LUN or target level, then do not
		 * decrement sp->wdg_q_time.
		 */
		if (ha->task_daemon_flags & STATE_ONLINE &&
		    !(sp->flags & SRB_ISP_STARTED) &&
		    q_sane == B_TRUE &&
		    sp->flags & SRB_FCP_CMD_PKT &&
		    lq->lun_outcnt >= ha->execution_throttle) {
			continue;
		}

		if (sp->wdg_q_time != 0) {
			sp->wdg_q_time--;

			/* Timeout? */
			if (sp->wdg_q_time != 0) {
				continue;
			}

			sp->flags |= SRB_COMMAND_TIMEOUT;
			timeout = B_TRUE;
		}
	}

	/*
	 * Second pass: abort the commands marked above.  Done separately
	 * because ql_cmd_timeout() drops and re-takes the device queue
	 * lock, so the list must be rescanned from the head after each
	 * abort (next_cmd = tq->wdg.first).
	 */
	if (timeout == B_TRUE) {
		for (link = tq->wdg.first; link != NULL; link = next_cmd) {
			sp = link->base_address;
			next_cmd = link->next;

			if (sp->flags & SRB_COMMAND_TIMEOUT) {
				ql_remove_link(&tq->wdg, &sp->wdg);
				sp->flags &= ~(SRB_WATCHDOG_ENABLED |
				    SRB_COMMAND_TIMEOUT);
				ql_cmd_timeout(ha, tq, sp);
				next_cmd = tq->wdg.first;
			}
		}
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_6(ha, "done\n");
}
9959 10410
9960 10411 /*
9961 10412 * ql_cmd_timeout
9962 10413 * Command timeout handler.
9963 10414 *
9964 10415 * Input:
9965 10416 * ha: adapter state pointer.
9966 10417 * tq: target queue pointer.
9967 10418 * sp: SRB pointer.
9968 - * set_flags: task daemon flags to set.
9969 - * reset_flags: task daemon flags to reset.
9970 10419 *
9971 10420 * Context:
9972 - * Interrupt context, no mailbox commands allowed.
10421 + * Kernel context.
9973 10422 */
9974 -/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
{
	int	rval = 0;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Entered with DEVICE_QUEUE_LOCK held; each branch below drops it
	 * while completing/aborting and it is re-taken before returning,
	 * so the caller always gets the lock back.
	 */
	REQUEST_RING_LOCK(ha);
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Command never reached the ISP: fail it in the driver. */
		EL(ha, "command timed out in driver, sp=%ph spf=%xh\n",
		    (void *)sp, sp->flags);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release the ring and device queue locks. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd, B_FALSE);
	} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		/*
		 * Try a firmware-level abort; too many timeouts or a
		 * failed abort escalates to a full ISP abort.
		 * NOTE(review): when the threshold test short-circuits,
		 * rval is still 0 in the EL message below — confirm that
		 * is acceptable for the log.
		 */
		if (ha->pha->timeout_cnt++ > TIMEOUT_THRESHOLD ||
		    (rval = ql_abort_io(ha, sp)) != QL_SUCCESS) {
			sp->flags |= SRB_COMMAND_TIMEOUT;
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
			    "needed\n", rval, ha->pha->timeout_cnt);
		}
	} else {
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh, isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally capture a firmware dump before the abort. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= FW_DUMP_NEEDED;
			ADAPTER_STATE_UNLOCK(ha);
		}

		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= ISP_ABORT_NEEDED;
		TASK_DAEMON_UNLOCK(ha);
	}
	/* Re-acquire the device queue lock for the caller. */
	DEVICE_QUEUE_LOCK(tq);

	QL_PRINT_3(ha, "done\n");
}
10089 10505
10090 10506 /*
10091 - * ql_rst_aen
10092 - * Processes asynchronous reset.
10093 - *
10094 - * Input:
10095 - * ha = adapter state pointer.
10096 - *
10097 - * Context:
10098 - * Kernel context.
10099 - */
10100 -static void
10101 -ql_rst_aen(ql_adapter_state_t *ha)
10102 -{
10103 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10104 -
10105 - /* Issue marker command. */
10106 - (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10107 -
10108 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10109 -}
10110 -
10111 -/*
10112 10507 * ql_cmd_wait
10113 10508 * Stall driver until all outstanding commands are returned.
10114 10509 *
10115 10510 * Input:
10116 10511 * ha = adapter state pointer.
10117 10512 *
10118 10513 * Context:
10119 10514 * Kernel context.
10120 10515 */
void
ql_cmd_wait(ql_adapter_state_t *ha)
{
	uint16_t		index;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(ha, "started\n");

	/* Wait for all outstanding commands to be returned. */
	(void) ql_wait_outstanding(ha);

	/*
	 * Clear out internally queued commands on every port/vport.
	 * Targets capable of FCP-2 retries are left alone unless an
	 * ISP abort is already in progress.
	 */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				if (tq &&
				    (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY) ||
				    ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
					(void) ql_abort_device(vha, tq, 0);
				}
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
10153 10549
10154 10550 /*
10155 10551 * ql_wait_outstanding
10156 10552 * Wait for all outstanding commands to complete.
10157 10553 *
10158 10554 * Input:
10159 10555 * ha = adapter state pointer.
10160 10556 *
10161 10557 * Returns:
10162 10558 * index - the index for ql_srb into outstanding_cmds.
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
10163 10559 *
10164 10560 * Context:
10165 10561 * Kernel context.
10166 10562 */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Poll the outstanding-command array until every live SRB has
	 * drained or the retry budget (ql_osc_wait_count) is exhausted.
	 * Aborted-placeholder slots and commands already marked
	 * SRB_COMMAND_TIMEOUT are not waited on.
	 */
	count = ql_osc_wait_count;
	for (index = 1; index < ha->pha->osc_max_cnt; index++) {
		/* Keep pushing queued IOCBs while we wait. */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    sp != QL_ABORTED_SRB(ha) &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				/* Delay, then rescan from the start. */
				ql_delay(ha, 10000);
				index = 0;
			} else {
				/* Gave up: report the stuck command. */
				EL(ha, "still in OSC,sp=%ph,oci=%d,sph=%xh,"
				    "spf=%xh\n", (void *) sp, index, sp->handle,
				    sp->flags);
				break;
			}
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (index);
}
10198 10596
10199 10597 /*
10200 10598 * ql_restart_queues
10201 10599 * Restart device queues.
10202 10600 *
10203 10601 * Input:
10204 10602 * ha = adapter state pointer.
10205 10603 * DEVICE_QUEUE_LOCK must be released.
10206 10604 *
10207 10605 * Context:
10208 10606 * Interrupt or Kernel context, no mailbox commands allowed.
10209 10607 */
10210 -static void
10608 +void
10211 10609 ql_restart_queues(ql_adapter_state_t *ha)
10212 10610 {
10213 10611 ql_link_t *link, *link2;
10214 10612 ql_tgt_t *tq;
10215 10613 ql_lun_t *lq;
10216 10614 uint16_t index;
10217 10615 ql_adapter_state_t *vha;
10218 10616
10219 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10617 + QL_PRINT_3(ha, "started\n");
10220 10618
10221 10619 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10222 10620 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10223 10621 for (link = vha->dev[index].first; link != NULL;
10224 10622 link = link->next) {
10225 10623 tq = link->base_address;
10226 10624
10227 10625 /* Acquire device queue lock. */
10228 10626 DEVICE_QUEUE_LOCK(tq);
10229 10627
10230 10628 tq->flags &= ~TQF_QUEUE_SUSPENDED;
10231 10629
10232 10630 for (link2 = tq->lun_queues.first;
10233 10631 link2 != NULL; link2 = link2->next) {
10234 10632 lq = link2->base_address;
10235 10633
10236 10634 if (lq->cmd.first != NULL) {
10237 10635 ql_next(vha, lq);
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
10238 10636 DEVICE_QUEUE_LOCK(tq);
10239 10637 }
10240 10638 }
10241 10639
10242 10640 /* Release device queue lock. */
10243 10641 DEVICE_QUEUE_UNLOCK(tq);
10244 10642 }
10245 10643 }
10246 10644 }
10247 10645
10248 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10646 + QL_PRINT_3(ha, "done\n");
10249 10647 }
10250 10648
10251 10649 /*
10252 10650 * ql_iidma
10253 10651 * Setup iiDMA parameters to firmware
10254 10652 *
10255 10653 * Input:
10256 10654 * ha = adapter state pointer.
10257 10655 * DEVICE_QUEUE_LOCK must be released.
10258 10656 *
10259 10657 * Context:
10260 10658 * Interrupt or Kernel context, no mailbox commands allowed.
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
10261 10659 */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(ha, "started\n");

	/* Nothing to do on adapters without iiDMA support. */
	if (!CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
		QL_PRINT_3(ha, "done\n");
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/*
			 * NOTE(review): TQF_IIDMA_NEEDED is tested before
			 * the device queue lock is taken — confirm the
			 * check-then-lock window is harmless here.
			 */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				continue;
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);

			/*
			 * Skip fabric/management handles, initiator-mode
			 * devices, and targets with no defined rate.
			 */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->d_id.b24 == FS_MANAGEMENT_SERVER) ||
			    (tq->flags & TQF_INITIATOR_DEVICE) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				continue;
			}

			/* Get the iiDMA persistent data */
			(void) snprintf(buf, sizeof (buf),
			    "iidma-rate-%02x%02x%02x%02x%02x"
			    "%02x%02x%02x", tq->port_name[0],
			    tq->port_name[1], tq->port_name[2],
			    tq->port_name[3], tq->port_name[4],
			    tq->port_name[5], tq->port_name[6],
			    tq->port_name[7]);

			if ((data = ql_get_prop(ha, buf)) ==
			    0xffffffff) {
				/* No per-port property configured. */
				tq->iidma_rate = IIDMA_RATE_NDEF;
			} else {
				/* Accept only known rate codes. */
				switch (data) {
				case IIDMA_RATE_4GB:
				case IIDMA_RATE_8GB:
				case IIDMA_RATE_10GB:
				case IIDMA_RATE_16GB:
				case IIDMA_RATE_32GB:
					tq->iidma_rate = data;
					break;
				default:
					EL(ha, "invalid data for "
					    "parameter: %s: %xh\n",
					    buf, data);
					tq->iidma_rate =
					    IIDMA_RATE_NDEF;
					break;
				}
			}

			EL(ha, "d_id = %xh iidma_rate = %xh\n",
			    tq->d_id.b24, tq->iidma_rate);

			/* Set the firmware's iiDMA rate */
			if (!CFG_IST(ha, CFG_FCOE_SUPPORT)) {
				if (tq->iidma_rate <= IIDMA_RATE_MAX) {
					data = ql_iidma_rate(ha, tq->loop_id,
					    &tq->iidma_rate,
					    EXT_IIDMA_MODE_SET);
					if (data != QL_SUCCESS) {
						EL(ha, "mbx failed: %xh\n",
						    data);
					}
				}
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
10357 10751
10358 10752 /*
10359 10753 * ql_abort_queues
10360 10754 * Abort all commands on device queues.
10361 10755 *
10362 10756 * Input:
10363 10757 * ha = adapter state pointer.
10364 10758 *
10365 10759 * Context:
10366 10760 * Interrupt or Kernel context, no mailbox commands allowed.
10367 10761 */
void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(ha, "started\n");

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < ha->osc_max_cnt; index++) {
		/* Flush driver-queued IOCBs first (lock dropped briefly). */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* Skip aborted placeholders and other instances' SRBs. */
		if (sp && (sp == QL_ABORTED_SRB(ha) || sp->ha != ha)) {
			continue;
		}

		/* skip devices capable of FCP2 retrys */
		if (sp != NULL &&
		    (sp->lun_queue == NULL ||
		    (tq = sp->lun_queue->target_queue) == NULL ||
		    !(tq->prli_svc_param_word_3 & PRLI_W3_RETRY) ||
		    ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd, B_FALSE);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush the internal device queues on every port/vport. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(vha, "abort instance\n");
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY) ||
				    ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(ha, "done\n");
}
10439 10839
10440 10840 /*
10441 10841 * ql_abort_device_queues
10442 10842 * Abort all commands on device queues.
10443 10843 *
10444 10844 * Input:
10445 10845 * ha = adapter state pointer.
10446 10846 *
10447 10847 * Context:
10448 10848 * Interrupt or Kernel context, no mailbox commands allowed.
10449 10849 */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(ha, "started\n");

	DEVICE_QUEUE_LOCK(tq);
	/* Push ring-pending commands back onto the device queues first. */
	ql_requeue_pending_cmds(ha, tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		/*
		 * The lock is dropped around each completion, so always
		 * restart from the head of the (possibly changed) list.
		 */
		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd, B_FALSE);

			/* Delay for system */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(ha, "done\n");
}
10498 10894
/*
 * ql_loop_resync
 *	Resync with fibre channel devices.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_loop_resync(ql_adapter_state_t *ha)
{
	int rval;

	QL_PRINT_3(ha, "started\n");

	/* Shut down IP; ql_loop_online() restarts it when the loop is up. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Wait for firmware ready (timeout arg presumably seconds). */
	rval = ql_fw_ready(ha, 10);

	TASK_DAEMON_LOCK(ha);
	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
	TASK_DAEMON_UNLOCK(ha);

	/* Set loop online, if it really is. */
	if (rval == QL_SUCCESS) {
		ql_loop_online(ha);
		QL_PRINT_3(ha, "done\n");
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
}
10540 10931
/*
 * ql_loop_online
 *	Set loop online status if it really is online.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Kernel context.
 */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(ha, "started\n");

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		/* Skip ports that are still resyncing or down. */
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * Update the port state (preserving the speed
			 * bits) and flag the change for the task daemon.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(ha, "done\n");
}
10593 10984
/*
 * ql_fca_handle_to_state
 *	Verifies handle to be correct.
 *
 * Input:
 *	fca_handle = pointer to state structure.
 *
 * Returns:
 *	NULL = failure
 *
 * Context:
 *	Kernel context.
 */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef QL_DEBUG_ROUTINES
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	/* Search every adapter and each of its virtual ports. */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		/*
		 * NOTE(review): ha is NULL here; QL_PRINT_2 must
		 * tolerate a NULL state pointer — confirm the macro.
		 */
		QL_PRINT_2(ha, "failed\n");
	}

#endif /* QL_DEBUG_ROUTINES */

	/* The handle is returned unchanged regardless of verification. */
	return ((ql_adapter_state_t *)fca_handle);
}
10639 11030
/*
 * ql_d_id_to_queue
 *	Locate device queue that matches destination ID.
 *
 * Input:
 *	ha = adapter state pointer.
 *	d_id = destination ID
 *
 * Returns:
 *	NULL = failure
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
ql_tgt_t *
ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
{
	uint16_t	index;
	ql_tgt_t	*tq;
	ql_link_t	*link;

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Walk the hash chain for an exact 24-bit D_ID match. */
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24 &&
		    VALID_DEVICE_ID(ha, tq->loop_id)) {
			return (tq);
		}
	}

	return (NULL);
}
10674 11065
/*
 * ql_loop_id_to_queue
 *	Locate device queue that matches loop ID.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	loop_id: destination ID
 *
 * Returns:
 *	NULL = failure
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
ql_tgt_t *
ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
{
	uint16_t	index;
	ql_tgt_t	*tq;
	ql_link_t	*link;

	/* Loop ID is not hashed; scan every device head list. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->loop_id == loop_id) {
				return (tq);
			}
		}
	}

	return (NULL);
}
10708 11099
/*
 * ql_kstat_update
 *	Updates kernel statistics.
 *
 * Input:
 *	ksp - driver kernel statistics structure pointer.
 *	rw - function to perform
 *
 * Returns:
 *	0 or EACCES
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
static int
ql_kstat_update(kstat_t *ksp, int rw)
{
	int	rval;

	QL_PRINT_3(ksp->ks_private, "started\n");

	/* Statistics are read-only; reject write attempts. */
	if (rw == KSTAT_WRITE) {
		rval = EACCES;
	} else {
		rval = 0;
	}

	if (rval != 0) {
		/*EMPTY*/
		QL_PRINT_2(ksp->ks_private, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ksp->ks_private, "done\n");
	}
	return (rval);
}
10746 11137
/*
 * ql_load_flash
 *	Loads flash.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dp:	data pointer.
 *	size:	data length.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* Newer (type 2 firmware) ISPs use the 24xx flash path. */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(ha, "started\n");

	/* Default flash window: 128KB at offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	ql_flash_disable(ha);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
10833 11220
10834 11221 /*
10835 11222 * ql_program_flash_address
10836 11223 * Program flash address.
10837 11224 *
10838 11225 * Input:
10839 11226 * ha = adapter state pointer.
10840 11227 * addr = flash byte address.
10841 11228 * data = data to be written to flash.
10842 11229 *
10843 11230 * Returns:
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
10844 11231 * ql local function return status code.
10845 11232 *
10846 11233 * Context:
10847 11234 * Kernel context.
10848 11235 */
10849 11236 static int
10850 11237 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10851 11238 {
10852 11239 int rval;
10853 11240
10854 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11241 + QL_PRINT_3(ha, "started\n");
10855 11242
10856 11243 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10857 11244 ql_write_flash_byte(ha, 0x5555, 0xa0);
10858 11245 ql_write_flash_byte(ha, addr, data);
10859 11246 } else {
10860 11247 /* Write Program Command Sequence */
10861 11248 ql_write_flash_byte(ha, 0x5555, 0xaa);
10862 11249 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10863 11250 ql_write_flash_byte(ha, 0x5555, 0xa0);
10864 11251 ql_write_flash_byte(ha, addr, data);
10865 11252 }
10866 11253
10867 11254 /* Wait for write to complete. */
10868 11255 rval = ql_poll_flash(ha, addr, data);
10869 11256
10870 11257 if (rval != QL_SUCCESS) {
10871 11258 EL(ha, "failed=%xh\n", rval);
10872 11259 } else {
10873 11260 /*EMPTY*/
10874 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11261 + QL_PRINT_3(ha, "done\n");
10875 11262 }
10876 11263 return (rval);
10877 11264 }
10878 11265
/*
 * ql_erase_flash
 *	Erases entire flash.
 *
 * Input:
 *	ha = adapter state pointer.
 *	erase_all = nonzero to erase the whole chip without preserving
 *		    the FPGA/fcode region on SBUS cards.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
{
	int		rval;
	uint32_t	erase_delay = 2000000;
	uint32_t	sStartAddr;
	uint32_t	ssize;
	uint32_t	cnt;
	uint8_t		*bfp;
	uint8_t		*tmp;

	QL_PRINT_3(ha, "started\n");

	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
		/*
		 * SBUS partial erase: the chip erase wipes everything,
		 * so save the region we are not updating and restore
		 * it afterwards.
		 */
		if (ql_flash_sbus_fpga == 1) {
			ssize = QL_SBUS_FCODE_SIZE;
			sStartAddr = QL_FCODE_OFFSET;
		} else {
			ssize = QL_FPGA_SIZE;
			sStartAddr = QL_FPGA_OFFSET;
		}

		erase_delay = 20000000;

		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);

		/* Save the section of flash we're not updating to buffer */
		tmp = bfp;
		for (cnt = sStartAddr; cnt < ssize + sStartAddr; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
		}

		/* Chip Erase Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x80);
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x10);

		ql_delay(ha, erase_delay);

		/* Wait for erase to complete. */
		rval = ql_poll_flash(ha, 0, 0x80);

		if (rval == QL_SUCCESS) {
			/* Restore the section we saved off */
			tmp = bfp;
			for (cnt = sStartAddr; cnt < ssize + sStartAddr;
			    cnt++) {
				/* Allow other system activity. */
				if (cnt % 0x1000 == 0) {
					ql_delay(ha, 10000);
				}
				rval = ql_program_flash_address(ha, cnt,
				    *tmp++);
				if (rval != QL_SUCCESS) {
					break;
				}
			}
		}
		kmem_free(bfp, ssize);
	} else {
		/* Chip Erase Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x80);
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x10);

		ql_delay(ha, erase_delay);

		/* Wait for erase to complete. */
		rval = ql_poll_flash(ha, 0, 0x80);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
10977 11368
/*
 * ql_poll_flash
 *	Polls flash for completion.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = flash byte address.
 *	poll_data = data to be polled.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
{
	uint8_t		flash_data;
	uint32_t	cnt;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(ha, "started\n");

	/* Only the status poll bit (BIT_7) is compared. */
	poll_data = (uint8_t)(poll_data & BIT_7);

	/* Wait for 30 seconds for command to finish. */
	for (cnt = 30000000; cnt; cnt--) {
		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);

		if ((flash_data & BIT_7) == poll_data) {
			rval = QL_SUCCESS;
			break;
		}
		/*
		 * BIT_5 presumably signals a device timeout; allow two
		 * more reads before giving up.
		 */
		if (flash_data & BIT_5 && cnt > 2) {
			cnt = 2;
		}
		drv_usecwait(1);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
11026 11417
/*
 * ql_flash_enable
 *	Setup flash for reading/writing.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(ha, "started\n");

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read — presumably completes the reset sequence. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(ha, "done\n");
}
11070 11461
/*
 * ql_flash_disable
 *	Disable flash and allow RISC to run.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the flash-enable bit in the control/status reg. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(ha, "done\n");
}
11108 11499
/*
 * ql_write_flash_byte
 *	Write byte to flash.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = flash byte address.
 *	data = data to be written.
 *
 * Context:
 *	Kernel context.
 */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: address/data go through the FPGA EEPROM regs. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		/* 2322/6322 parts carry extra bank bits in ctrl_status. */
		if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it changes. */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this inner CFG_SBUS_CARD test is inside
		 * the !CFG_SBUS_CARD branch and so can never be true;
		 * looks like dead code retained from an older layout.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
11168 11559
/*
 * ql_read_flash_byte
 *	Reads byte from flash, but must read a word from chip.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = flash byte address.
 *
 * Returns:
 *	byte from flash.
 *
 * Context:
 *	Kernel context.
 */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: address/data go through the FPGA EEPROM regs. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		/* 2322/6322 parts carry extra bank bits in ctrl_status. */
		if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it changes. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this inner CFG_SBUS_CARD test is inside
		 * the !CFG_SBUS_CARD branch and so can never be true;
		 * looks like dead code retained from an older layout.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
11232 11623
/*
 * ql_24xx_flash_id
 *	Get flash IDs.
 *
 * Input:
 *	vha:	adapter state pointer (physical adapter is used).
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_flash_id(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata = 0;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(ha, "started\n");

	/* Try the primary flash-ID configuration register first. */
	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
	if (CFG_IST(ha, CFG_CTRL_24XX)) {
		/* 24xx fallback register. */
		if (rval != QL_SUCCESS || fdata == 0) {
			fdata = 0;
			rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x39F,
			    &fdata);
		}
	} else {
		/* Other ISPs: 25xx uses 0x49F, the rest 0x39F. */
		fdata = 0;
		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
		    (CFG_IST(ha, CFG_CTRL_25XX) ? 0x49F : 0x39F), &fdata);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "24xx read_flash failed=%xh\n", rval);
	} else if (fdata != 0) {
		xp->fdesc.flash_manuf = LSB(LSW(fdata));
		xp->fdesc.flash_id = MSB(LSW(fdata));
		xp->fdesc.flash_len = LSB(MSW(fdata));
	} else {
		/* No ID readable; assume the default Atmel part. */
		xp->fdesc.flash_manuf = ATMEL_FLASH;
		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
		xp->fdesc.flash_len = 0;
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
11280 11676
11281 11677 /*
11282 11678 * ql_24xx_load_flash
11283 11679 * Loads flash.
11284 11680 *
11285 11681 * Input:
11286 11682 * ha = adapter state pointer.
11287 11683 * dp = data pointer.
11288 11684 * size = data length in bytes.
11289 11685 * faddr = 32bit word flash byte address.
11290 11686 *
11291 11687 * Returns:
11292 11688 * ql local function return status code.
11293 11689 *
11294 11690 * Context:
11295 11691 * Kernel context.
11296 11692 */
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
11297 11693 int
11298 11694 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11299 11695 uint32_t faddr)
11300 11696 {
11301 11697 int rval;
11302 11698 uint32_t cnt, rest_addr, fdata, wc;
11303 11699 dma_mem_t dmabuf = {0};
11304 11700 ql_adapter_state_t *ha = vha->pha;
11305 11701 ql_xioctl_t *xp = ha->xioctl;
11306 11702
11307 - QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11703 + QL_PRINT_3(ha, "started, faddr=%xh, size=%xh\n",
11308 11704 ha->instance, faddr, size);
11309 11705
11310 11706 /* start address must be 32 bit word aligned */
11311 11707 if ((faddr & 0x3) != 0) {
11312 11708 EL(ha, "incorrect buffer size alignment\n");
11313 11709 return (QL_FUNCTION_PARAMETER_ERROR);
11314 11710 }
11315 11711
11316 11712 /* Allocate DMA buffer */
11317 - if (CFG_IST(ha, CFG_CTRL_2581)) {
11713 + if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11318 11714 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11319 11715 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11320 11716 QL_SUCCESS) {
11321 11717 EL(ha, "dma alloc failed, rval=%xh\n", rval);
11322 11718 return (rval);
11323 11719 }
11324 11720 }
11325 11721
11326 - GLOBAL_HW_LOCK();
11327 -
11328 11722 /* Enable flash write */
11329 11723 if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11330 - GLOBAL_HW_UNLOCK();
11331 11724 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11332 11725 ql_free_phys(ha, &dmabuf);
11333 11726 return (rval);
11334 11727 }
11335 11728
11336 11729 /* setup mask of address range within a sector */
11337 11730 rest_addr = (xp->fdesc.block_size - 1) >> 2;
11338 11731
11339 11732 faddr = faddr >> 2; /* flash gets 32 bit words */
11340 11733
11341 11734 /*
11342 11735 * Write data to flash.
11343 11736 */
11344 11737 cnt = 0;
11345 11738 size = (size + 3) >> 2; /* Round up & convert to dwords */
11346 11739
11347 11740 while (cnt < size) {
11348 11741 /* Beginning of a sector? */
11349 11742 if ((faddr & rest_addr) == 0) {
11350 - if (CFG_IST(ha, CFG_CTRL_8021)) {
11743 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
11351 11744 fdata = ha->flash_data_addr | faddr;
11352 11745 rval = ql_8021_rom_erase(ha, fdata);
11353 11746 if (rval != QL_SUCCESS) {
11354 11747 EL(ha, "8021 erase sector status="
11355 11748 "%xh, start=%xh, end=%xh"
11356 11749 "\n", rval, fdata,
11357 11750 fdata + rest_addr);
11358 11751 break;
11359 11752 }
11360 - } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11753 + } else if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
11361 11754 fdata = ha->flash_data_addr | faddr;
11362 11755 rval = ql_flash_access(ha,
11363 11756 FAC_ERASE_SECTOR, fdata, fdata +
11364 11757 rest_addr, 0);
11365 11758 if (rval != QL_SUCCESS) {
11366 11759 EL(ha, "erase sector status="
11367 11760 "%xh, start=%xh, end=%xh"
11368 11761 "\n", rval, fdata,
11369 11762 fdata + rest_addr);
11370 11763 break;
11371 11764 }
11372 11765 } else {
11373 11766 fdata = (faddr & ~rest_addr) << 2;
11374 11767 fdata = (fdata & 0xff00) |
11375 11768 (fdata << 16 & 0xff0000) |
11376 11769 (fdata >> 16 & 0xff);
11377 11770
11378 11771 if (rest_addr == 0x1fff) {
11379 11772 /* 32kb sector block erase */
11380 11773 rval = ql_24xx_write_flash(ha,
11381 11774 FLASH_CONF_ADDR | 0x0352,
11382 11775 fdata);
11383 11776 } else {
11384 11777 /* 64kb sector block erase */
11385 11778 rval = ql_24xx_write_flash(ha,
11386 11779 FLASH_CONF_ADDR | 0x03d8,
11387 11780 fdata);
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
11388 11781 }
11389 11782 if (rval != QL_SUCCESS) {
11390 11783 EL(ha, "Unable to flash sector"
11391 11784 ": address=%xh\n", faddr);
11392 11785 break;
11393 11786 }
11394 11787 }
11395 11788 }
11396 11789
11397 11790 /* Write data */
11398 - if (CFG_IST(ha, CFG_CTRL_2581) &&
11791 + if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT) &&
11399 11792 ((faddr & 0x3f) == 0)) {
11400 11793 /*
11401 11794 * Limit write up to sector boundary.
11402 11795 */
11403 11796 wc = ((~faddr & (rest_addr>>1)) + 1);
11404 11797
11405 11798 if (size - cnt < wc) {
11406 11799 wc = size - cnt;
11407 11800 }
11408 11801
11409 11802 ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11410 11803 (uint8_t *)dmabuf.bp, wc<<2,
11411 11804 DDI_DEV_AUTOINCR);
11412 11805
11413 11806 rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11414 11807 faddr, dmabuf.cookie.dmac_laddress, wc);
11415 11808 if (rval != QL_SUCCESS) {
11416 11809 EL(ha, "unable to dma to flash "
11417 11810 "address=%xh\n", faddr << 2);
11418 11811 break;
11419 11812 }
11420 11813
11421 11814 cnt += wc;
11422 11815 faddr += wc;
11423 11816 dp += wc << 2;
11424 11817 } else {
11425 11818 fdata = *dp++;
11426 11819 fdata |= *dp++ << 8;
11427 11820 fdata |= *dp++ << 16;
11428 11821 fdata |= *dp++ << 24;
11429 11822 rval = ql_24xx_write_flash(ha,
11430 11823 ha->flash_data_addr | faddr, fdata);
11431 11824 if (rval != QL_SUCCESS) {
11432 11825 EL(ha, "Unable to program flash "
11433 11826 "address=%xh data=%xh\n", faddr,
11434 11827 *dp);
11435 11828 break;
11436 11829 }
11437 11830 cnt++;
11438 11831 faddr++;
|
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
11439 11832
11440 11833 /* Allow other system activity. */
11441 11834 if (cnt % 0x1000 == 0) {
11442 11835 ql_delay(ha, 10000);
11443 11836 }
11444 11837 }
11445 11838 }
11446 11839
11447 11840 ql_24xx_protect_flash(ha);
11448 11841
11449 - ql_free_phys(ha, &dmabuf);
11842 + if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11843 + ql_free_phys(ha, &dmabuf);
11844 + }
11450 11845
11451 - GLOBAL_HW_UNLOCK();
11452 -
11453 11846 if (rval != QL_SUCCESS) {
11454 11847 EL(ha, "failed=%xh\n", rval);
11455 11848 } else {
11456 11849 /*EMPTY*/
11457 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11850 + QL_PRINT_3(ha, "done\n");
11458 11851 }
11459 11852 return (rval);
11460 11853 }
11461 11854
11462 11855 /*
11463 11856 * ql_24xx_read_flash
11464 11857 * Reads a 32bit word from ISP24xx NVRAM/FLASH.
11465 11858 *
11466 11859 * Input:
11467 11860 * ha: adapter state pointer.
11468 11861 * faddr: NVRAM/FLASH address.
11469 11862 * bp: data pointer.
11470 11863 *
11471 11864 * Returns:
11472 11865 * ql local function return status code.
11473 11866 *
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
11474 11867 * Context:
11475 11868 * Kernel context.
11476 11869 */
11477 11870 int
11478 11871 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11479 11872 {
11480 11873 uint32_t timer;
11481 11874 int rval = QL_SUCCESS;
11482 11875 ql_adapter_state_t *ha = vha->pha;
11483 11876
11484 - if (CFG_IST(ha, CFG_CTRL_8021)) {
11877 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
11485 11878 if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11486 11879 EL(ha, "8021 access error\n");
11487 11880 }
11488 11881 return (rval);
11489 11882 }
11490 11883
11491 11884 /* Clear access error flag */
11492 11885 WRT32_IO_REG(ha, ctrl_status,
11493 11886 RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11494 11887
11495 11888 WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11496 11889
11497 11890 /* Wait for READ cycle to complete. */
11498 11891 for (timer = 300000; timer; timer--) {
11499 11892 if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11500 11893 break;
11501 11894 }
11502 11895 drv_usecwait(10);
11503 11896 }
11504 11897
11505 11898 if (timer == 0) {
11506 11899 EL(ha, "failed, timeout\n");
11507 11900 rval = QL_FUNCTION_TIMEOUT;
11508 11901 } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11509 11902 EL(ha, "failed, access error\n");
11510 11903 rval = QL_FUNCTION_FAILED;
11511 11904 }
11512 11905
11513 11906 *bp = RD32_IO_REG(ha, flash_data);
11514 11907
11515 11908 return (rval);
11516 11909 }
11517 11910
11518 11911 /*
11519 11912 * ql_24xx_write_flash
11520 11913 * Writes a 32bit word to ISP24xx NVRAM/FLASH.
11521 11914 *
11522 11915 * Input:
11523 11916 * ha: adapter state pointer.
11524 11917 * addr: NVRAM/FLASH address.
11525 11918 * value: data.
11526 11919 *
11527 11920 * Returns:
11528 11921 * ql local function return status code.
11529 11922 *
|
↓ open down ↓ |
35 lines elided |
↑ open up ↑ |
11530 11923 * Context:
11531 11924 * Kernel context.
11532 11925 */
11533 11926 int
11534 11927 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11535 11928 {
11536 11929 uint32_t timer, fdata;
11537 11930 int rval = QL_SUCCESS;
11538 11931 ql_adapter_state_t *ha = vha->pha;
11539 11932
11540 - if (CFG_IST(ha, CFG_CTRL_8021)) {
11933 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
11541 11934 if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
11542 11935 EL(ha, "8021 access error\n");
11543 11936 }
11544 11937 return (rval);
11545 11938 }
11546 11939 /* Clear access error flag */
11547 11940 WRT32_IO_REG(ha, ctrl_status,
11548 11941 RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11549 11942
11550 11943 WRT32_IO_REG(ha, flash_data, data);
11551 11944 RD32_IO_REG(ha, flash_data); /* PCI Posting. */
11552 11945 WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11553 11946
11554 11947 /* Wait for Write cycle to complete. */
11555 11948 for (timer = 3000000; timer; timer--) {
11556 11949 if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11557 11950 /* Check flash write in progress. */
11558 11951 if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11559 11952 (void) ql_24xx_read_flash(ha,
11560 - FLASH_CONF_ADDR | 0x005, &fdata);
11953 + FLASH_CONF_ADDR | 0x105, &fdata);
11561 11954 if (!(fdata & BIT_0)) {
11562 11955 break;
11563 11956 }
11564 11957 } else {
11565 11958 break;
11566 11959 }
11567 11960 }
11568 11961 drv_usecwait(10);
11569 11962 }
11570 11963 if (timer == 0) {
11571 11964 EL(ha, "failed, timeout\n");
11572 11965 rval = QL_FUNCTION_TIMEOUT;
11573 11966 } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11574 11967 EL(ha, "access error\n");
11575 11968 rval = QL_FUNCTION_FAILED;
11576 11969 }
11577 11970
11578 11971 return (rval);
11579 11972 }
11580 11973 /*
11581 11974 * ql_24xx_unprotect_flash
11582 11975 * Enable writes
11583 11976 *
11584 11977 * Input:
11585 11978 * ha: adapter state pointer.
11586 11979 *
|
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
11587 11980 * Returns:
11588 11981 * ql local function return status code.
11589 11982 *
11590 11983 * Context:
11591 11984 * Kernel context.
11592 11985 */
11593 11986 int
11594 11987 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11595 11988 {
11596 11989 int rval;
11597 - uint32_t fdata;
11990 + uint32_t fdata, timer;
11598 11991 ql_adapter_state_t *ha = vha->pha;
11599 11992 ql_xioctl_t *xp = ha->xioctl;
11600 11993
11601 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11994 + QL_PRINT_3(ha, "started\n");
11602 11995
11603 - if (CFG_IST(ha, CFG_CTRL_8021)) {
11996 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
11604 11997 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11605 11998 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11606 11999 if (rval != QL_SUCCESS) {
11607 12000 EL(ha, "8021 access error\n");
11608 12001 }
11609 12002 return (rval);
11610 12003 }
11611 - if (CFG_IST(ha, CFG_CTRL_81XX)) {
12004 + if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
11612 12005 if (ha->task_daemon_flags & FIRMWARE_UP) {
11613 - if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11614 - 0)) != QL_SUCCESS) {
11615 - EL(ha, "status=%xh\n", rval);
12006 + for (timer = 3000; timer; timer--) {
12007 + if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
12008 + EL(ha, "ISP_ABORT_NEEDED done\n");
12009 + return (QL_ABORTED);
12010 + }
12011 + rval = ql_flash_access(ha, FAC_SEMA_LOCK,
12012 + 0, 0, NULL);
12013 + if (rval == QL_SUCCESS ||
12014 + rval == QL_FUNCTION_TIMEOUT) {
12015 + EL(ha, "lock status=%xh\n", rval);
12016 + break;
12017 + }
12018 + delay(1);
11616 12019 }
11617 - QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11618 - ha->instance);
11619 - return (rval);
12020 +
12021 + if (rval == QL_SUCCESS &&
12022 + (rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0,
12023 + 0, NULL)) != QL_SUCCESS) {
12024 + EL(ha, "WRT_ENABLE status=%xh\n", rval);
12025 + (void) ql_flash_access(ha, FAC_SEMA_UNLOCK,
12026 + 0, 0, NULL);
12027 + }
12028 + } else {
12029 + rval = QL_SUCCESS;
11620 12030 }
12031 + QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
12032 + return (rval);
11621 12033 } else {
11622 12034 /* Enable flash write. */
11623 12035 WRT32_IO_REG(ha, ctrl_status,
11624 12036 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11625 12037 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11626 12038 }
11627 12039
12040 + /* Sector/Block Protection Register Lock (SST, ST, ATMEL). */
12041 + (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
12042 + xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
12043 +
11628 12044 /*
11629 - * Remove block write protection (SST and ST) and
11630 - * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11631 - * Unprotect sectors.
12045 + * Remove block write protection (SST and ST)
12046 + * Global unprotect sectors (ATMEL).
11632 12047 */
11633 12048 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11634 12049 xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11635 12050
11636 12051 if (xp->fdesc.unprotect_sector_cmd != 0) {
11637 12052 for (fdata = 0; fdata < 0x10; fdata++) {
11638 12053 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11639 12054 0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11640 12055 }
11641 12056
11642 12057 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11643 12058 xp->fdesc.unprotect_sector_cmd, 0x00400f);
11644 12059 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11645 12060 xp->fdesc.unprotect_sector_cmd, 0x00600f);
11646 12061 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11647 12062 xp->fdesc.unprotect_sector_cmd, 0x00800f);
11648 12063 }
11649 12064
11650 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12065 + QL_PRINT_3(ha, "done\n");
11651 12066
11652 12067 return (QL_SUCCESS);
11653 12068 }
11654 12069
11655 12070 /*
11656 12071 * ql_24xx_protect_flash
11657 12072 * Disable writes
11658 12073 *
11659 12074 * Input:
11660 12075 * ha: adapter state pointer.
11661 12076 *
11662 12077 * Context:
11663 12078 * Kernel context.
11664 12079 */
11665 12080 void
11666 12081 ql_24xx_protect_flash(ql_adapter_state_t *vha)
11667 12082 {
11668 12083 int rval;
11669 - uint32_t fdata;
12084 + uint32_t fdata, timer;
11670 12085 ql_adapter_state_t *ha = vha->pha;
11671 12086 ql_xioctl_t *xp = ha->xioctl;
11672 12087
11673 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12088 + QL_PRINT_3(ha, "started\n");
11674 12089
11675 - if (CFG_IST(ha, CFG_CTRL_8021)) {
12090 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
11676 12091 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11677 12092 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
11678 12093 if (rval != QL_SUCCESS) {
11679 12094 EL(ha, "8021 access error\n");
11680 12095 }
11681 12096 return;
11682 12097 }
11683 - if (CFG_IST(ha, CFG_CTRL_81XX)) {
12098 + if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
11684 12099 if (ha->task_daemon_flags & FIRMWARE_UP) {
11685 - if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11686 - 0)) != QL_SUCCESS) {
11687 - EL(ha, "status=%xh\n", rval);
12100 + for (timer = 3000; timer; timer--) {
12101 + if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
12102 + EL(ha, "ISP_ABORT_NEEDED done\n");
12103 + return;
12104 + }
12105 + rval = ql_flash_access(ha, FAC_SEMA_LOCK,
12106 + 0, 0, NULL);
12107 + if (rval == QL_SUCCESS ||
12108 + rval == QL_FUNCTION_TIMEOUT) {
12109 + if (rval != QL_SUCCESS) {
12110 + EL(ha, "lock status=%xh\n",
12111 + rval);
12112 + }
12113 + break;
12114 + }
12115 + delay(1);
11688 12116 }
11689 - QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11690 - ha->instance);
12117 +
12118 + if (rval == QL_SUCCESS &&
12119 + (rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0,
12120 + 0, NULL)) != QL_SUCCESS) {
12121 + EL(ha, "protect status=%xh\n", rval);
12122 + (void) ql_flash_access(ha, FAC_SEMA_UNLOCK, 0,
12123 + 0, NULL);
12124 + }
12125 + QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
11691 12126 return;
11692 12127 }
11693 12128 } else {
11694 12129 /* Enable flash write. */
11695 12130 WRT32_IO_REG(ha, ctrl_status,
11696 12131 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11697 12132 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11698 12133 }
11699 12134
11700 12135 /*
11701 12136 * Protect sectors.
11702 12137 * Set block write protection (SST and ST) and
11703 12138 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11704 12139 */
11705 12140 if (xp->fdesc.protect_sector_cmd != 0) {
11706 12141 for (fdata = 0; fdata < 0x10; fdata++) {
11707 12142 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11708 - 0x330 | xp->fdesc.protect_sector_cmd, fdata);
12143 + 0x300 | xp->fdesc.protect_sector_cmd, fdata);
11709 12144 }
11710 - (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
12145 + (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11711 12146 xp->fdesc.protect_sector_cmd, 0x00400f);
11712 - (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
12147 + (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11713 12148 xp->fdesc.protect_sector_cmd, 0x00600f);
11714 - (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
12149 + (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11715 12150 xp->fdesc.protect_sector_cmd, 0x00800f);
11716 -
11717 - /* TODO: ??? */
11718 - (void) ql_24xx_write_flash(ha,
11719 - FLASH_CONF_ADDR | 0x101, 0x80);
11720 - } else {
11721 - (void) ql_24xx_write_flash(ha,
11722 - FLASH_CONF_ADDR | 0x101, 0x9c);
11723 12151 }
11724 12152
12153 + /* Remove Sector Protection Registers Locked (SPRL) bit. */
12154 + (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
12155 + xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
12156 +
12157 + (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
12158 + xp->fdesc.write_statusreg_cmd, xp->fdesc.write_disable_bits);
12159 +
11725 12160 /* Disable flash write. */
11726 - if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
12161 + if (!CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
11727 12162 WRT32_IO_REG(ha, ctrl_status,
11728 12163 RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11729 12164 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11730 12165 }
11731 12166
11732 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12167 + QL_PRINT_3(ha, "done\n");
11733 12168 }
11734 12169
11735 12170 /*
11736 12171 * ql_dump_firmware
11737 12172 * Save RISC code state information.
11738 12173 *
11739 12174 * Input:
11740 12175 * ha = adapter state pointer.
11741 12176 *
11742 12177 * Returns:
11743 12178 * QL local function return status code.
11744 12179 *
11745 12180 * Context:
11746 12181 * Kernel context.
11747 12182 */
11748 -static int
12183 +int
11749 12184 ql_dump_firmware(ql_adapter_state_t *vha)
11750 12185 {
11751 12186 int rval;
11752 12187 clock_t timer = drv_usectohz(30000000);
11753 12188 ql_adapter_state_t *ha = vha->pha;
11754 12189
11755 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12190 + QL_PRINT_3(ha, "started\n");
11756 12191
11757 12192 QL_DUMP_LOCK(ha);
11758 12193
11759 12194 if (ha->ql_dump_state & QL_DUMPING ||
11760 12195 (ha->ql_dump_state & QL_DUMP_VALID &&
11761 12196 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11762 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12197 + QL_PRINT_3(ha, "done\n");
11763 12198 QL_DUMP_UNLOCK(ha);
11764 12199 return (QL_SUCCESS);
11765 12200 }
11766 12201
11767 12202 QL_DUMP_UNLOCK(ha);
11768 12203
11769 - ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
12204 + (void) ql_stall_driver(ha, 0);
11770 12205
11771 - /*
11772 - * Wait for all outstanding commands to complete
11773 - */
11774 - (void) ql_wait_outstanding(ha);
11775 -
11776 12206 /* Dump firmware. */
11777 - rval = ql_binary_fw_dump(ha, TRUE);
12207 + if (CFG_IST(ha, CFG_CTRL_82XX)) {
12208 + rval = ql_binary_fw_dump(ha, FALSE);
12209 + } else {
12210 + rval = ql_binary_fw_dump(ha, TRUE);
12211 + }
11778 12212
11779 12213 /* Do abort to force restart. */
11780 - ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
12214 + ql_restart_driver(ha);
12215 + ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
11781 12216 EL(ha, "restarting, isp_abort_needed\n");
11782 12217
11783 12218 /* Acquire task daemon lock. */
11784 12219 TASK_DAEMON_LOCK(ha);
11785 12220
11786 12221 /* Wait for suspension to end. */
11787 - while (ha->task_daemon_flags & QL_SUSPENDED) {
12222 + while (DRIVER_SUSPENDED(ha)) {
11788 12223 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11789 12224
11790 12225 /* 30 seconds from now */
11791 12226 if (cv_reltimedwait(&ha->cv_dr_suspended,
11792 12227 &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11793 12228 /*
11794 12229 * The timeout time 'timer' was
11795 12230 * reached without the condition
11796 12231 * being signaled.
11797 12232 */
11798 12233 break;
11799 12234 }
11800 12235 }
11801 12236
11802 12237 /* Release task daemon lock. */
11803 12238 TASK_DAEMON_UNLOCK(ha);
11804 12239
11805 12240 if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11806 12241 /*EMPTY*/
11807 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12242 + QL_PRINT_3(ha, "done\n");
11808 12243 } else {
11809 12244 EL(ha, "failed, rval = %xh\n", rval);
11810 12245 }
11811 12246 return (rval);
11812 12247 }
11813 12248
11814 12249 /*
11815 12250 * ql_binary_fw_dump
11816 12251 * Dumps binary data from firmware.
11817 12252 *
11818 12253 * Input:
11819 12254 * ha = adapter state pointer.
11820 12255 * lock_needed = mailbox lock needed.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
11821 12256 *
11822 12257 * Returns:
11823 12258 * ql local function return status code.
11824 12259 *
11825 12260 * Context:
11826 12261 * Interrupt or Kernel context, no mailbox commands allowed.
11827 12262 */
11828 12263 int
11829 12264 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11830 12265 {
12266 + uint32_t cnt, index;
11831 12267 clock_t timer;
11832 - mbx_cmd_t mc;
11833 - mbx_cmd_t *mcp = &mc;
11834 12268 int rval = QL_SUCCESS;
11835 12269 ql_adapter_state_t *ha = vha->pha;
11836 12270
11837 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12271 + QL_PRINT_3(ha, "started\n");
11838 12272
11839 - if (CFG_IST(ha, CFG_CTRL_8021)) {
12273 + ADAPTER_STATE_LOCK(ha);
12274 + ha->flags &= ~FW_DUMP_NEEDED;
12275 + ADAPTER_STATE_UNLOCK(ha);
12276 +
12277 + if (CFG_IST(ha, CFG_CTRL_82XX) && ha->md_capture_size == 0) {
11840 12278 EL(ha, "8021 not supported\n");
11841 12279 return (QL_NOT_SUPPORTED);
11842 12280 }
11843 12281
11844 12282 QL_DUMP_LOCK(ha);
11845 12283
11846 12284 if (ha->ql_dump_state & QL_DUMPING ||
11847 12285 (ha->ql_dump_state & QL_DUMP_VALID &&
11848 12286 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11849 12287 EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11850 12288 QL_DUMP_UNLOCK(ha);
11851 12289 return (QL_DATA_EXISTS);
11852 12290 }
11853 12291
11854 12292 ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11855 12293 ha->ql_dump_state |= QL_DUMPING;
11856 12294
11857 12295 QL_DUMP_UNLOCK(ha);
11858 12296
11859 12297 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11860 -
11861 12298 /* Insert Time Stamp */
11862 12299 rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11863 - FTO_INSERT_TIME_STAMP);
12300 + FTO_INSERT_TIME_STAMP, NULL);
11864 12301 if (rval != QL_SUCCESS) {
11865 12302 EL(ha, "f/w extended trace insert"
11866 12303 "time stamp failed: %xh\n", rval);
11867 12304 }
11868 12305 }
11869 12306
11870 12307 if (lock_needed == TRUE) {
11871 12308 /* Acquire mailbox register lock. */
11872 12309 MBX_REGISTER_LOCK(ha);
11873 - timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
12310 + timer = ((MAILBOX_TOV + 6) * drv_usectohz(1000000));
11874 12311
11875 12312 /* Check for mailbox available, if not wait for signal. */
11876 12313 while (ha->mailbox_flags & MBX_BUSY_FLG) {
11877 12314 ha->mailbox_flags = (uint8_t)
11878 12315 (ha->mailbox_flags | MBX_WANT_FLG);
11879 12316
11880 12317 /* 30 seconds from now */
11881 12318 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11882 12319 timer, TR_CLOCK_TICK) == -1) {
11883 12320 /*
11884 12321 * The timeout time 'timer' was
11885 12322 * reached without the condition
11886 12323 * being signaled.
11887 12324 */
11888 12325
11889 12326 /* Release mailbox register lock. */
11890 12327 MBX_REGISTER_UNLOCK(ha);
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
11891 12328
11892 12329 EL(ha, "failed, rval = %xh\n",
11893 12330 QL_FUNCTION_TIMEOUT);
11894 12331 return (QL_FUNCTION_TIMEOUT);
11895 12332 }
11896 12333 }
11897 12334
11898 12335 /* Set busy flag. */
11899 12336 ha->mailbox_flags = (uint8_t)
11900 12337 (ha->mailbox_flags | MBX_BUSY_FLG);
11901 - mcp->timeout = 120;
11902 - ha->mcp = mcp;
11903 12338
11904 12339 /* Release mailbox register lock. */
11905 12340 MBX_REGISTER_UNLOCK(ha);
11906 12341 }
11907 12342
11908 12343 /* Free previous dump buffer. */
11909 12344 if (ha->ql_dump_ptr != NULL) {
11910 12345 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11911 12346 ha->ql_dump_ptr = NULL;
11912 12347 }
11913 12348
11914 - if (CFG_IST(ha, CFG_CTRL_2422)) {
12349 + if (CFG_IST(ha, CFG_CTRL_24XX)) {
11915 12350 ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11916 12351 ha->fw_ext_memory_size);
11917 12352 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12353 + cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
12354 + ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
12355 + index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
12356 +
11918 12357 ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11919 - ha->fw_ext_memory_size);
12358 + cnt + index + ha->fw_ext_memory_size +
12359 + (ha->rsp_queues_cnt * 16));
12360 +
11920 12361 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12362 + cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
12363 + ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
12364 + index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
12365 +
11921 12366 ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11922 - ha->fw_ext_memory_size);
12367 + cnt + index + ha->fw_ext_memory_size +
12368 + (ha->rsp_queues_cnt * 16));
12369 +
12370 + } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12371 + cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
12372 + ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
12373 + index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
12374 +
12375 + ha->ql_dump_size = (uint32_t)(sizeof (ql_83xx_fw_dump_t) +
12376 + cnt + index + ha->fw_ext_memory_size +
12377 + (ha->rsp_queues_cnt * 16));
12378 + } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12379 + ha->ql_dump_size = ha->md_capture_size;
11923 12380 } else {
11924 12381 ha->ql_dump_size = sizeof (ql_fw_dump_t);
11925 12382 }
11926 12383
11927 - if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11928 - NULL) {
11929 - rval = QL_MEMORY_ALLOC_FAILED;
12384 + if (CFG_IST(ha, CFG_CTRL_27XX)) {
12385 + rval = ql_27xx_binary_fw_dump(ha);
11930 12386 } else {
11931 - if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11932 - rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11933 - } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11934 - rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11935 - } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11936 - rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11937 - } else if (CFG_IST(ha, CFG_CTRL_2422)) {
11938 - rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
12387 + if ((ha->ql_dump_ptr =
12388 + kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) == NULL) {
12389 + rval = QL_MEMORY_ALLOC_FAILED;
11939 12390 } else {
11940 - rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
12391 + if (CFG_IST(ha, CFG_CTRL_2363)) {
12392 + rval = ql_2300_binary_fw_dump(ha,
12393 + ha->ql_dump_ptr);
12394 + } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12395 + rval = ql_81xx_binary_fw_dump(ha,
12396 + ha->ql_dump_ptr);
12397 + } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12398 + rval = ql_83xx_binary_fw_dump(ha,
12399 + ha->ql_dump_ptr);
12400 + } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12401 + rval = ql_25xx_binary_fw_dump(ha,
12402 + ha->ql_dump_ptr);
12403 + } else if (CFG_IST(ha, CFG_CTRL_24XX)) {
12404 + rval = ql_24xx_binary_fw_dump(ha,
12405 + ha->ql_dump_ptr);
12406 + } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12407 + (void) ql_8021_reset_fw(ha);
12408 + rval = QL_SUCCESS;
12409 + } else {
12410 + rval = ql_2200_binary_fw_dump(ha,
12411 + ha->ql_dump_ptr);
12412 + }
11941 12413 }
11942 12414 }
11943 12415
11944 12416 /* Reset ISP chip. */
11945 12417 ql_reset_chip(ha);
11946 12418
11947 12419 QL_DUMP_LOCK(ha);
11948 12420
11949 12421 if (rval != QL_SUCCESS) {
11950 12422 if (ha->ql_dump_ptr != NULL) {
11951 12423 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11952 12424 ha->ql_dump_ptr = NULL;
11953 12425 }
11954 12426 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11955 12427 QL_DUMP_UPLOADED);
11956 12428 EL(ha, "failed, rval = %xh\n", rval);
11957 12429 } else {
11958 12430 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11959 12431 ha->ql_dump_state |= QL_DUMP_VALID;
11960 12432 EL(ha, "done\n");
11961 12433 }
11962 12434
11963 12435 QL_DUMP_UNLOCK(ha);
11964 12436
11965 12437 return (rval);
11966 12438 }
11967 12439
11968 12440 /*
11969 12441 * ql_ascii_fw_dump
11970 12442 * Converts firmware binary dump to ascii.
11971 12443 *
11972 12444 * Input:
11973 12445 * ha = adapter state pointer.
11974 12446 * bptr = buffer pointer.
11975 12447 *
11976 12448 * Returns:
11977 12449 * Amount of data buffer used.
11978 12450 *
11979 12451 * Context:
11980 12452 * Kernel context.
|
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
11981 12453 */
11982 12454 size_t
11983 12455 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11984 12456 {
11985 12457 uint32_t cnt;
11986 12458 caddr_t bp;
11987 12459 int mbox_cnt;
11988 12460 ql_adapter_state_t *ha = vha->pha;
11989 12461 ql_fw_dump_t *fw = ha->ql_dump_ptr;
11990 12462
11991 - if (CFG_IST(ha, CFG_CTRL_2422)) {
12463 + if (CFG_IST(ha, CFG_CTRL_24XX)) {
11992 12464 return (ql_24xx_ascii_fw_dump(ha, bufp));
11993 - } else if (CFG_IST(ha, CFG_CTRL_2581)) {
11994 - return (ql_2581_ascii_fw_dump(ha, bufp));
12465 + } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12466 + return (ql_25xx_ascii_fw_dump(ha, bufp));
12467 + } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12468 + return (ql_81xx_ascii_fw_dump(ha, bufp));
12469 + } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12470 + return (ql_8021_ascii_fw_dump(ha, bufp));
12471 + } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12472 + return (ql_83xx_ascii_fw_dump(ha, bufp));
12473 + } else if (CFG_IST(ha, CFG_CTRL_27XX)) {
12474 + return (ql_27xx_ascii_fw_dump(ha, bufp));
11995 12475 }
11996 12476
11997 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12477 + QL_PRINT_3(ha, "started\n");
11998 12478
11999 - if (CFG_IST(ha, CFG_CTRL_2300)) {
12479 + if (CFG_IST(ha, CFG_CTRL_23XX)) {
12000 12480 (void) sprintf(bufp, "\nISP 2300IP ");
12001 - } else if (CFG_IST(ha, CFG_CTRL_6322)) {
12002 - (void) sprintf(bufp, "\nISP 6322FLX ");
12481 + } else if (CFG_IST(ha, CFG_CTRL_63XX)) {
12482 + (void) sprintf(bufp, "\nISP 2322/6322FLX ");
12003 12483 } else {
12004 12484 (void) sprintf(bufp, "\nISP 2200IP ");
12005 12485 }
12006 12486
12007 12487 bp = bufp + strlen(bufp);
12008 12488 (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12009 12489 ha->fw_major_version, ha->fw_minor_version,
12010 12490 ha->fw_subminor_version);
12011 12491
12012 12492 (void) strcat(bufp, "\nPBIU Registers:");
12013 12493 bp = bufp + strlen(bufp);
12014 12494 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12015 12495 if (cnt % 8 == 0) {
12016 12496 *bp++ = '\n';
12017 12497 }
12018 12498 (void) sprintf(bp, "%04x ", fw->pbiu_reg[cnt]);
12019 12499 bp = bp + 6;
12020 12500 }
12021 12501
12022 - if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12502 + if (CFG_IST(ha, CFG_CTRL_2363)) {
12023 12503 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12024 12504 "registers:");
12025 12505 bp = bufp + strlen(bufp);
12026 12506 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12027 12507 if (cnt % 8 == 0) {
12028 12508 *bp++ = '\n';
12029 12509 }
12030 12510 (void) sprintf(bp, "%04x ", fw->risc_host_reg[cnt]);
12031 12511 bp = bp + 6;
12032 12512 }
12033 12513 }
12034 12514
12035 12515 (void) strcat(bp, "\n\nMailbox Registers:");
12036 12516 bp = bufp + strlen(bufp);
12037 - mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
12517 + mbox_cnt = CFG_IST(ha, CFG_CTRL_2363) ? 16 : 8;
12038 12518 for (cnt = 0; cnt < mbox_cnt; cnt++) {
12039 12519 if (cnt % 8 == 0) {
12040 12520 *bp++ = '\n';
12041 12521 }
12042 12522 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12043 12523 bp = bp + 6;
12044 12524 }
12045 12525
12046 - if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12526 + if (CFG_IST(ha, CFG_CTRL_2363)) {
12047 12527 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12048 12528 bp = bufp + strlen(bufp);
12049 12529 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12050 12530 if (cnt % 8 == 0) {
12051 12531 *bp++ = '\n';
12052 12532 }
12053 12533 (void) sprintf(bp, "%04x ", fw->resp_dma_reg[cnt]);
12054 12534 bp = bp + 6;
12055 12535 }
12056 12536 }
12057 12537
12058 12538 (void) strcat(bp, "\n\nDMA Registers:");
12059 12539 bp = bufp + strlen(bufp);
12060 12540 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12061 12541 if (cnt % 8 == 0) {
12062 12542 *bp++ = '\n';
12063 12543 }
12064 12544 (void) sprintf(bp, "%04x ", fw->dma_reg[cnt]);
12065 12545 bp = bp + 6;
12066 12546 }
12067 12547
12068 12548 (void) strcat(bp, "\n\nRISC Hardware Registers:");
12069 12549 bp = bufp + strlen(bufp);
12070 12550 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12071 12551 if (cnt % 8 == 0) {
12072 12552 *bp++ = '\n';
12073 12553 }
12074 12554 (void) sprintf(bp, "%04x ", fw->risc_hdw_reg[cnt]);
12075 12555 bp = bp + 6;
12076 12556 }
12077 12557
12078 12558 (void) strcat(bp, "\n\nRISC GP0 Registers:");
12079 12559 bp = bufp + strlen(bufp);
12080 12560 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12081 12561 if (cnt % 8 == 0) {
12082 12562 *bp++ = '\n';
12083 12563 }
12084 12564 (void) sprintf(bp, "%04x ", fw->risc_gp0_reg[cnt]);
12085 12565 bp = bp + 6;
12086 12566 }
12087 12567
12088 12568 (void) strcat(bp, "\n\nRISC GP1 Registers:");
12089 12569 bp = bufp + strlen(bufp);
12090 12570 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12091 12571 if (cnt % 8 == 0) {
12092 12572 *bp++ = '\n';
12093 12573 }
12094 12574 (void) sprintf(bp, "%04x ", fw->risc_gp1_reg[cnt]);
12095 12575 bp = bp + 6;
12096 12576 }
12097 12577
12098 12578 (void) strcat(bp, "\n\nRISC GP2 Registers:");
12099 12579 bp = bufp + strlen(bufp);
12100 12580 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12101 12581 if (cnt % 8 == 0) {
12102 12582 *bp++ = '\n';
12103 12583 }
12104 12584 (void) sprintf(bp, "%04x ", fw->risc_gp2_reg[cnt]);
12105 12585 bp = bp + 6;
12106 12586 }
12107 12587
12108 12588 (void) strcat(bp, "\n\nRISC GP3 Registers:");
12109 12589 bp = bufp + strlen(bufp);
12110 12590 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12111 12591 if (cnt % 8 == 0) {
12112 12592 *bp++ = '\n';
12113 12593 }
12114 12594 (void) sprintf(bp, "%04x ", fw->risc_gp3_reg[cnt]);
12115 12595 bp = bp + 6;
12116 12596 }
12117 12597
12118 12598 (void) strcat(bp, "\n\nRISC GP4 Registers:");
12119 12599 bp = bufp + strlen(bufp);
12120 12600 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12121 12601 if (cnt % 8 == 0) {
12122 12602 *bp++ = '\n';
12123 12603 }
12124 12604 (void) sprintf(bp, "%04x ", fw->risc_gp4_reg[cnt]);
12125 12605 bp = bp + 6;
12126 12606 }
12127 12607
12128 12608 (void) strcat(bp, "\n\nRISC GP5 Registers:");
12129 12609 bp = bufp + strlen(bufp);
12130 12610 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12131 12611 if (cnt % 8 == 0) {
12132 12612 *bp++ = '\n';
12133 12613 }
12134 12614 (void) sprintf(bp, "%04x ", fw->risc_gp5_reg[cnt]);
12135 12615 bp = bp + 6;
12136 12616 }
12137 12617
12138 12618 (void) strcat(bp, "\n\nRISC GP6 Registers:");
12139 12619 bp = bufp + strlen(bufp);
12140 12620 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12141 12621 if (cnt % 8 == 0) {
12142 12622 *bp++ = '\n';
12143 12623 }
12144 12624 (void) sprintf(bp, "%04x ", fw->risc_gp6_reg[cnt]);
12145 12625 bp = bp + 6;
12146 12626 }
12147 12627
12148 12628 (void) strcat(bp, "\n\nRISC GP7 Registers:");
12149 12629 bp = bufp + strlen(bufp);
12150 12630 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
|
↓ open down ↓ |
94 lines elided |
↑ open up ↑ |
12151 12631 if (cnt % 8 == 0) {
12152 12632 *bp++ = '\n';
12153 12633 }
12154 12634 (void) sprintf(bp, "%04x ", fw->risc_gp7_reg[cnt]);
12155 12635 bp = bp + 6;
12156 12636 }
12157 12637
12158 12638 (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12159 12639 bp = bufp + strlen(bufp);
12160 12640 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12161 - if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12162 - CFG_CTRL_6322)) == 0))) {
12641 + if (cnt == 16 && !CFG_IST(ha, CFG_CTRL_2363)) {
12163 12642 break;
12164 12643 }
12165 12644 if (cnt % 8 == 0) {
12166 12645 *bp++ = '\n';
12167 12646 }
12168 12647 (void) sprintf(bp, "%04x ", fw->frame_buf_hdw_reg[cnt]);
12169 12648 bp = bp + 6;
12170 12649 }
12171 12650
12172 12651 (void) strcat(bp, "\n\nFPM B0 Registers:");
12173 12652 bp = bufp + strlen(bufp);
12174 12653 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12175 12654 if (cnt % 8 == 0) {
12176 12655 *bp++ = '\n';
12177 12656 }
12178 12657 (void) sprintf(bp, "%04x ", fw->fpm_b0_reg[cnt]);
12179 12658 bp = bp + 6;
12180 12659 }
12181 12660
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
12182 12661 (void) strcat(bp, "\n\nFPM B1 Registers:");
12183 12662 bp = bufp + strlen(bufp);
12184 12663 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12185 12664 if (cnt % 8 == 0) {
12186 12665 *bp++ = '\n';
12187 12666 }
12188 12667 (void) sprintf(bp, "%04x ", fw->fpm_b1_reg[cnt]);
12189 12668 bp = bp + 6;
12190 12669 }
12191 12670
12192 - if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12671 + if (CFG_IST(ha, CFG_CTRL_2363)) {
12193 12672 (void) strcat(bp, "\n\nCode RAM Dump:");
12194 12673 bp = bufp + strlen(bufp);
12195 12674 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12196 12675 if (cnt % 8 == 0) {
12197 12676 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12198 12677 bp = bp + 8;
12199 12678 }
12200 12679 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12201 12680 bp = bp + 6;
12202 12681 }
12203 12682
12204 12683 (void) strcat(bp, "\n\nStack RAM Dump:");
12205 12684 bp = bufp + strlen(bufp);
12206 12685 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12207 12686 if (cnt % 8 == 0) {
12208 12687 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12209 12688 bp = bp + 8;
12210 12689 }
12211 12690 (void) sprintf(bp, "%04x ", fw->stack_ram[cnt]);
12212 12691 bp = bp + 6;
12213 12692 }
12214 12693
12215 12694 (void) strcat(bp, "\n\nData RAM Dump:");
12216 12695 bp = bufp + strlen(bufp);
12217 12696 for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12218 12697 if (cnt % 8 == 0) {
12219 12698 (void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12220 12699 bp = bp + 8;
12221 12700 }
12222 12701 (void) sprintf(bp, "%04x ", fw->data_ram[cnt]);
12223 12702 bp = bp + 6;
12224 12703 }
12225 12704 } else {
12226 12705 (void) strcat(bp, "\n\nRISC SRAM:");
12227 12706 bp = bufp + strlen(bufp);
12228 12707 for (cnt = 0; cnt < 0xf000; cnt++) {
12229 12708 if (cnt % 8 == 0) {
12230 12709 (void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12231 12710 bp = bp + 7;
12232 12711 }
12233 12712 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12234 12713 bp = bp + 6;
12235 12714 }
12236 12715 }
12237 12716
12238 12717 (void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12239 12718 bp += strlen(bp);
12240 12719
12241 12720 (void) sprintf(bp, "\n\nRequest Queue");
12242 12721 bp += strlen(bp);
12243 12722 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12244 12723 if (cnt % 8 == 0) {
12245 12724 (void) sprintf(bp, "\n%08x: ", cnt);
12246 12725 bp += strlen(bp);
12247 12726 }
12248 12727 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12249 12728 bp += strlen(bp);
12250 12729 }
12251 12730
12252 12731 (void) sprintf(bp, "\n\nResponse Queue");
12253 12732 bp += strlen(bp);
12254 12733 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
|
↓ open down ↓ |
52 lines elided |
↑ open up ↑ |
12255 12734 if (cnt % 8 == 0) {
12256 12735 (void) sprintf(bp, "\n%08x: ", cnt);
12257 12736 bp += strlen(bp);
12258 12737 }
12259 12738 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12260 12739 bp += strlen(bp);
12261 12740 }
12262 12741
12263 12742 (void) sprintf(bp, "\n");
12264 12743
12265 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12744 + QL_PRINT_10(ha, "done, size=0x%x\n", strlen(bufp));
12266 12745
12267 12746 return (strlen(bufp));
12268 12747 }
12269 12748
12270 12749 /*
12271 12750 * ql_24xx_ascii_fw_dump
12272 12751 * Converts ISP24xx firmware binary dump to ascii.
12273 12752 *
12274 12753 * Input:
12275 12754 * ha = adapter state pointer.
12276 12755 * bptr = buffer pointer.
12277 12756 *
12278 12757 * Returns:
12279 12758 * Amount of data buffer used.
12280 12759 *
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
12281 12760 * Context:
12282 12761 * Kernel context.
12283 12762 */
12284 12763 static size_t
12285 12764 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12286 12765 {
12287 12766 uint32_t cnt;
12288 12767 caddr_t bp = bufp;
12289 12768 ql_24xx_fw_dump_t *fw = ha->ql_dump_ptr;
12290 12769
12291 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12770 + QL_PRINT_3(ha, "started\n");
12292 12771
12293 12772 (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12294 12773 ha->fw_major_version, ha->fw_minor_version,
12295 12774 ha->fw_subminor_version, ha->fw_attributes);
12296 12775 bp += strlen(bp);
12297 12776
12298 12777 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12299 12778
12300 12779 (void) strcat(bp, "\nHost Interface Registers");
12301 12780 bp += strlen(bp);
12302 12781 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12303 12782 if (cnt % 8 == 0) {
12304 12783 (void) sprintf(bp++, "\n");
12305 12784 }
12306 12785
12307 12786 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12308 12787 bp += 9;
12309 12788 }
12310 12789
12311 12790 (void) sprintf(bp, "\n\nMailbox Registers");
12312 12791 bp += strlen(bp);
12313 12792 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12314 12793 if (cnt % 16 == 0) {
12315 12794 (void) sprintf(bp++, "\n");
12316 12795 }
12317 12796
12318 12797 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12319 12798 bp += 5;
12320 12799 }
12321 12800
12322 12801 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12323 12802 bp += strlen(bp);
12324 12803 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12325 12804 if (cnt % 8 == 0) {
12326 12805 (void) sprintf(bp++, "\n");
12327 12806 }
12328 12807
12329 12808 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12330 12809 bp += 9;
12331 12810 }
12332 12811
12333 12812 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12334 12813 bp += strlen(bp);
12335 12814 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12336 12815 if (cnt % 8 == 0) {
12337 12816 (void) sprintf(bp++, "\n");
12338 12817 }
12339 12818
12340 12819 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12341 12820 bp += 9;
12342 12821 }
12343 12822
12344 12823 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12345 12824 bp += strlen(bp);
12346 12825 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12347 12826 if (cnt % 8 == 0) {
12348 12827 (void) sprintf(bp++, "\n");
12349 12828 }
12350 12829
12351 12830 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12352 12831 bp += 9;
12353 12832 }
12354 12833
12355 12834 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12356 12835 bp += strlen(bp);
12357 12836 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12358 12837 if (cnt % 8 == 0) {
12359 12838 (void) sprintf(bp++, "\n");
12360 12839 }
12361 12840
12362 12841 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12363 12842 bp += 9;
12364 12843 }
12365 12844
12366 12845 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12367 12846 bp += strlen(bp);
12368 12847 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12369 12848 if (cnt % 8 == 0) {
12370 12849 (void) sprintf(bp++, "\n");
12371 12850 }
12372 12851
12373 12852 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12374 12853 bp += 9;
12375 12854 }
12376 12855
12377 12856 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12378 12857 bp += strlen(bp);
12379 12858 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12380 12859 if (cnt % 8 == 0) {
12381 12860 (void) sprintf(bp++, "\n");
12382 12861 }
12383 12862
12384 12863 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12385 12864 bp += 9;
12386 12865 }
12387 12866
12388 12867 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12389 12868 bp += strlen(bp);
12390 12869 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12391 12870 if (cnt % 8 == 0) {
12392 12871 (void) sprintf(bp++, "\n");
12393 12872 }
12394 12873
12395 12874 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12396 12875 bp += 9;
12397 12876 }
12398 12877
12399 12878 (void) sprintf(bp, "\n\nCommand DMA Registers");
12400 12879 bp += strlen(bp);
12401 12880 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12402 12881 if (cnt % 8 == 0) {
12403 12882 (void) sprintf(bp++, "\n");
12404 12883 }
12405 12884
12406 12885 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12407 12886 bp += 9;
12408 12887 }
12409 12888
12410 12889 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12411 12890 bp += strlen(bp);
12412 12891 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12413 12892 if (cnt % 8 == 0) {
12414 12893 (void) sprintf(bp++, "\n");
12415 12894 }
12416 12895
12417 12896 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12418 12897 bp += 9;
12419 12898 }
12420 12899
12421 12900 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12422 12901 bp += strlen(bp);
12423 12902 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12424 12903 if (cnt % 8 == 0) {
12425 12904 (void) sprintf(bp++, "\n");
12426 12905 }
12427 12906
12428 12907 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12429 12908 bp += 9;
12430 12909 }
12431 12910
12432 12911 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12433 12912 bp += strlen(bp);
12434 12913 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12435 12914 if (cnt % 8 == 0) {
12436 12915 (void) sprintf(bp++, "\n");
12437 12916 }
12438 12917
12439 12918 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12440 12919 bp += 9;
12441 12920 }
12442 12921
12443 12922 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12444 12923 bp += strlen(bp);
12445 12924 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12446 12925 if (cnt % 8 == 0) {
12447 12926 (void) sprintf(bp++, "\n");
12448 12927 }
12449 12928
12450 12929 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12451 12930 bp += 9;
12452 12931 }
12453 12932
12454 12933 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12455 12934 bp += strlen(bp);
12456 12935 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12457 12936 if (cnt % 8 == 0) {
12458 12937 (void) sprintf(bp++, "\n");
12459 12938 }
12460 12939
12461 12940 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12462 12941 bp += 9;
12463 12942 }
12464 12943
12465 12944 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12466 12945 bp += strlen(bp);
12467 12946 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12468 12947 if (cnt % 8 == 0) {
12469 12948 (void) sprintf(bp++, "\n");
12470 12949 }
12471 12950
12472 12951 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12473 12952 bp += 9;
12474 12953 }
12475 12954
12476 12955 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12477 12956 bp += strlen(bp);
12478 12957 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12479 12958 if (cnt % 8 == 0) {
12480 12959 (void) sprintf(bp++, "\n");
12481 12960 }
12482 12961
12483 12962 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12484 12963 bp += 9;
12485 12964 }
12486 12965
12487 12966 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12488 12967 bp += strlen(bp);
12489 12968 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12490 12969 if (cnt % 8 == 0) {
12491 12970 (void) sprintf(bp++, "\n");
12492 12971 }
12493 12972
12494 12973 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12495 12974 bp += 9;
12496 12975 }
12497 12976
12498 12977 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12499 12978 bp += strlen(bp);
12500 12979 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12501 12980 if (cnt % 8 == 0) {
12502 12981 (void) sprintf(bp++, "\n");
12503 12982 }
12504 12983
12505 12984 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12506 12985 bp += 9;
12507 12986 }
12508 12987
12509 12988 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12510 12989 bp += strlen(bp);
12511 12990 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12512 12991 if (cnt % 8 == 0) {
12513 12992 (void) sprintf(bp++, "\n");
12514 12993 }
12515 12994
12516 12995 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12517 12996 bp += 9;
12518 12997 }
12519 12998
12520 12999 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12521 13000 bp += strlen(bp);
12522 13001 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12523 13002 if (cnt % 8 == 0) {
12524 13003 (void) sprintf(bp++, "\n");
12525 13004 }
12526 13005
12527 13006 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12528 13007 bp += 9;
12529 13008 }
12530 13009
12531 13010 (void) sprintf(bp, "\n\nRISC GP Registers");
12532 13011 bp += strlen(bp);
12533 13012 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12534 13013 if (cnt % 8 == 0) {
12535 13014 (void) sprintf(bp++, "\n");
12536 13015 }
12537 13016
12538 13017 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12539 13018 bp += 9;
12540 13019 }
12541 13020
12542 13021 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12543 13022 bp += strlen(bp);
12544 13023 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12545 13024 if (cnt % 8 == 0) {
12546 13025 (void) sprintf(bp++, "\n");
12547 13026 }
12548 13027
12549 13028 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12550 13029 bp += 9;
12551 13030 }
12552 13031
12553 13032 (void) sprintf(bp, "\n\nLMC Registers");
12554 13033 bp += strlen(bp);
12555 13034 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12556 13035 if (cnt % 8 == 0) {
12557 13036 (void) sprintf(bp++, "\n");
12558 13037 }
12559 13038
12560 13039 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12561 13040 bp += 9;
12562 13041 }
12563 13042
12564 13043 (void) sprintf(bp, "\n\nFPM Hardware Registers");
12565 13044 bp += strlen(bp);
12566 13045 for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12567 13046 if (cnt % 8 == 0) {
12568 13047 (void) sprintf(bp++, "\n");
12569 13048 }
12570 13049
12571 13050 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12572 13051 bp += 9;
12573 13052 }
12574 13053
12575 13054 (void) sprintf(bp, "\n\nFB Hardware Registers");
12576 13055 bp += strlen(bp);
12577 13056 for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12578 13057 if (cnt % 8 == 0) {
12579 13058 (void) sprintf(bp++, "\n");
12580 13059 }
12581 13060
12582 13061 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12583 13062 bp += 9;
12584 13063 }
12585 13064
12586 13065 (void) sprintf(bp, "\n\nCode RAM");
12587 13066 bp += strlen(bp);
12588 13067 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12589 13068 if (cnt % 8 == 0) {
12590 13069 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12591 13070 bp += 11;
12592 13071 }
12593 13072
12594 13073 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12595 13074 bp += 9;
12596 13075 }
12597 13076
12598 13077 (void) sprintf(bp, "\n\nExternal Memory");
12599 13078 bp += strlen(bp);
12600 13079 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12601 13080 if (cnt % 8 == 0) {
12602 13081 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12603 13082 bp += 11;
12604 13083 }
12605 13084 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12606 13085 bp += 9;
12607 13086 }
12608 13087
12609 13088 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12610 13089 bp += strlen(bp);
12611 13090
12612 13091 (void) sprintf(bp, "\n\nRequest Queue");
12613 13092 bp += strlen(bp);
12614 13093 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12615 13094 if (cnt % 8 == 0) {
12616 13095 (void) sprintf(bp, "\n%08x: ", cnt);
12617 13096 bp += strlen(bp);
12618 13097 }
12619 13098 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12620 13099 bp += strlen(bp);
12621 13100 }
12622 13101
12623 13102 (void) sprintf(bp, "\n\nResponse Queue");
12624 13103 bp += strlen(bp);
12625 13104 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12626 13105 if (cnt % 8 == 0) {
12627 13106 (void) sprintf(bp, "\n%08x: ", cnt);
12628 13107 bp += strlen(bp);
12629 13108 }
12630 13109 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12631 13110 bp += strlen(bp);
12632 13111 }
12633 13112
12634 13113 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12635 13114 (ha->fwexttracebuf.bp != NULL)) {
12636 13115 uint32_t cnt_b = 0;
12637 13116 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12638 13117
12639 13118 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12640 13119 bp += strlen(bp);
12641 13120 /* show data address as a byte address, data as long words */
12642 13121 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12643 13122 cnt_b = cnt * 4;
12644 13123 if (cnt_b % 32 == 0) {
12645 13124 (void) sprintf(bp, "\n%08x: ",
12646 13125 (int)(w64 + cnt_b));
12647 13126 bp += 11;
12648 13127 }
12649 13128 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12650 13129 bp += 9;
12651 13130 }
12652 13131 }
12653 13132
12654 13133 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12655 13134 (ha->fwfcetracebuf.bp != NULL)) {
12656 13135 uint32_t cnt_b = 0;
12657 13136 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12658 13137
12659 13138 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12660 13139 bp += strlen(bp);
12661 13140 /* show data address as a byte address, data as long words */
12662 13141 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12663 13142 cnt_b = cnt * 4;
12664 13143 if (cnt_b % 32 == 0) {
12665 13144 (void) sprintf(bp, "\n%08x: ",
12666 13145 (int)(w64 + cnt_b));
12667 13146 bp += 11;
12668 13147 }
|
↓ open down ↓ |
367 lines elided |
↑ open up ↑ |
12669 13148 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12670 13149 bp += 9;
12671 13150 }
12672 13151 }
12673 13152
12674 13153 (void) sprintf(bp, "\n\n");
12675 13154 bp += strlen(bp);
12676 13155
12677 13156 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12678 13157
12679 - QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13158 + QL_PRINT_10(ha, "done=%xh\n", cnt);
12680 13159
12681 13160 return (cnt);
12682 13161 }
12683 13162
12684 13163 /*
12685 - * ql_2581_ascii_fw_dump
12686 - * Converts ISP25xx or ISP81xx firmware binary dump to ascii.
13164 + * ql_25xx_ascii_fw_dump
13165 + * Converts ISP25xx firmware binary dump to ascii.
12687 13166 *
12688 13167 * Input:
12689 13168 * ha = adapter state pointer.
12690 13169 * bptr = buffer pointer.
12691 13170 *
12692 13171 * Returns:
12693 13172 * Amount of data buffer used.
12694 13173 *
12695 13174 * Context:
12696 13175 * Kernel context.
12697 13176 */
12698 13177 static size_t
12699 -ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
13178 +ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12700 13179 {
12701 - uint32_t cnt;
12702 - uint32_t cnt1;
13180 + uint32_t cnt, cnt1, *dp, *dp2;
12703 13181 caddr_t bp = bufp;
12704 13182 ql_25xx_fw_dump_t *fw = ha->ql_dump_ptr;
12705 13183
12706 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13184 + QL_PRINT_3(ha, "started\n");
12707 13185
12708 13186 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12709 13187 ha->fw_major_version, ha->fw_minor_version,
12710 13188 ha->fw_subminor_version, ha->fw_attributes);
12711 13189 bp += strlen(bp);
12712 13190
13191 + (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
13192 + bp += strlen(bp);
13193 +
12713 13194 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12714 13195 bp += strlen(bp);
12715 13196
13197 + (void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
13198 + fw->aer_ues);
13199 + bp += strlen(bp);
13200 +
12716 13201 (void) sprintf(bp, "\nHostRisc Registers");
12717 13202 bp += strlen(bp);
12718 13203 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12719 13204 if (cnt % 8 == 0) {
12720 13205 (void) sprintf(bp++, "\n");
12721 13206 }
12722 13207 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12723 13208 bp += 9;
12724 13209 }
12725 13210
12726 13211 (void) sprintf(bp, "\n\nPCIe Registers");
12727 13212 bp += strlen(bp);
12728 13213 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12729 13214 if (cnt % 8 == 0) {
12730 13215 (void) sprintf(bp++, "\n");
12731 13216 }
12732 13217 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12733 13218 bp += 9;
12734 13219 }
12735 13220
12736 13221 (void) strcat(bp, "\n\nHost Interface Registers");
12737 13222 bp += strlen(bp);
12738 13223 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12739 13224 if (cnt % 8 == 0) {
12740 13225 (void) sprintf(bp++, "\n");
12741 13226 }
12742 13227 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12743 13228 bp += 9;
12744 13229 }
12745 13230
12746 13231 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12747 13232 bp += strlen(bp);
12748 13233 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12749 13234 if (cnt % 8 == 0) {
12750 13235 (void) sprintf(bp++, "\n");
12751 13236 }
12752 13237 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12753 13238 bp += 9;
12754 13239 }
12755 13240
12756 13241 (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12757 13242 fw->risc_io);
12758 13243 bp += strlen(bp);
12759 13244
12760 13245 (void) sprintf(bp, "\n\nMailbox Registers");
12761 13246 bp += strlen(bp);
12762 13247 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12763 13248 if (cnt % 16 == 0) {
12764 13249 (void) sprintf(bp++, "\n");
12765 13250 }
12766 13251 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12767 13252 bp += 5;
12768 13253 }
12769 13254
12770 13255 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12771 13256 bp += strlen(bp);
12772 13257 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12773 13258 if (cnt % 8 == 0) {
12774 13259 (void) sprintf(bp++, "\n");
12775 13260 }
12776 13261 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12777 13262 bp += 9;
12778 13263 }
12779 13264
12780 13265 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12781 13266 bp += strlen(bp);
12782 13267 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12783 13268 if (cnt % 8 == 0) {
12784 13269 (void) sprintf(bp++, "\n");
12785 13270 }
12786 13271 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12787 13272 bp += 9;
12788 13273 }
12789 13274
12790 13275 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12791 13276 bp += strlen(bp);
12792 13277 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12793 13278 if (cnt % 8 == 0) {
12794 13279 (void) sprintf(bp++, "\n");
12795 13280 }
12796 13281 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12797 13282 bp += 9;
12798 13283 }
12799 13284
12800 13285 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12801 13286 bp += strlen(bp);
12802 13287 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12803 13288 if (cnt % 8 == 0) {
12804 13289 (void) sprintf(bp++, "\n");
12805 13290 }
12806 13291 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12807 13292 bp += 9;
12808 13293 }
12809 13294
12810 13295 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12811 13296 bp += strlen(bp);
12812 13297 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12813 13298 if (cnt % 8 == 0) {
12814 13299 (void) sprintf(bp++, "\n");
12815 13300 }
12816 13301 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12817 13302 bp += 9;
12818 13303 }
12819 13304
12820 13305 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12821 13306 bp += strlen(bp);
12822 13307 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12823 13308 if (cnt % 8 == 0) {
12824 13309 (void) sprintf(bp++, "\n");
12825 13310 }
12826 13311 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12827 13312 bp += 9;
12828 13313 }
12829 13314
12830 13315 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12831 13316 bp += strlen(bp);
12832 13317 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12833 13318 if (cnt % 8 == 0) {
12834 13319 (void) sprintf(bp++, "\n");
12835 13320 }
12836 13321 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12837 13322 bp += 9;
12838 13323 }
12839 13324
12840 13325 (void) sprintf(bp, "\n\nASEQ GP Registers");
12841 13326 bp += strlen(bp);
12842 13327 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12843 13328 if (cnt % 8 == 0) {
12844 13329 (void) sprintf(bp++, "\n");
12845 13330 }
12846 13331 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12847 13332 bp += 9;
12848 13333 }
12849 13334
12850 13335 (void) sprintf(bp, "\n\nASEQ-0 Registers");
12851 13336 bp += strlen(bp);
12852 13337 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12853 13338 if (cnt % 8 == 0) {
12854 13339 (void) sprintf(bp++, "\n");
12855 13340 }
12856 13341 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12857 13342 bp += 9;
12858 13343 }
12859 13344
12860 13345 (void) sprintf(bp, "\n\nASEQ-1 Registers");
12861 13346 bp += strlen(bp);
12862 13347 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12863 13348 if (cnt % 8 == 0) {
12864 13349 (void) sprintf(bp++, "\n");
12865 13350 }
12866 13351 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12867 13352 bp += 9;
12868 13353 }
12869 13354
12870 13355 (void) sprintf(bp, "\n\nASEQ-2 Registers");
12871 13356 bp += strlen(bp);
12872 13357 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12873 13358 if (cnt % 8 == 0) {
12874 13359 (void) sprintf(bp++, "\n");
12875 13360 }
|
↓ open down ↓ |
150 lines elided |
↑ open up ↑ |
12876 13361 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12877 13362 bp += 9;
12878 13363 }
12879 13364
12880 13365 (void) sprintf(bp, "\n\nCommand DMA Registers");
12881 13366 bp += strlen(bp);
12882 13367 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12883 13368 if (cnt % 8 == 0) {
12884 13369 (void) sprintf(bp++, "\n");
12885 13370 }
12886 - (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
13371 + (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12887 13372 bp += 9;
12888 13373 }
12889 13374
12890 13375 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12891 13376 bp += strlen(bp);
12892 13377 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12893 13378 if (cnt % 8 == 0) {
12894 13379 (void) sprintf(bp++, "\n");
12895 13380 }
12896 13381 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12897 13382 bp += 9;
12898 13383 }
12899 13384
12900 13385 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12901 13386 bp += strlen(bp);
12902 13387 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12903 13388 if (cnt % 8 == 0) {
12904 13389 (void) sprintf(bp++, "\n");
12905 13390 }
12906 13391 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12907 13392 bp += 9;
12908 13393 }
12909 13394
12910 13395 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12911 13396 bp += strlen(bp);
12912 13397 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12913 13398 if (cnt % 8 == 0) {
12914 13399 (void) sprintf(bp++, "\n");
12915 13400 }
12916 13401 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12917 13402 bp += 9;
12918 13403 }
12919 13404
12920 13405 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12921 13406 bp += strlen(bp);
12922 13407 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12923 13408 if (cnt % 8 == 0) {
12924 13409 (void) sprintf(bp++, "\n");
12925 13410 }
12926 13411 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12927 13412 bp += 9;
12928 13413 }
12929 13414
12930 13415 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12931 13416 bp += strlen(bp);
12932 13417 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12933 13418 if (cnt % 8 == 0) {
12934 13419 (void) sprintf(bp++, "\n");
12935 13420 }
12936 13421 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12937 13422 bp += 9;
12938 13423 }
12939 13424
12940 13425 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12941 13426 bp += strlen(bp);
12942 13427 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12943 13428 if (cnt % 8 == 0) {
12944 13429 (void) sprintf(bp++, "\n");
12945 13430 }
12946 13431 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12947 13432 bp += 9;
12948 13433 }
12949 13434
12950 13435 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12951 13436 bp += strlen(bp);
12952 13437 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12953 13438 if (cnt % 8 == 0) {
12954 13439 (void) sprintf(bp++, "\n");
12955 13440 }
12956 13441 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12957 13442 bp += 9;
12958 13443 }
12959 13444
12960 13445 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12961 13446 bp += strlen(bp);
12962 13447 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12963 13448 if (cnt % 8 == 0) {
12964 13449 (void) sprintf(bp++, "\n");
12965 13450 }
12966 13451 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12967 13452 bp += 9;
12968 13453 }
12969 13454
12970 13455 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12971 13456 bp += strlen(bp);
12972 13457 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12973 13458 if (cnt % 8 == 0) {
12974 13459 (void) sprintf(bp++, "\n");
12975 13460 }
12976 13461 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12977 13462 bp += 9;
12978 13463 }
12979 13464
12980 13465 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12981 13466 bp += strlen(bp);
12982 13467 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12983 13468 if (cnt % 8 == 0) {
12984 13469 (void) sprintf(bp++, "\n");
12985 13470 }
12986 13471 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12987 13472 bp += 9;
12988 13473 }
12989 13474
12990 13475 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12991 13476 bp += strlen(bp);
12992 13477 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12993 13478 if (cnt % 8 == 0) {
12994 13479 (void) sprintf(bp++, "\n");
12995 13480 }
12996 13481 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12997 13482 bp += 9;
12998 13483 }
12999 13484
13000 13485 (void) sprintf(bp, "\n\nRISC GP Registers");
13001 13486 bp += strlen(bp);
13002 13487 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13003 13488 if (cnt % 8 == 0) {
13004 13489 (void) sprintf(bp++, "\n");
13005 13490 }
13006 13491 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13007 13492 bp += 9;
13008 13493 }
13009 13494
13010 13495 (void) sprintf(bp, "\n\nLMC Registers");
13011 13496 bp += strlen(bp);
|
↓ open down ↓ |
115 lines elided |
↑ open up ↑ |
13012 13497 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13013 13498 if (cnt % 8 == 0) {
13014 13499 (void) sprintf(bp++, "\n");
13015 13500 }
13016 13501 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13017 13502 bp += 9;
13018 13503 }
13019 13504
13020 13505 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13021 13506 bp += strlen(bp);
13022 - cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13023 - (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13024 - (uint32_t)(sizeof (fw->fpm_hdw_reg));
13507 + cnt1 = sizeof (fw->fpm_hdw_reg);
13025 13508 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13026 13509 if (cnt % 8 == 0) {
13027 13510 (void) sprintf(bp++, "\n");
13028 13511 }
13029 13512 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13030 13513 bp += 9;
13031 13514 }
13032 13515
13033 13516 (void) sprintf(bp, "\n\nFB Hardware Registers");
13034 13517 bp += strlen(bp);
13035 - cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13036 - (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13037 - (uint32_t)(sizeof (fw->fb_hdw_reg));
13518 + cnt1 = sizeof (fw->fb_hdw_reg);
13038 13519 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13039 13520 if (cnt % 8 == 0) {
13040 13521 (void) sprintf(bp++, "\n");
13041 13522 }
13042 13523 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13043 13524 bp += 9;
13044 13525 }
13045 13526
13046 13527 (void) sprintf(bp, "\n\nCode RAM");
13047 13528 bp += strlen(bp);
13048 13529 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
13049 13530 if (cnt % 8 == 0) {
13050 13531 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13051 13532 bp += 11;
13052 13533 }
13053 13534 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13054 13535 bp += 9;
13055 13536 }
13056 13537
13057 13538 (void) sprintf(bp, "\n\nExternal Memory");
13058 13539 bp += strlen(bp);
13540 + dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
13541 + fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
13059 13542 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13060 13543 if (cnt % 8 == 0) {
13061 13544 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13062 13545 bp += 11;
13063 13546 }
13064 - (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13547 + (void) sprintf(bp, "%08x ", *dp++);
13065 13548 bp += 9;
13066 13549 }
13067 13550
13068 13551 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13069 13552 bp += strlen(bp);
13070 13553
13071 - (void) sprintf(bp, "\n\nRequest Queue");
13072 - bp += strlen(bp);
13073 - for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13074 - if (cnt % 8 == 0) {
13075 - (void) sprintf(bp, "\n%08x: ", cnt);
13554 + dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
13555 + for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
13556 + dp2 = dp;
13557 + for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
13558 + if (*dp2++) {
13559 + break;
13560 + }
13561 + }
13562 + if (cnt1 == fw->req_q_size[cnt] / 4) {
13563 + dp = dp2;
13564 + continue;
13565 + }
13566 + (void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
13567 + bp += strlen(bp);
13568 + for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
13569 + if (cnt1 % 8 == 0) {
13570 + (void) sprintf(bp, "\n%08x: ", cnt1);
13571 + bp += strlen(bp);
13572 + }
13573 + (void) sprintf(bp, "%08x ", *dp++);
13076 13574 bp += strlen(bp);
13077 13575 }
13078 - (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13576 + }
13577 +
13578 + for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
13579 + dp2 = dp;
13580 + for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
13581 + cnt1++) {
13582 + if (*dp2++) {
13583 + break;
13584 + }
13585 + }
13586 + if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
13587 + dp = dp2;
13588 + continue;
13589 + }
13590 + (void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
13079 13591 bp += strlen(bp);
13592 + for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
13593 + cnt1++) {
13594 + if (cnt1 % 8 == 0) {
13595 + (void) sprintf(bp, "\n%08x: ", cnt1);
13596 + bp += strlen(bp);
13597 + }
13598 + (void) sprintf(bp, "%08x ", *dp++);
13599 + bp += strlen(bp);
13600 + }
13080 13601 }
13081 13602
13082 - (void) sprintf(bp, "\n\nResponse Queue");
13603 + if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13604 + (ha->fwexttracebuf.bp != NULL)) {
13605 + uint32_t cnt_b = 0;
13606 + uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13607 +
13608 + (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13609 + bp += strlen(bp);
13610 + /* show data address as a byte address, data as long words */
13611 + for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13612 + cnt_b = cnt * 4;
13613 + if (cnt_b % 32 == 0) {
13614 + (void) sprintf(bp, "\n%08x: ",
13615 + (int)(w64 + cnt_b));
13616 + bp += 11;
13617 + }
13618 + (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13619 + bp += 9;
13620 + }
13621 + }
13622 +
13623 + if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13624 + (ha->fwfcetracebuf.bp != NULL)) {
13625 + uint32_t cnt_b = 0;
13626 + uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13627 +
13628 + (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13629 + bp += strlen(bp);
13630 + /* show data address as a byte address, data as long words */
13631 + for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13632 + cnt_b = cnt * 4;
13633 + if (cnt_b % 32 == 0) {
13634 + (void) sprintf(bp, "\n%08x: ",
13635 + (int)(w64 + cnt_b));
13636 + bp += 11;
13637 + }
13638 + (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13639 + bp += 9;
13640 + }
13641 + }
13642 +
13643 + (void) sprintf(bp, "\n\n");
13083 13644 bp += strlen(bp);
13084 - for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13645 +
13646 + cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13647 +
13648 + QL_PRINT_10(ha, "done=%xh\n", cnt);
13649 +
13650 + return (cnt);
13651 +}
13652 +
13653 +/*
13654 + * ql_81xx_ascii_fw_dump
13655 + * Converts ISP81xx firmware binary dump to ascii.
13656 + *
13657 + * Input:
13658 + * ha = adapter state pointer.
13659 + * bptr = buffer pointer.
13660 + *
13661 + * Returns:
13662 + * Amount of data buffer used.
13663 + *
13664 + * Context:
13665 + * Kernel context.
13666 + */
13667 +static size_t
13668 +ql_81xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
13669 +{
13670 + uint32_t cnt, cnt1, *dp, *dp2;
13671 + caddr_t bp = bufp;
13672 + ql_81xx_fw_dump_t *fw = ha->ql_dump_ptr;
13673 +
13674 + QL_PRINT_3(ha, "started\n");
13675 +
13676 + (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
13677 + ha->fw_major_version, ha->fw_minor_version,
13678 + ha->fw_subminor_version, ha->fw_attributes);
13679 + bp += strlen(bp);
13680 +
13681 + (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
13682 + bp += strlen(bp);
13683 +
13684 + (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
13685 + bp += strlen(bp);
13686 +
13687 + (void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
13688 + fw->aer_ues);
13689 + bp += strlen(bp);
13690 +
13691 + (void) sprintf(bp, "\nHostRisc Registers");
13692 + bp += strlen(bp);
13693 + for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
13085 13694 if (cnt % 8 == 0) {
13086 - (void) sprintf(bp, "\n%08x: ", cnt);
13695 + (void) sprintf(bp++, "\n");
13696 + }
13697 + (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
13698 + bp += 9;
13699 + }
13700 +
13701 + (void) sprintf(bp, "\n\nPCIe Registers");
13702 + bp += strlen(bp);
13703 + for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
13704 + if (cnt % 8 == 0) {
13705 + (void) sprintf(bp++, "\n");
13706 + }
13707 + (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
13708 + bp += 9;
13709 + }
13710 +
13711 + (void) strcat(bp, "\n\nHost Interface Registers");
13712 + bp += strlen(bp);
13713 + for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
13714 + if (cnt % 8 == 0) {
13715 + (void) sprintf(bp++, "\n");
13716 + }
13717 + (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
13718 + bp += 9;
13719 + }
13720 +
13721 + (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
13722 + bp += strlen(bp);
13723 + for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
13724 + if (cnt % 8 == 0) {
13725 + (void) sprintf(bp++, "\n");
13726 + }
13727 + (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
13728 + bp += 9;
13729 + }
13730 +
13731 + (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
13732 + fw->risc_io);
13733 + bp += strlen(bp);
13734 +
13735 + (void) sprintf(bp, "\n\nMailbox Registers");
13736 + bp += strlen(bp);
13737 + for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
13738 + if (cnt % 16 == 0) {
13739 + (void) sprintf(bp++, "\n");
13740 + }
13741 + (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
13742 + bp += 5;
13743 + }
13744 +
13745 + (void) sprintf(bp, "\n\nXSEQ GP Registers");
13746 + bp += strlen(bp);
13747 + for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
13748 + if (cnt % 8 == 0) {
13749 + (void) sprintf(bp++, "\n");
13750 + }
13751 + (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
13752 + bp += 9;
13753 + }
13754 +
13755 + (void) sprintf(bp, "\n\nXSEQ-0 Registers");
13756 + bp += strlen(bp);
13757 + for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
13758 + if (cnt % 8 == 0) {
13759 + (void) sprintf(bp++, "\n");
13760 + }
13761 + (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
13762 + bp += 9;
13763 + }
13764 +
13765 + (void) sprintf(bp, "\n\nXSEQ-1 Registers");
13766 + bp += strlen(bp);
13767 + for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
13768 + if (cnt % 8 == 0) {
13769 + (void) sprintf(bp++, "\n");
13770 + }
13771 + (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
13772 + bp += 9;
13773 + }
13774 +
13775 + (void) sprintf(bp, "\n\nRSEQ GP Registers");
13776 + bp += strlen(bp);
13777 + for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
13778 + if (cnt % 8 == 0) {
13779 + (void) sprintf(bp++, "\n");
13780 + }
13781 + (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
13782 + bp += 9;
13783 + }
13784 +
13785 + (void) sprintf(bp, "\n\nRSEQ-0 Registers");
13786 + bp += strlen(bp);
13787 + for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
13788 + if (cnt % 8 == 0) {
13789 + (void) sprintf(bp++, "\n");
13790 + }
13791 + (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
13792 + bp += 9;
13793 + }
13794 +
13795 + (void) sprintf(bp, "\n\nRSEQ-1 Registers");
13796 + bp += strlen(bp);
13797 + for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
13798 + if (cnt % 8 == 0) {
13799 + (void) sprintf(bp++, "\n");
13800 + }
13801 + (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
13802 + bp += 9;
13803 + }
13804 +
13805 + (void) sprintf(bp, "\n\nRSEQ-2 Registers");
13806 + bp += strlen(bp);
13807 + for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
13808 + if (cnt % 8 == 0) {
13809 + (void) sprintf(bp++, "\n");
13810 + }
13811 + (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
13812 + bp += 9;
13813 + }
13814 +
13815 + (void) sprintf(bp, "\n\nASEQ GP Registers");
13816 + bp += strlen(bp);
13817 + for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
13818 + if (cnt % 8 == 0) {
13819 + (void) sprintf(bp++, "\n");
13820 + }
13821 + (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
13822 + bp += 9;
13823 + }
13824 +
13825 + (void) sprintf(bp, "\n\nASEQ-0 Registers");
13826 + bp += strlen(bp);
13827 + for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
13828 + if (cnt % 8 == 0) {
13829 + (void) sprintf(bp++, "\n");
13830 + }
13831 + (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
13832 + bp += 9;
13833 + }
13834 +
13835 + (void) sprintf(bp, "\n\nASEQ-1 Registers");
13836 + bp += strlen(bp);
13837 + for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
13838 + if (cnt % 8 == 0) {
13839 + (void) sprintf(bp++, "\n");
13840 + }
13841 + (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
13842 + bp += 9;
13843 + }
13844 +
13845 + (void) sprintf(bp, "\n\nASEQ-2 Registers");
13846 + bp += strlen(bp);
13847 + for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
13848 + if (cnt % 8 == 0) {
13849 + (void) sprintf(bp++, "\n");
13850 + }
13851 + (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
13852 + bp += 9;
13853 + }
13854 +
13855 + (void) sprintf(bp, "\n\nCommand DMA Registers");
13856 + bp += strlen(bp);
13857 + for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
13858 + if (cnt % 8 == 0) {
13859 + (void) sprintf(bp++, "\n");
13860 + }
13861 + (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
13862 + bp += 9;
13863 + }
13864 +
13865 + (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
13866 + bp += strlen(bp);
13867 + for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
13868 + if (cnt % 8 == 0) {
13869 + (void) sprintf(bp++, "\n");
13870 + }
13871 + (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
13872 + bp += 9;
13873 + }
13874 +
13875 + (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
13876 + bp += strlen(bp);
13877 + for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
13878 + if (cnt % 8 == 0) {
13879 + (void) sprintf(bp++, "\n");
13880 + }
13881 + (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
13882 + bp += 9;
13883 + }
13884 +
13885 + (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
13886 + bp += strlen(bp);
13887 + for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
13888 + if (cnt % 8 == 0) {
13889 + (void) sprintf(bp++, "\n");
13890 + }
13891 + (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
13892 + bp += 9;
13893 + }
13894 +
13895 + (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
13896 + bp += strlen(bp);
13897 + for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
13898 + if (cnt % 8 == 0) {
13899 + (void) sprintf(bp++, "\n");
13900 + }
13901 + (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
13902 + bp += 9;
13903 + }
13904 +
13905 + (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
13906 + bp += strlen(bp);
13907 + for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
13908 + if (cnt % 8 == 0) {
13909 + (void) sprintf(bp++, "\n");
13910 + }
13911 + (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
13912 + bp += 9;
13913 + }
13914 +
13915 + (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
13916 + bp += strlen(bp);
13917 + for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
13918 + if (cnt % 8 == 0) {
13919 + (void) sprintf(bp++, "\n");
13920 + }
13921 + (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
13922 + bp += 9;
13923 + }
13924 +
13925 + (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
13926 + bp += strlen(bp);
13927 + for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
13928 + if (cnt % 8 == 0) {
13929 + (void) sprintf(bp++, "\n");
13930 + }
13931 + (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
13932 + bp += 9;
13933 + }
13934 +
13935 + (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
13936 + bp += strlen(bp);
13937 + for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
13938 + if (cnt % 8 == 0) {
13939 + (void) sprintf(bp++, "\n");
13940 + }
13941 + (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
13942 + bp += 9;
13943 + }
13944 +
13945 + (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
13946 + bp += strlen(bp);
13947 + for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
13948 + if (cnt % 8 == 0) {
13949 + (void) sprintf(bp++, "\n");
13950 + }
13951 + (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
13952 + bp += 9;
13953 + }
13954 +
13955 + (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
13956 + bp += strlen(bp);
13957 + for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
13958 + if (cnt % 8 == 0) {
13959 + (void) sprintf(bp++, "\n");
13960 + }
13961 + (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
13962 + bp += 9;
13963 + }
13964 +
13965 + (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
13966 + bp += strlen(bp);
13967 + for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
13968 + if (cnt % 8 == 0) {
13969 + (void) sprintf(bp++, "\n");
13970 + }
13971 + (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
13972 + bp += 9;
13973 + }
13974 +
13975 + (void) sprintf(bp, "\n\nRISC GP Registers");
13976 + bp += strlen(bp);
13977 + for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13978 + if (cnt % 8 == 0) {
13979 + (void) sprintf(bp++, "\n");
13980 + }
13981 + (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13982 + bp += 9;
13983 + }
13984 +
13985 + (void) sprintf(bp, "\n\nLMC Registers");
13986 + bp += strlen(bp);
13987 + for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13988 + if (cnt % 8 == 0) {
13989 + (void) sprintf(bp++, "\n");
13990 + }
13991 + (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13992 + bp += 9;
13993 + }
13994 +
13995 + (void) sprintf(bp, "\n\nFPM Hardware Registers");
13996 + bp += strlen(bp);
13997 + cnt1 = sizeof (fw->fpm_hdw_reg);
13998 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13999 + if (cnt % 8 == 0) {
14000 + (void) sprintf(bp++, "\n");
14001 + }
14002 + (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
14003 + bp += 9;
14004 + }
14005 +
14006 + (void) sprintf(bp, "\n\nFB Hardware Registers");
14007 + bp += strlen(bp);
14008 + cnt1 = sizeof (fw->fb_hdw_reg);
14009 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
14010 + if (cnt % 8 == 0) {
14011 + (void) sprintf(bp++, "\n");
14012 + }
14013 + (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
14014 + bp += 9;
14015 + }
14016 +
14017 + (void) sprintf(bp, "\n\nCode RAM");
14018 + bp += strlen(bp);
14019 + for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
14020 + if (cnt % 8 == 0) {
14021 + (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
14022 + bp += 11;
14023 + }
14024 + (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
14025 + bp += 9;
14026 + }
14027 +
14028 + (void) sprintf(bp, "\n\nExternal Memory");
14029 + bp += strlen(bp);
14030 + dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
14031 + fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
14032 + for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
14033 + if (cnt % 8 == 0) {
14034 + (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
14035 + bp += 11;
14036 + }
14037 + (void) sprintf(bp, "%08x ", *dp++);
14038 + bp += 9;
14039 + }
14040 +
14041 + (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
14042 + bp += strlen(bp);
14043 +
14044 + dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
14045 + for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
14046 + dp2 = dp;
14047 + for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14048 + if (*dp2++) {
14049 + break;
14050 + }
14051 + }
14052 + if (cnt1 == fw->req_q_size[cnt] / 4) {
14053 + dp = dp2;
14054 + continue;
14055 + }
14056 + (void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
14057 + bp += strlen(bp);
14058 + for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14059 + if (cnt1 % 8 == 0) {
14060 + (void) sprintf(bp, "\n%08x: ", cnt1);
14061 + bp += strlen(bp);
14062 + }
14063 + (void) sprintf(bp, "%08x ", *dp++);
13087 14064 bp += strlen(bp);
13088 14065 }
13089 - (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
14066 + }
14067 +
14068 + for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
14069 + dp2 = dp;
14070 + for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14071 + cnt1++) {
14072 + if (*dp2++) {
14073 + break;
14074 + }
14075 + }
14076 + if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
14077 + dp = dp2;
14078 + continue;
14079 + }
14080 + (void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
13090 14081 bp += strlen(bp);
14082 + for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14083 + cnt1++) {
14084 + if (cnt1 % 8 == 0) {
14085 + (void) sprintf(bp, "\n%08x: ", cnt1);
14086 + bp += strlen(bp);
14087 + }
14088 + (void) sprintf(bp, "%08x ", *dp++);
14089 + bp += strlen(bp);
14090 + }
13091 14091 }
13092 14092
13093 14093 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13094 14094 (ha->fwexttracebuf.bp != NULL)) {
13095 14095 uint32_t cnt_b = 0;
13096 14096 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13097 14097
13098 14098 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13099 14099 bp += strlen(bp);
13100 14100 /* show data address as a byte address, data as long words */
13101 14101 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13102 14102 cnt_b = cnt * 4;
13103 14103 if (cnt_b % 32 == 0) {
13104 14104 (void) sprintf(bp, "\n%08x: ",
13105 14105 (int)(w64 + cnt_b));
13106 14106 bp += 11;
13107 14107 }
13108 14108 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13109 14109 bp += 9;
13110 14110 }
13111 14111 }
13112 14112
13113 14113 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13114 14114 (ha->fwfcetracebuf.bp != NULL)) {
13115 14115 uint32_t cnt_b = 0;
13116 14116 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13117 14117
13118 14118 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13119 14119 bp += strlen(bp);
13120 14120 /* show data address as a byte address, data as long words */
13121 14121 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13122 14122 cnt_b = cnt * 4;
13123 14123 if (cnt_b % 32 == 0) {
13124 14124 (void) sprintf(bp, "\n%08x: ",
13125 14125 (int)(w64 + cnt_b));
13126 14126 bp += 11;
13127 14127 }
|
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
13128 14128 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13129 14129 bp += 9;
13130 14130 }
13131 14131 }
13132 14132
13133 14133 (void) sprintf(bp, "\n\n");
13134 14134 bp += strlen(bp);
13135 14135
13136 14136 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13137 14137
13138 - QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
14138 + QL_PRINT_10(ha, "done=%xh\n", cnt);
13139 14139
13140 14140 return (cnt);
13141 14141 }
13142 14142
13143 14143 /*
14144 + * ql_8021_ascii_fw_dump
14145 + * Converts ISP8021 firmware binary dump to ascii.
14146 + *
14147 + * Input:
14148 + * ha = adapter state pointer.
14149 + * bptr = buffer pointer.
14150 + *
14151 + * Returns:
14152 + * Amount of data buffer used.
14153 + *
14154 + * Context:
14155 + * Kernel context.
14156 + */
14157 +static size_t
14158 +ql_8021_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
14159 +{
14160 + uint32_t cnt;
14161 + caddr_t bp = bufp;
14162 + uint8_t *fw = ha->ql_dump_ptr;
14163 +
14164 + /*
14165 + * 2 ascii bytes per binary byte + a space and
14166 + * a newline every 16 binary bytes
14167 + */
14168 + cnt = 0;
14169 + while (cnt < ha->ql_dump_size) {
14170 + (void) sprintf(bp, "%02x ", *fw++);
14171 + bp += strlen(bp);
14172 + if (++cnt % 16 == 0) {
14173 + (void) sprintf(bp, "\n");
14174 + bp += strlen(bp);
14175 + }
14176 + }
14177 + if (cnt % 16 != 0) {
14178 + (void) sprintf(bp, "\n");
14179 + bp += strlen(bp);
14180 + }
14181 + cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
14182 + QL_PRINT_10(ha, "done=%xh\n", cnt);
14183 + return (cnt);
14184 +}
14185 +
/*
 * ql_2200_binary_fw_dump
 *	Captures an ISP2200 firmware dump: chip register banks are read
 *	through ql_read_regs() after selecting them via ctrl_status/pcr,
 *	then RISC SRAM is read one word at a time with the
 *	MBC_READ_RAM_WORD mailbox handshake.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	fw:	firmware dump context pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t cnt;
	uint16_t risc_address;
	clock_t timer;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;	/* local mailbox context; mb[0] holds status */
	int rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/* Disable ISP interrupts. */
	ql_disable_intr(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 iterations (1ms waits) for the pause to latch. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/* RISC is paused: snapshot the register banks. */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* Each pcr value selects one RISC GP register page. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM.
		 * Reads 0xf000 words starting at word address 0x1000,
		 * one MBC_READ_RAM_WORD handshake per word.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (INTERRUPT_PENDING(ha)) {
					/*
					 * Semaphore BIT_0 signals that the
					 * mailbox output registers are valid:
					 * mb[0] = status, mb[2] = data word.
					 */
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox_out[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox 0 carries the command status. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
13353 14393
13354 14394 /*
13355 14395 * ql_2300_binary_fw_dump
13356 14396 *
13357 14397 * Input:
13358 14398 * ha: adapter state pointer.
13359 14399 * fw: firmware dump context pointer.
13360 14400 *
13361 14401 * Returns:
13362 14402 * ql local function return status code.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
13363 14403 *
13364 14404 * Context:
13365 14405 * Interrupt or Kernel context, no mailbox commands allowed.
13366 14406 */
13367 14407 static int
13368 14408 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13369 14409 {
13370 14410 clock_t timer;
13371 14411 int rval = QL_SUCCESS;
13372 14412
13373 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14413 + QL_PRINT_3(ha, "started\n");
13374 14414
13375 14415 /* Disable ISP interrupts. */
13376 - WRT16_IO_REG(ha, ictrl, 0);
13377 - ADAPTER_STATE_LOCK(ha);
13378 - ha->flags &= ~INTERRUPTS_ENABLED;
13379 - ADAPTER_STATE_UNLOCK(ha);
14416 + ql_disable_intr(ha);
13380 14417
13381 14418 /* Release mailbox registers. */
13382 14419 WRT16_IO_REG(ha, semaphore, 0);
13383 14420
13384 14421 /* Pause RISC. */
13385 14422 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13386 14423 timer = 30000;
13387 14424 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13388 14425 if (timer-- != 0) {
13389 14426 drv_usecwait(MILLISEC);
13390 14427 } else {
13391 14428 rval = QL_FUNCTION_TIMEOUT;
13392 14429 break;
13393 14430 }
13394 14431 }
13395 14432
13396 14433 if (rval == QL_SUCCESS) {
13397 14434 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13398 14435 sizeof (fw->pbiu_reg) / 2, 16);
13399 14436
13400 14437 (void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13401 14438 sizeof (fw->risc_host_reg) / 2, 16);
13402 14439
13403 14440 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13404 14441 sizeof (fw->mailbox_reg) / 2, 16);
13405 14442
13406 14443 WRT16_IO_REG(ha, ctrl_status, 0x40);
13407 14444 (void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13408 14445 sizeof (fw->resp_dma_reg) / 2, 16);
13409 14446
13410 14447 WRT16_IO_REG(ha, ctrl_status, 0x50);
13411 14448 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13412 14449 sizeof (fw->dma_reg) / 2, 16);
13413 14450
13414 14451 WRT16_IO_REG(ha, ctrl_status, 0);
13415 14452 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13416 14453 sizeof (fw->risc_hdw_reg) / 2, 16);
13417 14454
13418 14455 WRT16_IO_REG(ha, pcr, 0x2000);
13419 14456 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13420 14457 sizeof (fw->risc_gp0_reg) / 2, 16);
13421 14458
13422 14459 WRT16_IO_REG(ha, pcr, 0x2200);
13423 14460 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13424 14461 sizeof (fw->risc_gp1_reg) / 2, 16);
13425 14462
13426 14463 WRT16_IO_REG(ha, pcr, 0x2400);
13427 14464 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13428 14465 sizeof (fw->risc_gp2_reg) / 2, 16);
13429 14466
13430 14467 WRT16_IO_REG(ha, pcr, 0x2600);
13431 14468 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13432 14469 sizeof (fw->risc_gp3_reg) / 2, 16);
13433 14470
13434 14471 WRT16_IO_REG(ha, pcr, 0x2800);
13435 14472 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13436 14473 sizeof (fw->risc_gp4_reg) / 2, 16);
13437 14474
13438 14475 WRT16_IO_REG(ha, pcr, 0x2A00);
13439 14476 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13440 14477 sizeof (fw->risc_gp5_reg) / 2, 16);
13441 14478
13442 14479 WRT16_IO_REG(ha, pcr, 0x2C00);
13443 14480 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13444 14481 sizeof (fw->risc_gp6_reg) / 2, 16);
13445 14482
13446 14483 WRT16_IO_REG(ha, pcr, 0x2E00);
13447 14484 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13448 14485 sizeof (fw->risc_gp7_reg) / 2, 16);
13449 14486
13450 14487 WRT16_IO_REG(ha, ctrl_status, 0x10);
13451 14488 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13452 14489 ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13453 14490
13454 14491 WRT16_IO_REG(ha, ctrl_status, 0x20);
13455 14492 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13456 14493 sizeof (fw->fpm_b0_reg) / 2, 16);
13457 14494
13458 14495 WRT16_IO_REG(ha, ctrl_status, 0x30);
13459 14496 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13460 14497 sizeof (fw->fpm_b1_reg) / 2, 16);
13461 14498
13462 14499 /* Select FPM registers. */
13463 14500 WRT16_IO_REG(ha, ctrl_status, 0x20);
13464 14501
13465 14502 /* FPM Soft Reset. */
13466 14503 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13467 14504
13468 14505 /* Select frame buffer registers. */
13469 14506 WRT16_IO_REG(ha, ctrl_status, 0x10);
13470 14507
13471 14508 /* Reset frame buffer FIFOs. */
13472 14509 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13473 14510
13474 14511 /* Select RISC module registers. */
13475 14512 WRT16_IO_REG(ha, ctrl_status, 0);
13476 14513
13477 14514 /* Reset RISC module. */
|
↓ open down ↓ |
88 lines elided |
↑ open up ↑ |
13478 14515 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13479 14516
13480 14517 /* Reset ISP semaphore. */
13481 14518 WRT16_IO_REG(ha, semaphore, 0);
13482 14519
13483 14520 /* Release RISC module. */
13484 14521 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13485 14522
13486 14523 /* Wait for RISC to recover from reset. */
13487 14524 timer = 30000;
13488 - while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
14525 + while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
13489 14526 if (timer-- != 0) {
13490 14527 drv_usecwait(MILLISEC);
13491 14528 } else {
13492 14529 rval = QL_FUNCTION_TIMEOUT;
13493 14530 break;
13494 14531 }
13495 14532 }
13496 14533
13497 14534 /* Disable RISC pause on FPM parity error. */
13498 14535 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13499 14536 }
13500 14537
13501 14538 /* Get RISC SRAM. */
13502 14539 if (rval == QL_SUCCESS) {
13503 14540 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
13504 14541 }
13505 14542 /* Get STACK SRAM. */
13506 14543 if (rval == QL_SUCCESS) {
13507 14544 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13508 14545 }
13509 14546 /* Get DATA SRAM. */
13510 14547 if (rval == QL_SUCCESS) {
13511 14548 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13512 14549 }
13513 14550
13514 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14551 + QL_PRINT_3(ha, "done\n");
13515 14552
13516 14553 return (rval);
13517 14554 }
13518 14555
13519 14556 /*
13520 14557 * ql_24xx_binary_fw_dump
13521 14558 *
13522 14559 * Input:
13523 14560 * ha: adapter state pointer.
13524 14561 * fw: firmware dump context pointer.
13525 14562 *
13526 14563 * Returns:
13527 14564 * ql local function return status code.
13528 14565 *
13529 14566 * Context:
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
13530 14567 * Interrupt or Kernel context, no mailbox commands allowed.
13531 14568 */
13532 14569 static int
13533 14570 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13534 14571 {
13535 14572 uint32_t *reg32;
13536 14573 void *bp;
13537 14574 clock_t timer;
13538 14575 int rval = QL_SUCCESS;
13539 14576
13540 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14577 + QL_PRINT_3(ha, "started\n");
13541 14578
13542 14579 fw->hccr = RD32_IO_REG(ha, hccr);
13543 14580
13544 14581 /* Pause RISC. */
13545 14582 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13546 14583 /* Disable ISP interrupts. */
13547 - WRT16_IO_REG(ha, ictrl, 0);
14584 + ql_disable_intr(ha);
13548 14585
13549 14586 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13550 14587 for (timer = 30000;
13551 14588 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13552 14589 rval == QL_SUCCESS; timer--) {
13553 14590 if (timer) {
13554 14591 drv_usecwait(100);
13555 14592 } else {
13556 14593 rval = QL_FUNCTION_TIMEOUT;
13557 14594 }
13558 14595 }
13559 14596 }
13560 14597
13561 14598 if (rval == QL_SUCCESS) {
13562 14599 /* Host interface registers. */
13563 14600 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13564 14601 sizeof (fw->host_reg) / 4, 32);
13565 14602
13566 14603 /* Disable ISP interrupts. */
13567 - WRT32_IO_REG(ha, ictrl, 0);
13568 - RD32_IO_REG(ha, ictrl);
13569 - ADAPTER_STATE_LOCK(ha);
13570 - ha->flags &= ~INTERRUPTS_ENABLED;
13571 - ADAPTER_STATE_UNLOCK(ha);
14604 + ql_disable_intr(ha);
13572 14605
13573 14606 /* Shadow registers. */
13574 14607
13575 14608 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13576 14609 RD32_IO_REG(ha, io_base_addr);
13577 14610
13578 14611 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13579 14612 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13580 14613 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13581 14614 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13582 14615
13583 14616 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13584 14617 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13585 14618 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13586 14619 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13587 14620
13588 14621 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13589 14622 WRT_REG_DWORD(ha, reg32, 0xB0200000);
13590 14623 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13591 14624 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13592 14625
13593 14626 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13594 14627 WRT_REG_DWORD(ha, reg32, 0xB0300000);
13595 14628 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13596 14629 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13597 14630
13598 14631 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13599 14632 WRT_REG_DWORD(ha, reg32, 0xB0400000);
13600 14633 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13601 14634 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13602 14635
13603 14636 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13604 14637 WRT_REG_DWORD(ha, reg32, 0xB0500000);
13605 14638 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13606 14639 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13607 14640
13608 14641 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13609 14642 WRT_REG_DWORD(ha, reg32, 0xB0600000);
13610 14643 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13611 14644 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13612 14645
13613 14646 /* Mailbox registers. */
13614 14647 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13615 14648 sizeof (fw->mailbox_reg) / 2, 16);
13616 14649
13617 14650 /* Transfer sequence registers. */
13618 14651
13619 14652 /* XSEQ GP */
13620 14653 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13621 14654 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13622 14655 16, 32);
13623 14656 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13624 14657 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13625 14658 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13626 14659 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13627 14660 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13628 14661 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13629 14662 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13630 14663 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13631 14664 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13632 14665 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13633 14666 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13634 14667 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13635 14668 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13636 14669 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13637 14670
13638 14671 /* XSEQ-0 */
13639 14672 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13640 14673 (void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13641 14674 sizeof (fw->xseq_0_reg) / 4, 32);
13642 14675
13643 14676 /* XSEQ-1 */
13644 14677 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13645 14678 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13646 14679 sizeof (fw->xseq_1_reg) / 4, 32);
13647 14680
13648 14681 /* Receive sequence registers. */
13649 14682
13650 14683 /* RSEQ GP */
13651 14684 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13652 14685 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13653 14686 16, 32);
13654 14687 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13655 14688 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13656 14689 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13657 14690 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13658 14691 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13659 14692 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13660 14693 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13661 14694 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13662 14695 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13663 14696 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13664 14697 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13665 14698 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13666 14699 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13667 14700 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13668 14701
13669 14702 /* RSEQ-0 */
13670 14703 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13671 14704 (void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13672 14705 sizeof (fw->rseq_0_reg) / 4, 32);
13673 14706
13674 14707 /* RSEQ-1 */
13675 14708 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13676 14709 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13677 14710 sizeof (fw->rseq_1_reg) / 4, 32);
13678 14711
13679 14712 /* RSEQ-2 */
13680 14713 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13681 14714 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13682 14715 sizeof (fw->rseq_2_reg) / 4, 32);
13683 14716
13684 14717 /* Command DMA registers. */
13685 14718
13686 14719 WRT32_IO_REG(ha, io_base_addr, 0x7100);
13687 14720 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13688 14721 sizeof (fw->cmd_dma_reg) / 4, 32);
13689 14722
13690 14723 /* Queues. */
13691 14724
13692 14725 /* RequestQ0 */
13693 14726 WRT32_IO_REG(ha, io_base_addr, 0x7200);
13694 14727 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13695 14728 8, 32);
13696 14729 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13697 14730
13698 14731 /* ResponseQ0 */
13699 14732 WRT32_IO_REG(ha, io_base_addr, 0x7300);
13700 14733 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13701 14734 8, 32);
13702 14735 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13703 14736
13704 14737 /* RequestQ1 */
13705 14738 WRT32_IO_REG(ha, io_base_addr, 0x7400);
13706 14739 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13707 14740 8, 32);
13708 14741 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13709 14742
13710 14743 /* Transmit DMA registers. */
13711 14744
13712 14745 /* XMT0 */
13713 14746 WRT32_IO_REG(ha, io_base_addr, 0x7600);
13714 14747 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13715 14748 16, 32);
13716 14749 WRT32_IO_REG(ha, io_base_addr, 0x7610);
13717 14750 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13718 14751
13719 14752 /* XMT1 */
13720 14753 WRT32_IO_REG(ha, io_base_addr, 0x7620);
13721 14754 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13722 14755 16, 32);
13723 14756 WRT32_IO_REG(ha, io_base_addr, 0x7630);
13724 14757 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13725 14758
13726 14759 /* XMT2 */
13727 14760 WRT32_IO_REG(ha, io_base_addr, 0x7640);
13728 14761 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13729 14762 16, 32);
13730 14763 WRT32_IO_REG(ha, io_base_addr, 0x7650);
13731 14764 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13732 14765
13733 14766 /* XMT3 */
13734 14767 WRT32_IO_REG(ha, io_base_addr, 0x7660);
13735 14768 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13736 14769 16, 32);
13737 14770 WRT32_IO_REG(ha, io_base_addr, 0x7670);
13738 14771 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13739 14772
13740 14773 /* XMT4 */
13741 14774 WRT32_IO_REG(ha, io_base_addr, 0x7680);
13742 14775 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13743 14776 16, 32);
13744 14777 WRT32_IO_REG(ha, io_base_addr, 0x7690);
13745 14778 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13746 14779
13747 14780 /* XMT Common */
13748 14781 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13749 14782 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13750 14783 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13751 14784
13752 14785 /* Receive DMA registers. */
13753 14786
13754 14787 /* RCVThread0 */
13755 14788 WRT32_IO_REG(ha, io_base_addr, 0x7700);
13756 14789 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13757 14790 ha->iobase + 0xC0, 16, 32);
13758 14791 WRT32_IO_REG(ha, io_base_addr, 0x7710);
13759 14792 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13760 14793
13761 14794 /* RCVThread1 */
13762 14795 WRT32_IO_REG(ha, io_base_addr, 0x7720);
13763 14796 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13764 14797 ha->iobase + 0xC0, 16, 32);
13765 14798 WRT32_IO_REG(ha, io_base_addr, 0x7730);
13766 14799 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13767 14800
13768 14801 /* RISC registers. */
13769 14802
13770 14803 /* RISC GP */
13771 14804 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13772 14805 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13773 14806 16, 32);
13774 14807 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13775 14808 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13776 14809 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13777 14810 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13778 14811 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13779 14812 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13780 14813 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13781 14814 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13782 14815 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13783 14816 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13784 14817 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13785 14818 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13786 14819 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13787 14820 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13788 14821
13789 14822 /* Local memory controller registers. */
13790 14823
13791 14824 /* LMC */
13792 14825 WRT32_IO_REG(ha, io_base_addr, 0x3000);
13793 14826 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13794 14827 16, 32);
13795 14828 WRT32_IO_REG(ha, io_base_addr, 0x3010);
13796 14829 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13797 14830 WRT32_IO_REG(ha, io_base_addr, 0x3020);
13798 14831 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13799 14832 WRT32_IO_REG(ha, io_base_addr, 0x3030);
13800 14833 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13801 14834 WRT32_IO_REG(ha, io_base_addr, 0x3040);
13802 14835 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13803 14836 WRT32_IO_REG(ha, io_base_addr, 0x3050);
13804 14837 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13805 14838 WRT32_IO_REG(ha, io_base_addr, 0x3060);
13806 14839 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13807 14840
13808 14841 /* Fibre Protocol Module registers. */
13809 14842
13810 14843 /* FPM hardware */
13811 14844 WRT32_IO_REG(ha, io_base_addr, 0x4000);
13812 14845 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13813 14846 16, 32);
13814 14847 WRT32_IO_REG(ha, io_base_addr, 0x4010);
13815 14848 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13816 14849 WRT32_IO_REG(ha, io_base_addr, 0x4020);
13817 14850 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13818 14851 WRT32_IO_REG(ha, io_base_addr, 0x4030);
13819 14852 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13820 14853 WRT32_IO_REG(ha, io_base_addr, 0x4040);
13821 14854 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13822 14855 WRT32_IO_REG(ha, io_base_addr, 0x4050);
13823 14856 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13824 14857 WRT32_IO_REG(ha, io_base_addr, 0x4060);
13825 14858 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13826 14859 WRT32_IO_REG(ha, io_base_addr, 0x4070);
13827 14860 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13828 14861 WRT32_IO_REG(ha, io_base_addr, 0x4080);
13829 14862 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13830 14863 WRT32_IO_REG(ha, io_base_addr, 0x4090);
13831 14864 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13832 14865 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13833 14866 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13834 14867 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13835 14868 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13836 14869
13837 14870 /* Frame Buffer registers. */
13838 14871
13839 14872 /* FB hardware */
13840 14873 WRT32_IO_REG(ha, io_base_addr, 0x6000);
13841 14874 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13842 14875 16, 32);
13843 14876 WRT32_IO_REG(ha, io_base_addr, 0x6010);
13844 14877 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13845 14878 WRT32_IO_REG(ha, io_base_addr, 0x6020);
13846 14879 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13847 14880 WRT32_IO_REG(ha, io_base_addr, 0x6030);
13848 14881 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13849 14882 WRT32_IO_REG(ha, io_base_addr, 0x6040);
13850 14883 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13851 14884 WRT32_IO_REG(ha, io_base_addr, 0x6100);
13852 14885 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13853 14886 WRT32_IO_REG(ha, io_base_addr, 0x6130);
13854 14887 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13855 14888 WRT32_IO_REG(ha, io_base_addr, 0x6150);
13856 14889 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13857 14890 WRT32_IO_REG(ha, io_base_addr, 0x6170);
|
↓ open down ↓ |
276 lines elided |
↑ open up ↑ |
13858 14891 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13859 14892 WRT32_IO_REG(ha, io_base_addr, 0x6190);
13860 14893 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13861 14894 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13862 14895 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13863 14896 }
13864 14897
13865 14898 /* Get the request queue */
13866 14899 if (rval == QL_SUCCESS) {
13867 14900 uint32_t cnt;
13868 - uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14901 + uint32_t *w32 = (uint32_t *)ha->req_q[0]->req_ring.bp;
13869 14902
13870 14903 /* Sync DMA buffer. */
13871 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13872 - REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13873 - DDI_DMA_SYNC_FORKERNEL);
14904 + (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle,
14905 + 0, sizeof (fw->req_q), DDI_DMA_SYNC_FORKERNEL);
13874 14906
13875 14907 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13876 14908 fw->req_q[cnt] = *w32++;
13877 14909 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13878 14910 }
13879 14911 }
13880 14912
13881 14913 /* Get the response queue */
13882 14914 if (rval == QL_SUCCESS) {
13883 14915 uint32_t cnt;
13884 - uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14916 + uint32_t *w32 =
14917 + (uint32_t *)ha->rsp_queues[0]->rsp_ring.bp;
13885 14918
13886 14919 /* Sync DMA buffer. */
13887 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13888 - RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13889 - DDI_DMA_SYNC_FORKERNEL);
14920 + (void) ddi_dma_sync(ha->rsp_queues[0]->rsp_ring.dma_handle,
14921 + 0, sizeof (fw->rsp_q), DDI_DMA_SYNC_FORKERNEL);
13890 14922
13891 14923 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13892 14924 fw->rsp_q[cnt] = *w32++;
13893 14925 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13894 14926 }
13895 14927 }
13896 14928
13897 14929 /* Reset RISC. */
13898 14930 ql_reset_chip(ha);
13899 14931
13900 14932 /* Memory. */
13901 14933 if (rval == QL_SUCCESS) {
13902 14934 /* Code RAM. */
13903 14935 rval = ql_read_risc_ram(ha, 0x20000,
13904 14936 sizeof (fw->code_ram) / 4, fw->code_ram);
13905 14937 }
13906 14938 if (rval == QL_SUCCESS) {
13907 14939 /* External Memory. */
13908 14940 rval = ql_read_risc_ram(ha, 0x100000,
13909 14941 ha->fw_ext_memory_size / 4, fw->ext_mem);
13910 14942 }
13911 14943
13912 14944 /* Get the extended trace buffer */
13913 14945 if (rval == QL_SUCCESS) {
13914 14946 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13915 14947 (ha->fwexttracebuf.bp != NULL)) {
13916 14948 uint32_t cnt;
13917 14949 uint32_t *w32 = ha->fwexttracebuf.bp;
13918 14950
13919 14951 /* Sync DMA buffer. */
13920 14952 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13921 14953 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13922 14954
13923 14955 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13924 14956 fw->ext_trace_buf[cnt] = *w32++;
13925 14957 }
13926 14958 }
13927 14959 }
13928 14960
13929 14961 /* Get the FC event trace buffer */
13930 14962 if (rval == QL_SUCCESS) {
13931 14963 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13932 14964 (ha->fwfcetracebuf.bp != NULL)) {
13933 14965 uint32_t cnt;
13934 14966 uint32_t *w32 = ha->fwfcetracebuf.bp;
13935 14967
13936 14968 /* Sync DMA buffer. */
13937 14969 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13938 14970 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13939 14971
|
↓ open down ↓ |
40 lines elided |
↑ open up ↑ |
13940 14972 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13941 14973 fw->fce_trace_buf[cnt] = *w32++;
13942 14974 }
13943 14975 }
13944 14976 }
13945 14977
13946 14978 if (rval != QL_SUCCESS) {
13947 14979 EL(ha, "failed=%xh\n", rval);
13948 14980 } else {
13949 14981 /*EMPTY*/
13950 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14982 + QL_PRINT_3(ha, "done\n");
13951 14983 }
13952 14984
13953 14985 return (rval);
13954 14986 }
13955 14987
13956 14988 /*
13957 14989 * ql_25xx_binary_fw_dump
13958 14990 *
13959 14991 * Input:
13960 14992 * ha: adapter state pointer.
13961 14993 * fw: firmware dump context pointer.
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
13962 14994 *
13963 14995 * Returns:
13964 14996 * ql local function return status code.
13965 14997 *
13966 14998 * Context:
13967 14999 * Interrupt or Kernel context, no mailbox commands allowed.
13968 15000 */
13969 15001 static int
13970 15002 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13971 15003 {
13972 - uint32_t *reg32;
15004 + uint32_t *reg32, cnt, *w32ptr, index, *dp;
13973 15005 void *bp;
13974 15006 clock_t timer;
13975 15007 int rval = QL_SUCCESS;
13976 15008
13977 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15009 + QL_PRINT_3(ha, "started\n");
13978 15010
15011 + fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
15012 + if (ha->req_q[1] != NULL) {
15013 + fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
15014 + }
15015 + fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
15016 +
15017 + fw->hccr = RD32_IO_REG(ha, hccr);
13979 15018 fw->r2h_status = RD32_IO_REG(ha, risc2host);
15019 + fw->aer_ues = ql_pci_config_get32(ha, 0x104);
13980 15020
13981 15021 /* Pause RISC. */
13982 15022 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13983 15023 /* Disable ISP interrupts. */
13984 - WRT16_IO_REG(ha, ictrl, 0);
15024 + ql_disable_intr(ha);
13985 15025
13986 15026 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13987 15027 for (timer = 30000;
13988 15028 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13989 15029 rval == QL_SUCCESS; timer--) {
13990 15030 if (timer) {
13991 15031 drv_usecwait(100);
13992 15032 if (timer % 10000 == 0) {
13993 15033 EL(ha, "risc pause %d\n", timer);
13994 15034 }
13995 15035 } else {
13996 15036 EL(ha, "risc pause timeout\n");
13997 15037 rval = QL_FUNCTION_TIMEOUT;
13998 15038 }
13999 15039 }
14000 15040 }
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
14001 15041
14002 15042 if (rval == QL_SUCCESS) {
14003 15043
14004 15044 /* Host Interface registers */
14005 15045
14006 15046 /* HostRisc registers. */
14007 15047 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14008 15048 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14009 15049 16, 32);
14010 15050 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14011 - bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15051 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14012 15052
14013 15053 /* PCIe registers. */
14014 15054 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14015 15055 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14016 15056 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14017 15057 3, 32);
14018 15058 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14019 15059 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14020 15060
14021 15061 /* Host interface registers. */
14022 15062 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14023 15063 sizeof (fw->host_reg) / 4, 32);
14024 15064
14025 15065 /* Disable ISP interrupts. */
15066 + ql_disable_intr(ha);
14026 15067
14027 - WRT32_IO_REG(ha, ictrl, 0);
14028 - RD32_IO_REG(ha, ictrl);
14029 - ADAPTER_STATE_LOCK(ha);
14030 - ha->flags &= ~INTERRUPTS_ENABLED;
14031 - ADAPTER_STATE_UNLOCK(ha);
14032 -
14033 15068 /* Shadow registers. */
14034 15069
14035 15070 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14036 15071 RD32_IO_REG(ha, io_base_addr);
14037 15072
14038 15073 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14039 15074 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14040 15075 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14041 15076 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14042 15077
14043 15078 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14044 15079 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14045 15080 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14046 15081 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14047 15082
14048 15083 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14049 15084 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14050 15085 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14051 15086 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14052 15087
14053 15088 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14054 15089 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14055 15090 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14056 15091 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14057 15092
14058 15093 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14059 15094 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14060 15095 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14061 15096 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14062 15097
14063 15098 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14064 15099 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14065 15100 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14066 15101 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14067 15102
14068 15103 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14069 15104 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14070 15105 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14071 15106 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14072 15107
14073 15108 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14074 15109 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14075 15110 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14076 15111 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14077 15112
14078 15113 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14079 15114 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14080 15115 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14081 15116 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14082 15117
14083 15118 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14084 15119 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14085 15120 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14086 15121 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14087 15122
14088 15123 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14089 15124 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14090 15125 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14091 15126 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14092 15127
14093 15128 /* RISC I/O register. */
14094 15129
14095 15130 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14096 15131 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14097 15132 1, 32);
14098 15133
14099 15134 /* Mailbox registers. */
14100 15135
14101 15136 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14102 15137 sizeof (fw->mailbox_reg) / 2, 16);
14103 15138
14104 15139 /* Transfer sequence registers. */
14105 15140
14106 15141 /* XSEQ GP */
14107 15142 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14108 15143 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14109 15144 16, 32);
14110 15145 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14111 15146 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14112 15147 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14113 15148 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14114 15149 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14115 15150 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14116 15151 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14117 15152 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14118 15153 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14119 15154 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14120 15155 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14121 15156 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14122 15157 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14123 15158 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14124 15159
14125 15160 /* XSEQ-0 */
14126 15161 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14127 15162 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14128 15163 16, 32);
14129 15164 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14130 15165 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14131 15166 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14132 15167 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14133 15168
14134 15169 /* XSEQ-1 */
14135 15170 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14136 15171 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14137 15172 16, 32);
14138 15173
14139 15174 /* Receive sequence registers. */
14140 15175
14141 15176 /* RSEQ GP */
14142 15177 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14143 15178 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14144 15179 16, 32);
14145 15180 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14146 15181 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14147 15182 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14148 15183 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14149 15184 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14150 15185 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14151 15186 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14152 15187 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14153 15188 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14154 15189 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14155 15190 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14156 15191 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14157 15192 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14158 15193 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14159 15194
14160 15195 /* RSEQ-0 */
14161 15196 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14162 15197 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14163 15198 16, 32);
14164 15199 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14165 15200 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14166 15201
14167 15202 /* RSEQ-1 */
14168 15203 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14169 15204 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14170 15205 sizeof (fw->rseq_1_reg) / 4, 32);
14171 15206
14172 15207 /* RSEQ-2 */
14173 15208 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14174 15209 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14175 15210 sizeof (fw->rseq_2_reg) / 4, 32);
14176 15211
14177 15212 /* Auxiliary sequencer registers. */
14178 15213
14179 15214 /* ASEQ GP */
14180 15215 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14181 15216 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14182 15217 16, 32);
14183 15218 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14184 15219 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14185 15220 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14186 15221 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14187 15222 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14188 15223 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14189 15224 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14190 15225 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14191 15226 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14192 15227 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14193 15228 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14194 15229 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14195 15230 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14196 15231 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14197 15232
14198 15233 /* ASEQ-0 */
14199 15234 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14200 15235 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14201 15236 16, 32);
14202 15237 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14203 15238 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14204 15239
14205 15240 /* ASEQ-1 */
14206 15241 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14207 15242 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14208 15243 16, 32);
14209 15244
14210 15245 /* ASEQ-2 */
14211 15246 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14212 15247 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14213 15248 16, 32);
14214 15249
14215 15250 /* Command DMA registers. */
14216 15251
14217 15252 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14218 15253 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14219 15254 sizeof (fw->cmd_dma_reg) / 4, 32);
14220 15255
14221 15256 /* Queues. */
14222 15257
14223 15258 /* RequestQ0 */
14224 15259 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14225 15260 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14226 15261 8, 32);
14227 15262 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14228 15263
14229 15264 /* ResponseQ0 */
14230 15265 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14231 15266 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14232 15267 8, 32);
14233 15268 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14234 15269
14235 15270 /* RequestQ1 */
14236 15271 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14237 15272 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14238 15273 8, 32);
14239 15274 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14240 15275
14241 15276 /* Transmit DMA registers. */
14242 15277
14243 15278 /* XMT0 */
14244 15279 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14245 15280 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14246 15281 16, 32);
14247 15282 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14248 15283 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14249 15284
14250 15285 /* XMT1 */
14251 15286 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14252 15287 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14253 15288 16, 32);
14254 15289 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14255 15290 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14256 15291
14257 15292 /* XMT2 */
14258 15293 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14259 15294 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14260 15295 16, 32);
14261 15296 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14262 15297 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14263 15298
14264 15299 /* XMT3 */
14265 15300 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14266 15301 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14267 15302 16, 32);
14268 15303 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14269 15304 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14270 15305
14271 15306 /* XMT4 */
14272 15307 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14273 15308 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14274 15309 16, 32);
14275 15310 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14276 15311 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14277 15312
14278 15313 /* XMT Common */
14279 15314 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14280 15315 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14281 15316 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14282 15317
14283 15318 /* Receive DMA registers. */
14284 15319
14285 15320 /* RCVThread0 */
14286 15321 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14287 15322 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14288 15323 ha->iobase + 0xC0, 16, 32);
14289 15324 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14290 15325 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14291 15326
14292 15327 /* RCVThread1 */
14293 15328 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14294 15329 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14295 15330 ha->iobase + 0xC0, 16, 32);
14296 15331 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14297 15332 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14298 15333
14299 15334 /* RISC registers. */
14300 15335
14301 15336 /* RISC GP */
14302 15337 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14303 15338 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14304 15339 16, 32);
14305 15340 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14306 15341 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14307 15342 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14308 15343 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14309 15344 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14310 15345 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14311 15346 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14312 15347 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14313 15348 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14314 15349 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14315 15350 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14316 15351 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14317 15352 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14318 15353 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14319 15354
14320 15355 /* Local memory controller (LMC) registers. */
14321 15356
14322 15357 /* LMC */
14323 15358 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14324 15359 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14325 15360 16, 32);
14326 15361 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14327 15362 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14328 15363 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14329 15364 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14330 15365 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14331 15366 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14332 15367 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14333 15368 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14334 15369 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14335 15370 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14336 15371 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14337 15372 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14338 15373 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14339 15374 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14340 15375
14341 15376 /* Fibre Protocol Module registers. */
14342 15377
14343 15378 /* FPM hardware */
14344 15379 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14345 15380 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14346 15381 16, 32);
14347 15382 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14348 15383 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14349 15384 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14350 15385 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14351 15386 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14352 15387 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14353 15388 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14354 15389 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14355 15390 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14356 15391 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14357 15392 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14358 15393 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14359 15394 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14360 15395 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14361 15396 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14362 15397 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14363 15398 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14364 15399 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14365 15400 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14366 15401 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14367 15402 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14368 15403 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14369 15404
14370 15405 /* Frame Buffer registers. */
14371 15406
14372 15407 /* FB hardware */
14373 15408 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14374 15409 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14375 15410 16, 32);
14376 15411 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14377 15412 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14378 15413 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14379 15414 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14380 15415 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14381 15416 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14382 15417 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14383 15418 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14384 15419 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14385 15420 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14386 15421 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14387 15422 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14388 15423 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14389 15424 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
|
↓ open down ↓ |
347 lines elided |
↑ open up ↑ |
14390 15425 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14391 15426 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14392 15427 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14393 15428 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14394 15429 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14395 15430 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14396 15431 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14397 15432 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14398 15433 }
14399 15434
14400 - /* Get the request queue */
14401 15435 if (rval == QL_SUCCESS) {
14402 - uint32_t cnt;
14403 - uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14404 -
14405 - /* Sync DMA buffer. */
14406 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14407 - REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14408 - DDI_DMA_SYNC_FORKERNEL);
14409 -
14410 - for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14411 - fw->req_q[cnt] = *w32++;
14412 - LITTLE_ENDIAN_32(&fw->req_q[cnt]);
15436 + /* Get the Queue Pointers */
15437 + dp = fw->req_rsp_ext_mem;
15438 + for (index = 0; index < ha->rsp_queues_cnt; index++) {
15439 + if (index == 0 && ha->flags & MULTI_QUEUE) {
15440 + *dp = RD32_MBAR_REG(ha,
15441 + ha->req_q[0]->mbar_req_in);
15442 + LITTLE_ENDIAN_32(dp);
15443 + dp++;
15444 + *dp = RD32_MBAR_REG(ha,
15445 + ha->req_q[0]->mbar_req_out);
15446 + LITTLE_ENDIAN_32(dp);
15447 + dp++;
15448 + } else if (index == 1 && ha->flags & MULTI_QUEUE) {
15449 + *dp = RD32_MBAR_REG(ha,
15450 + ha->req_q[1]->mbar_req_in);
15451 + LITTLE_ENDIAN_32(dp);
15452 + dp++;
15453 + *dp = RD32_MBAR_REG(ha,
15454 + ha->req_q[1]->mbar_req_out);
15455 + LITTLE_ENDIAN_32(dp);
15456 + dp++;
15457 + } else {
15458 + *dp++ = 0;
15459 + *dp++ = 0;
15460 + }
15461 + if (ha->flags & MULTI_QUEUE) {
15462 + *dp = RD32_MBAR_REG(ha,
15463 + ha->rsp_queues[index]->mbar_rsp_in);
15464 + LITTLE_ENDIAN_32(dp);
15465 + dp++;
15466 + *dp = RD32_MBAR_REG(ha,
15467 + ha->rsp_queues[index]->mbar_rsp_out);
15468 + LITTLE_ENDIAN_32(dp);
15469 + dp++;
15470 + } else {
15471 + *dp++ = 0;
15472 + *dp++ = 0;
15473 + }
14413 15474 }
14414 - }
15475 + /* Get the request queue */
15476 + (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
15477 + DDI_DMA_SYNC_FORCPU);
15478 + w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
15479 + for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
15480 + *dp = *w32ptr++;
15481 + LITTLE_ENDIAN_32(dp);
15482 + dp++;
15483 + }
15484 + if (ha->req_q[1] != NULL) {
15485 + (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
15486 + 0, 0, DDI_DMA_SYNC_FORCPU);
15487 + w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
15488 + for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
15489 + *dp = *w32ptr++;
15490 + LITTLE_ENDIAN_32(dp);
15491 + dp++;
15492 + }
15493 + }
14415 15494
14416 - /* Get the respons queue */
14417 - if (rval == QL_SUCCESS) {
14418 - uint32_t cnt;
14419 - uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14420 -
14421 - /* Sync DMA buffer. */
14422 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14423 - RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14424 - DDI_DMA_SYNC_FORKERNEL);
14425 -
14426 - for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14427 - fw->rsp_q[cnt] = *w32++;
14428 - LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
15495 + /* Get the response queues */
15496 + for (index = 0; index < ha->rsp_queues_cnt; index++) {
15497 + (void) ddi_dma_sync(
15498 + ha->rsp_queues[index]->rsp_ring.dma_handle,
15499 + 0, 0, DDI_DMA_SYNC_FORCPU);
15500 + w32ptr = (uint32_t *)
15501 + ha->rsp_queues[index]->rsp_ring.bp;
15502 + for (cnt = 0;
15503 + cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
15504 + cnt++) {
15505 + *dp = *w32ptr++;
15506 + LITTLE_ENDIAN_32(dp);
15507 + dp++;
15508 + }
14429 15509 }
14430 15510 }
14431 15511
14432 15512 /* Reset RISC. */
14433 -
14434 15513 ql_reset_chip(ha);
14435 15514
14436 15515 /* Memory. */
14437 -
14438 15516 if (rval == QL_SUCCESS) {
14439 15517 /* Code RAM. */
14440 15518 rval = ql_read_risc_ram(ha, 0x20000,
14441 15519 sizeof (fw->code_ram) / 4, fw->code_ram);
14442 15520 }
14443 15521 if (rval == QL_SUCCESS) {
14444 15522 /* External Memory. */
14445 15523 rval = ql_read_risc_ram(ha, 0x100000,
14446 - ha->fw_ext_memory_size / 4, fw->ext_mem);
15524 + ha->fw_ext_memory_size / 4, dp);
14447 15525 }
14448 15526
14449 15527 /* Get the FC event trace buffer */
14450 15528 if (rval == QL_SUCCESS) {
14451 15529 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14452 15530 (ha->fwfcetracebuf.bp != NULL)) {
14453 15531 uint32_t cnt;
14454 15532 uint32_t *w32 = ha->fwfcetracebuf.bp;
14455 15533
14456 15534 /* Sync DMA buffer. */
14457 15535 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14458 15536 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14459 15537
14460 15538 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14461 15539 fw->fce_trace_buf[cnt] = *w32++;
14462 15540 }
14463 15541 }
14464 15542 }
14465 15543
14466 15544 /* Get the extended trace buffer */
14467 15545 if (rval == QL_SUCCESS) {
14468 15546 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14469 15547 (ha->fwexttracebuf.bp != NULL)) {
14470 15548 uint32_t cnt;
14471 15549 uint32_t *w32 = ha->fwexttracebuf.bp;
14472 15550
14473 15551 /* Sync DMA buffer. */
14474 15552 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14475 15553 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14476 15554
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
14477 15555 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14478 15556 fw->ext_trace_buf[cnt] = *w32++;
14479 15557 }
14480 15558 }
14481 15559 }
14482 15560
14483 15561 if (rval != QL_SUCCESS) {
14484 15562 EL(ha, "failed=%xh\n", rval);
14485 15563 } else {
14486 15564 /*EMPTY*/
14487 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15565 + QL_PRINT_3(ha, "done\n");
14488 15566 }
14489 15567
14490 15568 return (rval);
14491 15569 }
14492 15570
14493 15571 /*
14494 15572 * ql_81xx_binary_fw_dump
14495 15573 *
14496 15574 * Input:
14497 15575 * ha: adapter state pointer.
14498 15576 * fw: firmware dump context pointer.
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
14499 15577 *
14500 15578 * Returns:
14501 15579 * ql local function return status code.
14502 15580 *
14503 15581 * Context:
14504 15582 * Interrupt or Kernel context, no mailbox commands allowed.
14505 15583 */
14506 15584 static int
14507 15585 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14508 15586 {
14509 - uint32_t *reg32;
15587 + uint32_t *reg32, cnt, *w32ptr, index, *dp;
14510 15588 void *bp;
14511 15589 clock_t timer;
14512 15590 int rval = QL_SUCCESS;
14513 15591
14514 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15592 + QL_PRINT_3(ha, "started\n");
14515 15593
15594 + fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
15595 + if (ha->req_q[1] != NULL) {
15596 + fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
15597 + }
15598 + fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
15599 +
15600 + fw->hccr = RD32_IO_REG(ha, hccr);
14516 15601 fw->r2h_status = RD32_IO_REG(ha, risc2host);
15602 + fw->aer_ues = ql_pci_config_get32(ha, 0x104);
14517 15603
14518 15604 /* Pause RISC. */
14519 15605 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14520 15606 /* Disable ISP interrupts. */
14521 - WRT16_IO_REG(ha, ictrl, 0);
15607 + ql_disable_intr(ha);
14522 15608
14523 15609 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14524 15610 for (timer = 30000;
14525 15611 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14526 15612 rval == QL_SUCCESS; timer--) {
14527 15613 if (timer) {
14528 15614 drv_usecwait(100);
14529 15615 if (timer % 10000 == 0) {
14530 15616 EL(ha, "risc pause %d\n", timer);
14531 15617 }
14532 15618 } else {
14533 15619 EL(ha, "risc pause timeout\n");
14534 15620 rval = QL_FUNCTION_TIMEOUT;
14535 15621 }
14536 15622 }
14537 15623 }
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
14538 15624
14539 15625 if (rval == QL_SUCCESS) {
14540 15626
14541 15627 /* Host Interface registers */
14542 15628
14543 15629 /* HostRisc registers. */
14544 15630 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14545 15631 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14546 15632 16, 32);
14547 15633 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14548 - bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15634 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14549 15635
14550 15636 /* PCIe registers. */
14551 15637 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14552 15638 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14553 15639 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14554 15640 3, 32);
14555 15641 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14556 15642 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14557 15643
14558 15644 /* Host interface registers. */
14559 15645 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14560 15646 sizeof (fw->host_reg) / 4, 32);
14561 15647
14562 15648 /* Disable ISP interrupts. */
15649 + ql_disable_intr(ha);
14563 15650
14564 - WRT32_IO_REG(ha, ictrl, 0);
14565 - RD32_IO_REG(ha, ictrl);
14566 - ADAPTER_STATE_LOCK(ha);
14567 - ha->flags &= ~INTERRUPTS_ENABLED;
14568 - ADAPTER_STATE_UNLOCK(ha);
14569 -
14570 15651 /* Shadow registers. */
14571 15652
14572 15653 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14573 15654 RD32_IO_REG(ha, io_base_addr);
14574 15655
14575 15656 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14576 15657 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14577 15658 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14578 15659 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14579 15660
14580 15661 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14581 15662 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14582 15663 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14583 15664 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14584 15665
14585 15666 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14586 15667 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14587 15668 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14588 15669 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14589 15670
14590 15671 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14591 15672 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14592 15673 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14593 15674 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14594 15675
14595 15676 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14596 15677 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14597 15678 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14598 15679 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14599 15680
14600 15681 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14601 15682 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14602 15683 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14603 15684 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14604 15685
14605 15686 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14606 15687 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14607 15688 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14608 15689 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14609 15690
14610 15691 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14611 15692 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14612 15693 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14613 15694 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14614 15695
14615 15696 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14616 15697 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14617 15698 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14618 15699 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14619 15700
14620 15701 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14621 15702 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14622 15703 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14623 15704 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14624 15705
14625 15706 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14626 15707 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14627 15708 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14628 15709 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14629 15710
14630 15711 /* RISC I/O register. */
14631 15712
14632 15713 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14633 15714 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14634 15715 1, 32);
14635 15716
14636 15717 /* Mailbox registers. */
14637 15718
14638 15719 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14639 15720 sizeof (fw->mailbox_reg) / 2, 16);
14640 15721
14641 15722 /* Transfer sequence registers. */
14642 15723
14643 15724 /* XSEQ GP */
14644 15725 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14645 15726 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14646 15727 16, 32);
14647 15728 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14648 15729 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14649 15730 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14650 15731 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14651 15732 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14652 15733 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14653 15734 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14654 15735 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14655 15736 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14656 15737 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14657 15738 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14658 15739 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14659 15740 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14660 15741 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14661 15742
14662 15743 /* XSEQ-0 */
14663 15744 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14664 15745 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14665 15746 16, 32);
14666 15747 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14667 15748 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14668 15749 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14669 15750 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14670 15751
14671 15752 /* XSEQ-1 */
14672 15753 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14673 15754 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14674 15755 16, 32);
14675 15756
14676 15757 /* Receive sequence registers. */
14677 15758
14678 15759 /* RSEQ GP */
14679 15760 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14680 15761 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14681 15762 16, 32);
14682 15763 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14683 15764 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14684 15765 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14685 15766 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14686 15767 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14687 15768 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14688 15769 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14689 15770 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14690 15771 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14691 15772 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14692 15773 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14693 15774 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14694 15775 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14695 15776 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14696 15777
14697 15778 /* RSEQ-0 */
14698 15779 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14699 15780 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14700 15781 16, 32);
14701 15782 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14702 15783 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14703 15784
14704 15785 /* RSEQ-1 */
14705 15786 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14706 15787 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14707 15788 sizeof (fw->rseq_1_reg) / 4, 32);
14708 15789
14709 15790 /* RSEQ-2 */
14710 15791 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14711 15792 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14712 15793 sizeof (fw->rseq_2_reg) / 4, 32);
14713 15794
14714 15795 /* Auxiliary sequencer registers. */
14715 15796
14716 15797 /* ASEQ GP */
14717 15798 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14718 15799 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14719 15800 16, 32);
14720 15801 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14721 15802 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14722 15803 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14723 15804 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14724 15805 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14725 15806 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14726 15807 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14727 15808 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14728 15809 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14729 15810 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14730 15811 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14731 15812 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14732 15813 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14733 15814 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14734 15815
14735 15816 /* ASEQ-0 */
14736 15817 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14737 15818 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14738 15819 16, 32);
14739 15820 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14740 15821 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14741 15822
14742 15823 /* ASEQ-1 */
14743 15824 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14744 15825 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14745 15826 16, 32);
14746 15827
14747 15828 /* ASEQ-2 */
14748 15829 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14749 15830 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14750 15831 16, 32);
14751 15832
14752 15833 /* Command DMA registers. */
14753 15834
14754 15835 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14755 15836 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14756 15837 sizeof (fw->cmd_dma_reg) / 4, 32);
14757 15838
14758 15839 /* Queues. */
14759 15840
14760 15841 /* RequestQ0 */
14761 15842 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14762 15843 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14763 15844 8, 32);
14764 15845 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14765 15846
14766 15847 /* ResponseQ0 */
14767 15848 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14768 15849 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14769 15850 8, 32);
14770 15851 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14771 15852
14772 15853 /* RequestQ1 */
14773 15854 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14774 15855 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14775 15856 8, 32);
14776 15857 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14777 15858
14778 15859 /* Transmit DMA registers. */
14779 15860
14780 15861 /* XMT0 */
14781 15862 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14782 15863 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14783 15864 16, 32);
14784 15865 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14785 15866 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14786 15867
14787 15868 /* XMT1 */
14788 15869 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14789 15870 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14790 15871 16, 32);
14791 15872 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14792 15873 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14793 15874
14794 15875 /* XMT2 */
14795 15876 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14796 15877 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14797 15878 16, 32);
14798 15879 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14799 15880 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14800 15881
14801 15882 /* XMT3 */
14802 15883 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14803 15884 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14804 15885 16, 32);
14805 15886 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14806 15887 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14807 15888
14808 15889 /* XMT4 */
14809 15890 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14810 15891 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14811 15892 16, 32);
14812 15893 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14813 15894 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14814 15895
14815 15896 /* XMT Common */
14816 15897 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14817 15898 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14818 15899 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14819 15900
14820 15901 /* Receive DMA registers. */
14821 15902
14822 15903 /* RCVThread0 */
14823 15904 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14824 15905 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14825 15906 ha->iobase + 0xC0, 16, 32);
14826 15907 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14827 15908 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14828 15909
14829 15910 /* RCVThread1 */
14830 15911 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14831 15912 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14832 15913 ha->iobase + 0xC0, 16, 32);
14833 15914 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14834 15915 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14835 15916
14836 15917 /* RISC registers. */
14837 15918
14838 15919 /* RISC GP */
14839 15920 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14840 15921 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14841 15922 16, 32);
14842 15923 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14843 15924 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14844 15925 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14845 15926 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14846 15927 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14847 15928 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14848 15929 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14849 15930 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14850 15931 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14851 15932 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14852 15933 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14853 15934 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14854 15935 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14855 15936 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14856 15937
14857 15938 /* Local memory controller (LMC) registers. */
14858 15939
14859 15940 /* LMC */
14860 15941 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14861 15942 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14862 15943 16, 32);
14863 15944 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14864 15945 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14865 15946 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14866 15947 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14867 15948 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14868 15949 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14869 15950 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14870 15951 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14871 15952 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14872 15953 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14873 15954 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14874 15955 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14875 15956 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14876 15957 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14877 15958
14878 15959 /* Fibre Protocol Module registers. */
14879 15960
14880 15961 /* FPM hardware */
14881 15962 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14882 15963 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14883 15964 16, 32);
14884 15965 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14885 15966 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14886 15967 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14887 15968 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14888 15969 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14889 15970 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14890 15971 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14891 15972 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14892 15973 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14893 15974 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14894 15975 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14895 15976 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14896 15977 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14897 15978 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14898 15979 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14899 15980 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14900 15981 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14901 15982 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14902 15983 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14903 15984 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14904 15985 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14905 15986 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14906 15987 WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14907 15988 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14908 15989 WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14909 15990 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14910 15991
14911 15992 /* Frame Buffer registers. */
14912 15993
14913 15994 /* FB hardware */
14914 15995 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14915 15996 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14916 15997 16, 32);
14917 15998 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14918 15999 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14919 16000 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14920 16001 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14921 16002 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14922 16003 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14923 16004 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14924 16005 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14925 16006 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14926 16007 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14927 16008 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14928 16009 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14929 16010 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14930 16011 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14931 16012 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14932 16013 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
|
↓ open down ↓ |
353 lines elided |
↑ open up ↑ |
14933 16014 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14934 16015 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14935 16016 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14936 16017 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14937 16018 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14938 16019 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14939 16020 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14940 16021 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14941 16022 }
14942 16023
14943 - /* Get the request queue */
14944 16024 if (rval == QL_SUCCESS) {
14945 - uint32_t cnt;
14946 - uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14947 -
14948 - /* Sync DMA buffer. */
14949 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14950 - REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14951 - DDI_DMA_SYNC_FORKERNEL);
14952 -
14953 - for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14954 - fw->req_q[cnt] = *w32++;
14955 - LITTLE_ENDIAN_32(&fw->req_q[cnt]);
16025 + /* Get the Queue Pointers */
16026 + dp = fw->req_rsp_ext_mem;
16027 + for (index = 0; index < ha->rsp_queues_cnt; index++) {
16028 + if (index == 0 && ha->flags & MULTI_QUEUE) {
16029 + *dp = RD32_MBAR_REG(ha,
16030 + ha->req_q[0]->mbar_req_in);
16031 + LITTLE_ENDIAN_32(dp);
16032 + dp++;
16033 + *dp = RD32_MBAR_REG(ha,
16034 + ha->req_q[0]->mbar_req_out);
16035 + LITTLE_ENDIAN_32(dp);
16036 + dp++;
16037 + } else if (index == 1 && ha->flags & MULTI_QUEUE) {
16038 + *dp = RD32_MBAR_REG(ha,
16039 + ha->req_q[1]->mbar_req_in);
16040 + LITTLE_ENDIAN_32(dp);
16041 + dp++;
16042 + *dp = RD32_MBAR_REG(ha,
16043 + ha->req_q[1]->mbar_req_out);
16044 + LITTLE_ENDIAN_32(dp);
16045 + dp++;
16046 + } else {
16047 + *dp++ = 0;
16048 + *dp++ = 0;
16049 + }
16050 + if (ha->flags & MULTI_QUEUE) {
16051 + *dp = RD32_MBAR_REG(ha,
16052 + ha->rsp_queues[index]->mbar_rsp_in);
16053 + LITTLE_ENDIAN_32(dp);
16054 + dp++;
16055 + *dp = RD32_MBAR_REG(ha,
16056 + ha->rsp_queues[index]->mbar_rsp_out);
16057 + LITTLE_ENDIAN_32(dp);
16058 + dp++;
16059 + } else {
16060 + *dp++ = 0;
16061 + *dp++ = 0;
16062 + }
14956 16063 }
14957 - }
16064 + /* Get the request queue */
16065 + (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
16066 + DDI_DMA_SYNC_FORCPU);
16067 + w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
16068 + for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
16069 + *dp = *w32ptr++;
16070 + LITTLE_ENDIAN_32(dp);
16071 + dp++;
16072 + }
16073 + if (ha->req_q[1] != NULL) {
16074 + (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
16075 + 0, 0, DDI_DMA_SYNC_FORCPU);
16076 + w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
16077 + for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
16078 + *dp = *w32ptr++;
16079 + LITTLE_ENDIAN_32(dp);
16080 + dp++;
16081 + }
16082 + }
14958 16083
14959 - /* Get the response queue */
14960 - if (rval == QL_SUCCESS) {
14961 - uint32_t cnt;
14962 - uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14963 -
14964 - /* Sync DMA buffer. */
14965 - (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14966 - RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14967 - DDI_DMA_SYNC_FORKERNEL);
14968 -
14969 - for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14970 - fw->rsp_q[cnt] = *w32++;
14971 - LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
16084 + /* Get the response queues */
16085 + for (index = 0; index < ha->rsp_queues_cnt; index++) {
16086 + (void) ddi_dma_sync(
16087 + ha->rsp_queues[index]->rsp_ring.dma_handle,
16088 + 0, 0, DDI_DMA_SYNC_FORCPU);
16089 + w32ptr = (uint32_t *)
16090 + ha->rsp_queues[index]->rsp_ring.bp;
16091 + for (cnt = 0;
16092 + cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
16093 + cnt++) {
16094 + *dp = *w32ptr++;
16095 + LITTLE_ENDIAN_32(dp);
16096 + dp++;
16097 + }
14972 16098 }
14973 16099 }
14974 16100
14975 16101 /* Reset RISC. */
14976 -
14977 16102 ql_reset_chip(ha);
14978 16103
14979 16104 /* Memory. */
14980 -
14981 16105 if (rval == QL_SUCCESS) {
14982 16106 /* Code RAM. */
14983 16107 rval = ql_read_risc_ram(ha, 0x20000,
14984 16108 sizeof (fw->code_ram) / 4, fw->code_ram);
14985 16109 }
14986 16110 if (rval == QL_SUCCESS) {
14987 16111 /* External Memory. */
14988 16112 rval = ql_read_risc_ram(ha, 0x100000,
14989 - ha->fw_ext_memory_size / 4, fw->ext_mem);
16113 + ha->fw_ext_memory_size / 4, dp);
14990 16114 }
14991 16115
14992 16116 /* Get the FC event trace buffer */
14993 16117 if (rval == QL_SUCCESS) {
14994 16118 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14995 16119 (ha->fwfcetracebuf.bp != NULL)) {
14996 16120 uint32_t cnt;
14997 16121 uint32_t *w32 = ha->fwfcetracebuf.bp;
14998 16122
14999 16123 /* Sync DMA buffer. */
15000 16124 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
15001 16125 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
15002 16126
15003 16127 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
15004 16128 fw->fce_trace_buf[cnt] = *w32++;
15005 16129 }
15006 16130 }
15007 16131 }
15008 16132
15009 16133 /* Get the extended trace buffer */
15010 16134 if (rval == QL_SUCCESS) {
15011 16135 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
15012 16136 (ha->fwexttracebuf.bp != NULL)) {
15013 16137 uint32_t cnt;
15014 16138 uint32_t *w32 = ha->fwexttracebuf.bp;
15015 16139
15016 16140 /* Sync DMA buffer. */
15017 16141 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15018 16142 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15019 16143
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
15020 16144 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15021 16145 fw->ext_trace_buf[cnt] = *w32++;
15022 16146 }
15023 16147 }
15024 16148 }
15025 16149
15026 16150 if (rval != QL_SUCCESS) {
15027 16151 EL(ha, "failed=%xh\n", rval);
15028 16152 } else {
15029 16153 /*EMPTY*/
15030 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16154 + QL_PRINT_3(ha, "done\n");
15031 16155 }
15032 16156
15033 16157 return (rval);
15034 16158 }
15035 16159
/*
 * ql_read_risc_ram
 *	Reads RISC RAM one word at a time.
 *	Risc interrupts must be disabled when this routine is called.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	risc_address:	RISC code start address.
 *	len:		Number of words.
 *	buf:		buffer pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;	/* 16-bit fw word view */
	uint32_t	*buf32 = (uint32_t *)buf;	/* 32-bit fw word view */
	int		rval = QL_SUCCESS;

	/* One hand-rolled MBC_READ_RAM_EXTENDED handshake per word. */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		/* Load the mailbox command and the 32-bit RAM address. */
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
		/* Signal the firmware (set host interrupt) per chip family. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		}
		/* Poll for completion: 6,000,000 x 5us = ~30s limit. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (INTERRUPT_PENDING(ha)) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, risc2host) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					/*
					 * Completion status; pick up the data
					 * word from the outgoing mailboxes
					 * (mb2/mb3 on FW type 2, mb2 only on
					 * older 16-bit chips).
					 */
					if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox_out[2]),
						    RD16_IO_REG(ha,
						    mailbox_out[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Error status; mb0 has the code. */
					rval = RD16_IO_REG(ha, mailbox_out[0]);
					break;
				}
				/*
				 * Unrelated interrupt status; clear it and
				 * keep polling.
				 */
				if (CFG_IST(ha, CFG_CTRL_82XX)) {
					ql_8021_clr_hw_intr(ha);
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, semaphore, 0);
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
					RD16_IO_REG(ha, hccr);
				}
			}
			drv_usecwait(5);
		}
		/* Final interrupt cleanup for this word. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_clr_hw_intr(ha);
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, semaphore, 0);
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			RD16_IO_REG(ha, hccr);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
15129 16256
15130 16257 /*
15131 16258 * ql_read_regs
15132 16259 * Reads adapter registers to buffer.
15133 16260 *
15134 16261 * Input:
15135 16262 * ha: adapter state pointer.
15136 16263 * buf: buffer pointer.
15137 16264 * reg: start address.
15138 16265 * count: number of registers.
15139 16266 * wds: register size.
15140 16267 *
15141 16268 * Context:
15142 16269 * Interrupt or Kernel context, no mailbox commands allowed.
15143 16270 */
15144 16271 static void *
15145 16272 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15146 16273 uint8_t wds)
15147 16274 {
15148 16275 uint32_t *bp32, *reg32;
15149 16276 uint16_t *bp16, *reg16;
15150 16277 uint8_t *bp8, *reg8;
15151 16278
15152 16279 switch (wds) {
15153 16280 case 32:
15154 16281 bp32 = buf;
15155 16282 reg32 = reg;
15156 16283 while (count--) {
15157 16284 *bp32++ = RD_REG_DWORD(ha, reg32++);
15158 16285 }
15159 16286 return (bp32);
15160 16287 case 16:
15161 16288 bp16 = buf;
15162 16289 reg16 = reg;
15163 16290 while (count--) {
15164 16291 *bp16++ = RD_REG_WORD(ha, reg16++);
15165 16292 }
15166 16293 return (bp16);
15167 16294 case 8:
15168 16295 bp8 = buf;
15169 16296 reg8 = reg;
15170 16297 while (count--) {
15171 16298 *bp8++ = RD_REG_BYTE(ha, reg8++);
15172 16299 }
15173 16300 return (bp8);
15174 16301 default:
15175 16302 EL(ha, "Unknown word size=%d\n", wds);
15176 16303 return (buf);
15177 16304 }
15178 16305 }
15179 16306
|
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
15180 16307 static int
15181 16308 ql_save_config_regs(dev_info_t *dip)
15182 16309 {
15183 16310 ql_adapter_state_t *ha;
15184 16311 int ret;
15185 16312 ql_config_space_t chs;
15186 16313 caddr_t prop = "ql-config-space";
15187 16314
15188 16315 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15189 16316 if (ha == NULL) {
15190 - QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
16317 + QL_PRINT_2(NULL, "no adapter instance=%d\n",
15191 16318 ddi_get_instance(dip));
15192 16319 return (DDI_FAILURE);
15193 16320 }
15194 16321
15195 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16322 + QL_PRINT_3(ha, "started\n");
15196 16323
15197 16324 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15198 16325 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15199 16326 1) {
15200 - QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
16327 + QL_PRINT_2(ha, "no prop exit\n");
15201 16328 return (DDI_SUCCESS);
15202 16329 }
15203 16330
15204 16331 chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15205 16332 chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15206 16333 PCI_CONF_HEADER);
15207 16334 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15208 16335 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15209 16336 PCI_BCNF_BCNTRL);
15210 16337 }
15211 16338
15212 16339 chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15213 16340 PCI_CONF_CACHE_LINESZ);
15214 16341
15215 16342 chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15216 16343 PCI_CONF_LATENCY_TIMER);
15217 16344
15218 16345 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15219 16346 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15220 16347 PCI_BCNF_LATENCY_TIMER);
15221 16348 }
15222 16349
15223 16350 chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15224 16351 chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15225 16352 chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15226 16353 chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15227 16354 chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15228 16355 chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15229 16356
|
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
15230 16357 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15231 16358 ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15232 16359 (uchar_t *)&chs, sizeof (ql_config_space_t));
15233 16360
15234 16361 if (ret != DDI_PROP_SUCCESS) {
15235 16362 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15236 16363 QL_NAME, ddi_get_instance(dip), prop);
15237 16364 return (DDI_FAILURE);
15238 16365 }
15239 16366
15240 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16367 + QL_PRINT_3(ha, "done\n");
15241 16368
15242 16369 return (DDI_SUCCESS);
15243 16370 }
15244 16371
/*
 * ql_restore_config_regs
 *	Restores the adapter's PCI configuration space from the
 *	"ql-config-space" property saved by ql_save_config_regs(),
 *	then removes the property.
 *
 * Input:
 *	dip:	dev_info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(NULL, "no adapter instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(ha, "started\n");

	/* Nothing to restore if the saved-state property was never created. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(ha, "no prop exit\n");
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	/*
	 * Bridge control only exists for header type 1 (PCI bridge).
	 * NOTE(review): the value was saved with ql_pci_config_get8 but is
	 * written back 16 bits wide here — verify the intended width.
	 */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	/* Restore the base address registers (BAR0-BAR5). */
	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* One-shot: remove the property now that it has been consumed. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
15307 16434
15308 16435 uint8_t
15309 16436 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15310 16437 {
15311 16438 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15312 16439 return (ddi_get8(ha->sbus_config_handle,
15313 16440 (uint8_t *)(ha->sbus_config_base + off)));
15314 16441 }
15315 16442
15316 16443 #ifdef KERNEL_32
15317 16444 return (pci_config_getb(ha->pci_handle, off));
15318 16445 #else
15319 16446 return (pci_config_get8(ha->pci_handle, off));
15320 16447 #endif
15321 16448 }
15322 16449
15323 16450 uint16_t
15324 16451 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15325 16452 {
15326 16453 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15327 16454 return (ddi_get16(ha->sbus_config_handle,
15328 16455 (uint16_t *)(ha->sbus_config_base + off)));
15329 16456 }
15330 16457
15331 16458 #ifdef KERNEL_32
15332 16459 return (pci_config_getw(ha->pci_handle, off));
15333 16460 #else
15334 16461 return (pci_config_get16(ha->pci_handle, off));
15335 16462 #endif
15336 16463 }
15337 16464
15338 16465 uint32_t
15339 16466 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15340 16467 {
15341 16468 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15342 16469 return (ddi_get32(ha->sbus_config_handle,
15343 16470 (uint32_t *)(ha->sbus_config_base + off)));
15344 16471 }
15345 16472
15346 16473 #ifdef KERNEL_32
15347 16474 return (pci_config_getl(ha->pci_handle, off));
15348 16475 #else
15349 16476 return (pci_config_get32(ha->pci_handle, off));
15350 16477 #endif
15351 16478 }
15352 16479
15353 16480 void
15354 16481 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15355 16482 {
15356 16483 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15357 16484 ddi_put8(ha->sbus_config_handle,
15358 16485 (uint8_t *)(ha->sbus_config_base + off), val);
15359 16486 } else {
15360 16487 #ifdef KERNEL_32
15361 16488 pci_config_putb(ha->pci_handle, off, val);
15362 16489 #else
15363 16490 pci_config_put8(ha->pci_handle, off, val);
15364 16491 #endif
15365 16492 }
15366 16493 }
15367 16494
15368 16495 void
15369 16496 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15370 16497 {
15371 16498 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15372 16499 ddi_put16(ha->sbus_config_handle,
15373 16500 (uint16_t *)(ha->sbus_config_base + off), val);
15374 16501 } else {
15375 16502 #ifdef KERNEL_32
15376 16503 pci_config_putw(ha->pci_handle, off, val);
15377 16504 #else
15378 16505 pci_config_put16(ha->pci_handle, off, val);
15379 16506 #endif
15380 16507 }
15381 16508 }
15382 16509
15383 16510 void
15384 16511 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15385 16512 {
15386 16513 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15387 16514 ddi_put32(ha->sbus_config_handle,
15388 16515 (uint32_t *)(ha->sbus_config_base + off), val);
15389 16516 } else {
15390 16517 #ifdef KERNEL_32
15391 16518 pci_config_putl(ha->pci_handle, off, val);
15392 16519 #else
15393 16520 pci_config_put32(ha->pci_handle, off, val);
15394 16521 #endif
15395 16522 }
15396 16523 }
15397 16524
15398 16525 /*
15399 16526 * ql_halt
15400 16527 * Waits for commands that are running to finish and
15401 16528 * if they do not, commands are aborted.
15402 16529 * Finally the adapter is reset.
15403 16530 *
|
↓ open down ↓ |
90 lines elided |
↑ open up ↑ |
15404 16531 * Input:
15405 16532 * ha: adapter state pointer.
15406 16533 * pwr: power state.
15407 16534 *
15408 16535 * Context:
15409 16536 * Kernel context.
15410 16537 */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	ql_link_t	*link;
	ql_response_q_t	*rsp_q;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint32_t	cnt, i;
	uint16_t	index;

	QL_PRINT_3(ha, "started\n");

	/* Wait for all commands running to finish, per target. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait 30 seconds (3000 x 10ms) for commands. */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/*
			 * Finish any commands waiting for more status,
			 * on every response queue.
			 */
			for (i = 0; i < ha->rsp_queues_cnt; i++) {
				if ((rsp_q = ha->rsp_queues[i]) != NULL &&
				    (sp = rsp_q->status_srb) != NULL) {
					rsp_q->status_srb = NULL;
					sp->cmd.next = NULL;
					ql_done(&sp->cmd, B_FALSE);
				}
			}

			/* Abort commands that did not finish. */
			if (cnt == 0) {
				for (cnt = 1; cnt < ha->osc_max_cnt;
				    cnt++) {
					/*
					 * Restart the slot scan whenever
					 * more pending commands get issued.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/*
					 * Skip empty slots and the
					 * aborted-SRB sentinel; abort and
					 * complete commands for this target.
					 */
					if (sp != NULL &&
					    sp != QL_ABORTED_SRB(ha) &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort_io(ha, sp);
						sp->pkt->pkt_reason =
						    CS_ABORTED;
						sp->cmd.next = NULL;
						ql_done(&sp->cmd, B_FALSE);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Powering down: mark the adapter offline and quiesce the chip. */
	if (pwr == PM_LEVEL_D3 && ha->flags & ONLINE) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* ISP82xx: drop this driver's "active" claim on the hw. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_clr_drv_active(ha);
		}

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(ha, "done\n");
}
15495 16633
15496 16634 /*
15497 16635 * ql_get_dma_mem
15498 16636 * Function used to allocate dma memory.
15499 16637 *
15500 16638 * Input:
15501 16639 * ha: adapter state pointer.
15502 16640 * mem: pointer to dma memory object.
15503 16641 * size: size of the request in bytes
15504 16642 *
15505 16643 * Returns:
 15506 16644 	 *	ql local function return status code.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
15507 16645 *
15508 16646 * Context:
15509 16647 * Kernel context.
15510 16648 */
15511 16649 int
15512 16650 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15513 16651 mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15514 16652 {
15515 16653 int rval;
15516 16654
15517 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16655 + QL_PRINT_3(ha, "started\n");
15518 16656
15519 16657 mem->size = size;
15520 16658 mem->type = allocation_type;
15521 - mem->cookie_count = 1;
16659 + mem->max_cookie_count = 1;
15522 16660
15523 16661 switch (alignment) {
15524 16662 case QL_DMA_DATA_ALIGN:
15525 16663 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15526 16664 break;
15527 16665 case QL_DMA_RING_ALIGN:
15528 16666 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15529 16667 break;
15530 16668 default:
15531 16669 EL(ha, "failed, unknown alignment type %x\n", alignment);
15532 16670 break;
15533 16671 }
15534 16672
15535 16673 if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15536 16674 ql_free_phys(ha, mem);
15537 16675 EL(ha, "failed, alloc_phys=%xh\n", rval);
15538 16676 }
15539 16677
15540 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16678 + QL_PRINT_3(ha, "done\n");
15541 16679
15542 16680 return (rval);
15543 16681 }
15544 16682
15545 16683 /*
16684 + * ql_free_dma_resource
16685 + * Function used to free dma memory.
16686 + *
16687 + * Input:
16688 + * ha: adapter state pointer.
16689 + * mem: pointer to dma memory object.
16690 + * mem->dma_handle DMA memory handle.
16691 + *
16692 + * Context:
16693 + * Kernel context.
16694 + */
16695 +void
16696 +ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
16697 +{
16698 + QL_PRINT_3(ha, "started\n");
16699 +
16700 + ql_free_phys(ha, mem);
16701 +
16702 + QL_PRINT_3(ha, "done\n");
16703 +}
16704 +
16705 +/*
15546 16706 * ql_alloc_phys
15547 16707 * Function used to allocate memory and zero it.
15548 16708 * Memory is below 4 GB.
15549 16709 *
15550 16710 * Input:
15551 16711 * ha: adapter state pointer.
15552 16712 * mem: pointer to dma memory object.
15553 16713 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15554 16714 * mem->cookie_count number of segments allowed.
15555 16715 * mem->type memory allocation type.
15556 16716 * mem->size memory size.
15557 16717 * mem->alignment memory alignment.
15558 16718 *
15559 16719 * Returns:
15560 - * qn local function return status code.
16720 + * ql local function return status code.
15561 16721 *
15562 16722 * Context:
15563 16723 * Kernel context.
15564 16724 */
15565 16725 int
15566 16726 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15567 16727 {
15568 16728 size_t rlen;
15569 - ddi_dma_attr_t dma_attr;
16729 + ddi_dma_attr_t dma_attr = ha->io_dma_attr;
15570 16730 ddi_device_acc_attr_t acc_attr = ql_dev_acc_attr;
15571 16731
15572 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16732 + QL_PRINT_3(ha, "started\n");
15573 16733
15574 - dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15575 - ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15576 -
15577 16734 dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15578 - dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
16735 + dma_attr.dma_attr_sgllen = (int)mem->max_cookie_count;
15579 16736
15580 16737 /*
15581 16738 * Workaround for SUN XMITS buffer must end and start on 8 byte
15582 16739 * boundary. Else, hardware will overrun the buffer. Simple fix is
15583 16740 * to make sure buffer has enough room for overrun.
15584 16741 */
15585 16742 if (mem->size & 7) {
15586 16743 mem->size += 8 - (mem->size & 7);
15587 16744 }
15588 16745
15589 16746 mem->flags = DDI_DMA_CONSISTENT;
15590 16747
15591 16748 /*
15592 16749 * Allocate DMA memory for command.
15593 16750 */
15594 16751 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15595 16752 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15596 16753 DDI_SUCCESS) {
15597 16754 EL(ha, "failed, ddi_dma_alloc_handle\n");
15598 16755 mem->dma_handle = NULL;
15599 16756 return (QL_MEMORY_ALLOC_FAILED);
15600 16757 }
15601 16758
15602 16759 switch (mem->type) {
15603 16760 case KERNEL_MEM:
15604 16761 mem->bp = kmem_zalloc(mem->size, sleep);
15605 16762 break;
15606 16763 case BIG_ENDIAN_DMA:
15607 16764 case LITTLE_ENDIAN_DMA:
15608 16765 case NO_SWAP_DMA:
15609 16766 if (mem->type == BIG_ENDIAN_DMA) {
15610 16767 acc_attr.devacc_attr_endian_flags =
15611 16768 DDI_STRUCTURE_BE_ACC;
15612 16769 } else if (mem->type == NO_SWAP_DMA) {
15613 16770 acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
15614 16771 }
15615 16772 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
15616 16773 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15617 16774 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
15618 16775 &mem->acc_handle) == DDI_SUCCESS) {
15619 16776 bzero(mem->bp, mem->size);
15620 16777 /* ensure we got what we asked for (32bit) */
15621 16778 if (dma_attr.dma_attr_addr_hi == NULL) {
15622 16779 if (mem->cookie.dmac_notused != NULL) {
15623 16780 EL(ha, "failed, ddi_dma_mem_alloc "
15624 16781 "returned 64 bit DMA address\n");
15625 16782 ql_free_phys(ha, mem);
15626 16783 return (QL_MEMORY_ALLOC_FAILED);
15627 16784 }
15628 16785 }
15629 16786 } else {
15630 16787 mem->acc_handle = NULL;
15631 16788 mem->bp = NULL;
15632 16789 }
15633 16790 break;
15634 16791 default:
15635 16792 EL(ha, "failed, unknown type=%xh\n", mem->type);
15636 16793 mem->acc_handle = NULL;
15637 16794 mem->bp = NULL;
15638 16795 break;
15639 16796 }
|
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
15640 16797
15641 16798 if (mem->bp == NULL) {
15642 16799 EL(ha, "failed, ddi_dma_mem_alloc\n");
15643 16800 ddi_dma_free_handle(&mem->dma_handle);
15644 16801 mem->dma_handle = NULL;
15645 16802 return (QL_MEMORY_ALLOC_FAILED);
15646 16803 }
15647 16804
15648 16805 mem->flags |= DDI_DMA_RDWR;
15649 16806
16807 + if (qlc_fm_check_dma_handle(ha, mem->dma_handle)
16808 + != DDI_FM_OK) {
16809 + EL(ha, "failed, ddi_dma_addr_bind_handle\n");
16810 + ql_free_phys(ha, mem);
16811 + qlc_fm_report_err_impact(ha,
16812 + QL_FM_EREPORT_DMA_HANDLE_CHECK);
16813 + return (QL_MEMORY_ALLOC_FAILED);
16814 + }
16815 +
15650 16816 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15651 16817 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15652 16818 ql_free_phys(ha, mem);
15653 16819 return (QL_MEMORY_ALLOC_FAILED);
15654 16820 }
15655 16821
15656 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16822 + QL_PRINT_3(ha, "done\n");
15657 16823
15658 16824 return (QL_SUCCESS);
15659 16825 }
15660 16826
15661 16827 /*
15662 16828 * ql_free_phys
15663 16829 * Function used to free physical memory.
15664 16830 *
15665 16831 * Input:
15666 16832 * ha: adapter state pointer.
15667 16833 * mem: pointer to dma memory object.
15668 16834 *
15669 16835 * Context:
15670 16836 * Kernel context.
15671 16837 */
15672 16838 void
15673 16839 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15674 16840 {
15675 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16841 + QL_PRINT_3(ha, "started\n");
15676 16842
15677 - if (mem != NULL && mem->dma_handle != NULL) {
15678 - ql_unbind_dma_buffer(ha, mem);
16843 + if (mem != NULL) {
16844 + if (mem->memflags == DDI_DMA_MAPPED) {
16845 + ql_unbind_dma_buffer(ha, mem);
16846 + }
16847 +
15679 16848 switch (mem->type) {
15680 16849 case KERNEL_MEM:
15681 16850 if (mem->bp != NULL) {
15682 16851 kmem_free(mem->bp, mem->size);
16852 + mem->bp = NULL;
15683 16853 }
15684 16854 break;
15685 16855 case LITTLE_ENDIAN_DMA:
15686 16856 case BIG_ENDIAN_DMA:
15687 16857 case NO_SWAP_DMA:
15688 16858 if (mem->acc_handle != NULL) {
15689 16859 ddi_dma_mem_free(&mem->acc_handle);
15690 16860 mem->acc_handle = NULL;
16861 + mem->bp = NULL;
15691 16862 }
15692 16863 break;
15693 16864 default:
15694 16865 break;
15695 16866 }
15696 - mem->bp = NULL;
15697 - ddi_dma_free_handle(&mem->dma_handle);
15698 - mem->dma_handle = NULL;
16867 + if (mem->dma_handle != NULL) {
16868 + ddi_dma_free_handle(&mem->dma_handle);
16869 + mem->dma_handle = NULL;
16870 + }
15699 16871 }
15700 16872
15701 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16873 + QL_PRINT_3(ha, "done\n");
15702 16874 }
15703 16875
15704 16876 /*
15705 - * ql_alloc_dma_resouce.
15706 - * Allocates DMA resource for buffer.
15707 - *
15708 - * Input:
15709 - * ha: adapter state pointer.
15710 - * mem: pointer to dma memory object.
15711 - * sleep: KM_SLEEP/KM_NOSLEEP flag.
15712 - * mem->cookie_count number of segments allowed.
15713 - * mem->type memory allocation type.
15714 - * mem->size memory size.
15715 - * mem->bp pointer to memory or struct buf
15716 - *
15717 - * Returns:
15718 - * qn local function return status code.
15719 - *
15720 - * Context:
15721 - * Kernel context.
15722 - */
15723 -int
15724 -ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15725 -{
15726 - ddi_dma_attr_t dma_attr;
15727 -
15728 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15729 -
15730 - dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15731 - ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15732 - dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15733 -
15734 - /*
15735 - * Allocate DMA handle for command.
15736 - */
15737 - if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15738 - DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15739 - DDI_SUCCESS) {
15740 - EL(ha, "failed, ddi_dma_alloc_handle\n");
15741 - mem->dma_handle = NULL;
15742 - return (QL_MEMORY_ALLOC_FAILED);
15743 - }
15744 -
15745 - mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15746 -
15747 - if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15748 - EL(ha, "failed, bind_dma_buffer\n");
15749 - ddi_dma_free_handle(&mem->dma_handle);
15750 - mem->dma_handle = NULL;
15751 - return (QL_MEMORY_ALLOC_FAILED);
15752 - }
15753 -
15754 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15755 -
15756 - return (QL_SUCCESS);
15757 -}
15758 -
15759 -/*
15760 - * ql_free_dma_resource
15761 - * Frees DMA resources.
15762 - *
15763 - * Input:
15764 - * ha: adapter state pointer.
15765 - * mem: pointer to dma memory object.
15766 - * mem->dma_handle DMA memory handle.
15767 - *
15768 - * Context:
15769 - * Kernel context.
15770 - */
15771 -void
15772 -ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15773 -{
15774 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15775 -
15776 - ql_free_phys(ha, mem);
15777 -
15778 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15779 -}
15780 -
15781 -/*
15782 16877 * ql_bind_dma_buffer
15783 16878 * Binds DMA buffer.
15784 16879 *
15785 16880 * Input:
15786 16881 * ha: adapter state pointer.
15787 16882 * mem: pointer to dma memory object.
15788 - * sleep: KM_SLEEP or KM_NOSLEEP.
16883 + * kmflags: KM_SLEEP or KM_NOSLEEP.
15789 16884 * mem->dma_handle DMA memory handle.
15790 - * mem->cookie_count number of segments allowed.
16885 + * mem->max_cookie_count number of segments allowed.
15791 16886 * mem->type memory allocation type.
15792 16887 * mem->size memory size.
15793 16888 * mem->bp pointer to memory or struct buf
15794 16889 *
15795 16890 * Returns:
15796 16891 * mem->cookies pointer to list of cookies.
15797 16892 * mem->cookie_count number of cookies.
15798 16893 * status success = DDI_DMA_MAPPED
15799 16894 * DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15800 16895 * DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15801 16896 * DDI_DMA_TOOBIG
15802 16897 *
15803 16898 * Context:
15804 16899 * Kernel context.
15805 16900 */
15806 16901 static int
15807 -ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
16902 +ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int kmflags)
15808 16903 {
15809 - int rval;
15810 16904 ddi_dma_cookie_t *cookiep;
15811 - uint32_t cnt = mem->cookie_count;
16905 + uint32_t cnt;
15812 16906
15813 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16907 + QL_PRINT_3(ha, "started\n");
15814 16908
15815 - if (mem->type == STRUCT_BUF_MEMORY) {
15816 - rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15817 - mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15818 - DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15819 - } else {
15820 - rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15821 - mem->size, mem->flags, (sleep == KM_SLEEP) ?
15822 - DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15823 - &mem->cookie_count);
15824 - }
16909 + mem->memflags = ddi_dma_addr_bind_handle(mem->dma_handle, NULL,
16910 + mem->bp, mem->size, mem->flags, (kmflags == KM_SLEEP) ?
16911 + DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
16912 + &mem->cookie_count);
15825 16913
15826 - if (rval == DDI_DMA_MAPPED) {
15827 - if (mem->cookie_count > cnt) {
16914 + if (mem->memflags == DDI_DMA_MAPPED) {
16915 + if (mem->cookie_count > mem->max_cookie_count) {
15828 16916 (void) ddi_dma_unbind_handle(mem->dma_handle);
15829 16917 EL(ha, "failed, cookie_count %d > %d\n",
15830 - mem->cookie_count, cnt);
15831 - rval = DDI_DMA_TOOBIG;
16918 + mem->cookie_count, mem->max_cookie_count);
16919 + mem->memflags = (uint32_t)DDI_DMA_TOOBIG;
15832 16920 } else {
15833 16921 if (mem->cookie_count > 1) {
15834 16922 if (mem->cookies = kmem_zalloc(
15835 16923 sizeof (ddi_dma_cookie_t) *
15836 - mem->cookie_count, sleep)) {
16924 + mem->cookie_count, kmflags)) {
15837 16925 *mem->cookies = mem->cookie;
15838 16926 cookiep = mem->cookies;
15839 16927 for (cnt = 1; cnt < mem->cookie_count;
15840 16928 cnt++) {
15841 16929 ddi_dma_nextcookie(
15842 16930 mem->dma_handle,
15843 16931 ++cookiep);
15844 16932 }
15845 16933 } else {
15846 16934 (void) ddi_dma_unbind_handle(
15847 16935 mem->dma_handle);
15848 16936 EL(ha, "failed, kmem_zalloc\n");
15849 - rval = DDI_DMA_NORESOURCES;
16937 + mem->memflags = (uint32_t)
16938 + DDI_DMA_NORESOURCES;
15850 16939 }
15851 16940 } else {
15852 16941 /*
15853 16942 * It has been reported that dmac_size at times
15854 16943 * may be incorrect on sparc machines so for
15855 16944 * sparc machines that only have one segment
15856 16945 * use the buffer size instead.
15857 16946 */
15858 16947 mem->cookies = &mem->cookie;
15859 16948 mem->cookies->dmac_size = mem->size;
15860 16949 }
15861 16950 }
15862 16951 }
15863 16952
15864 - if (rval != DDI_DMA_MAPPED) {
15865 - EL(ha, "failed=%xh\n", rval);
16953 + if (mem->memflags != DDI_DMA_MAPPED) {
16954 + EL(ha, "failed=%xh\n", mem->memflags);
15866 16955 } else {
15867 16956 /*EMPTY*/
15868 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16957 + QL_PRINT_3(ha, "done\n");
15869 16958 }
15870 16959
15871 - return (rval);
16960 + return (mem->memflags);
15872 16961 }
15873 16962
15874 16963 /*
15875 16964 * ql_unbind_dma_buffer
15876 16965 * Unbinds DMA buffer.
15877 16966 *
15878 16967 * Input:
15879 16968 * ha: adapter state pointer.
15880 16969 * mem: pointer to dma memory object.
15881 16970 * mem->dma_handle DMA memory handle.
15882 16971 * mem->cookies pointer to cookie list.
15883 16972 * mem->cookie_count number of cookies.
15884 16973 *
15885 16974 * Context:
15886 16975 * Kernel context.
15887 16976 */
15888 16977 /* ARGSUSED */
15889 16978 static void
15890 16979 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15891 16980 {
15892 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16981 + QL_PRINT_3(ha, "started\n");
15893 16982
15894 - (void) ddi_dma_unbind_handle(mem->dma_handle);
16983 + if (mem->dma_handle != NULL && mem->memflags == DDI_DMA_MAPPED) {
16984 + (void) ddi_dma_unbind_handle(mem->dma_handle);
16985 + }
15895 16986 if (mem->cookie_count > 1) {
15896 16987 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15897 16988 mem->cookie_count);
15898 16989 mem->cookies = NULL;
15899 16990 }
15900 16991 mem->cookie_count = 0;
16992 + mem->memflags = (uint32_t)DDI_DMA_NORESOURCES;
15901 16993
15902 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16994 + QL_PRINT_3(ha, "done\n");
15903 16995 }
15904 16996
15905 16997 static int
15906 16998 ql_suspend_adapter(ql_adapter_state_t *ha)
15907 16999 {
15908 - clock_t timer = 32 * drv_usectohz(1000000);
17000 + clock_t timer = (clock_t)(32 * drv_usectohz(1000000));
15909 17001
15910 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17002 + QL_PRINT_3(ha, "started\n");
15911 17003
17004 + (void) ql_wait_outstanding(ha);
17005 +
15912 17006 /*
17007 + * here we are sure that there will not be any mbox interrupt.
17008 + * So, let's make sure that we return back all the outstanding
17009 + * cmds as well as internally queued commands.
17010 + */
17011 + ql_halt(ha, PM_LEVEL_D0);
17012 +
17013 + /*
15913 17014 * First we will claim mbox ownership so that no
15914 17015 * thread using mbox hangs when we disable the
15915 17016 * interrupt in the middle of it.
15916 17017 */
15917 17018 MBX_REGISTER_LOCK(ha);
15918 17019
15919 17020 /* Check for mailbox available, if not wait for signal. */
15920 17021 while (ha->mailbox_flags & MBX_BUSY_FLG) {
15921 17022 ha->mailbox_flags = (uint8_t)
15922 17023 (ha->mailbox_flags | MBX_WANT_FLG);
15923 17024
15924 17025 /* 30 seconds from now */
15925 17026 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
15926 17027 timer, TR_CLOCK_TICK) == -1) {
15927 17028
15928 17029 /* Release mailbox register lock. */
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
15929 17030 MBX_REGISTER_UNLOCK(ha);
15930 17031 EL(ha, "failed, Suspend mbox");
15931 17032 return (QL_FUNCTION_TIMEOUT);
15932 17033 }
15933 17034 }
15934 17035
15935 17036 /* Set busy flag. */
15936 17037 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
15937 17038 MBX_REGISTER_UNLOCK(ha);
15938 17039
15939 - (void) ql_wait_outstanding(ha);
15940 -
15941 - /*
15942 - * here we are sure that there will not be any mbox interrupt.
15943 - * So, let's make sure that we return back all the outstanding
15944 - * cmds as well as internally queued commands.
15945 - */
15946 - ql_halt(ha, PM_LEVEL_D0);
15947 -
15948 17040 if (ha->power_level != PM_LEVEL_D3) {
15949 17041 /* Disable ISP interrupts. */
15950 - WRT16_IO_REG(ha, ictrl, 0);
17042 + ql_disable_intr(ha);
15951 17043 }
15952 17044
15953 - ADAPTER_STATE_LOCK(ha);
15954 - ha->flags &= ~INTERRUPTS_ENABLED;
15955 - ADAPTER_STATE_UNLOCK(ha);
15956 -
15957 17045 MBX_REGISTER_LOCK(ha);
15958 17046 /* Reset busy status. */
15959 17047 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);
15960 17048
15961 17049 /* If thread is waiting for mailbox go signal it to start. */
15962 17050 if (ha->mailbox_flags & MBX_WANT_FLG) {
15963 17051 ha->mailbox_flags = (uint8_t)
15964 17052 (ha->mailbox_flags & ~MBX_WANT_FLG);
15965 17053 cv_broadcast(&ha->cv_mbx_wait);
15966 17054 }
15967 17055 /* Release mailbox register lock. */
15968 17056 MBX_REGISTER_UNLOCK(ha);
15969 17057
15970 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17058 + QL_PRINT_3(ha, "done\n");
15971 17059
15972 17060 return (QL_SUCCESS);
15973 17061 }
15974 17062
15975 17063 /*
15976 17064 * ql_add_link_b
15977 17065 * Add link to the end of the chain.
15978 17066 *
15979 17067 * Input:
15980 17068 * head = Head of link list.
15981 17069 * link = link to be added.
15982 17070 * LOCK must be already obtained.
15983 17071 *
15984 17072 * Context:
15985 17073 * Interrupt or Kernel context, no mailbox commands allowed.
15986 17074 */
15987 17075 void
15988 17076 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15989 17077 {
17078 + if (link->head != NULL) {
17079 + EL(NULL, "link in use by list=%ph\n", link->head);
17080 + }
17081 +
15990 17082 /* at the end there isn't a next */
15991 17083 link->next = NULL;
15992 17084
15993 17085 if ((link->prev = head->last) == NULL) {
15994 17086 head->first = link;
15995 17087 } else {
15996 17088 head->last->next = link;
15997 17089 }
15998 17090
15999 17091 head->last = link;
16000 17092 link->head = head; /* the queue we're on */
16001 17093 }
16002 17094
16003 17095 /*
16004 17096 * ql_add_link_t
16005 17097 * Add link to the beginning of the chain.
16006 17098 *
16007 17099 * Input:
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
16008 17100 * head = Head of link list.
16009 17101 * link = link to be added.
16010 17102 * LOCK must be already obtained.
16011 17103 *
16012 17104 * Context:
16013 17105 * Interrupt or Kernel context, no mailbox commands allowed.
16014 17106 */
16015 17107 void
16016 17108 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16017 17109 {
17110 + if (link->head != NULL) {
17111 + EL(NULL, "link in use by list=%ph\n", link->head);
17112 + }
16018 17113 link->prev = NULL;
16019 17114
16020 17115 if ((link->next = head->first) == NULL) {
16021 17116 head->last = link;
16022 17117 } else {
16023 17118 head->first->prev = link;
16024 17119 }
16025 17120
16026 17121 head->first = link;
16027 17122 link->head = head; /* the queue we're on */
16028 17123 }
16029 17124
16030 17125 /*
16031 17126 * ql_remove_link
16032 17127 * Remove a link from the chain.
16033 17128 *
16034 17129 * Input:
16035 17130 * head = Head of link list.
16036 17131 * link = link to be removed.
16037 - * LOCK must be already obtained.
17132 + * associated proper LOCK must be already obtained.
16038 17133 *
16039 17134 * Context:
16040 17135 * Interrupt or Kernel context, no mailbox commands allowed.
16041 17136 */
16042 17137 void
16043 17138 ql_remove_link(ql_head_t *head, ql_link_t *link)
16044 17139 {
16045 - if (link->prev != NULL) {
16046 - if ((link->prev->next = link->next) == NULL) {
16047 - head->last = link->prev;
17140 + if (head != NULL) {
17141 + if (link->prev != NULL) {
17142 + if ((link->prev->next = link->next) == NULL) {
17143 + head->last = link->prev;
17144 + } else {
17145 + link->next->prev = link->prev;
17146 + }
17147 + } else if ((head->first = link->next) == NULL) {
17148 + head->last = NULL;
16048 17149 } else {
16049 - link->next->prev = link->prev;
17150 + head->first->prev = NULL;
16050 17151 }
16051 - } else if ((head->first = link->next) == NULL) {
16052 - head->last = NULL;
16053 - } else {
16054 - head->first->prev = NULL;
16055 - }
16056 17152
16057 - /* not on a queue any more */
16058 - link->prev = link->next = NULL;
16059 - link->head = NULL;
17153 + /* not on a queue any more */
17154 + link->prev = link->next = NULL;
17155 + link->head = NULL;
17156 + }
16060 17157 }
16061 17158
16062 17159 /*
16063 17160 * ql_chg_endian
16064 17161 * Change endianess of byte array.
16065 17162 *
16066 17163 * Input:
16067 17164 * buf = array pointer.
16068 17165 * size = size of array in bytes.
16069 17166 *
16070 17167 * Context:
16071 17168 * Interrupt or Kernel context, no mailbox commands allowed.
16072 17169 */
16073 17170 void
16074 17171 ql_chg_endian(uint8_t buf[], size_t size)
16075 17172 {
16076 17173 uint8_t byte;
16077 - size_t cnt1;
16078 - size_t cnt;
17174 + size_t cnt1;
17175 + size_t cnt;
16079 17176
16080 17177 cnt1 = size - 1;
16081 17178 for (cnt = 0; cnt < size / 2; cnt++) {
16082 17179 byte = buf[cnt1];
16083 17180 buf[cnt1] = buf[cnt];
16084 17181 buf[cnt] = byte;
16085 17182 cnt1--;
16086 17183 }
16087 17184 }
16088 17185
16089 17186 /*
16090 17187 * ql_bstr_to_dec
16091 17188 * Convert decimal byte string to number.
16092 17189 *
16093 17190 * Input:
16094 17191 * s: byte string pointer.
16095 17192 * ans: interger pointer for number.
16096 17193 * size: number of ascii bytes.
16097 17194 *
16098 17195 * Returns:
16099 17196 * success = number of ascii bytes processed.
16100 17197 *
16101 17198 * Context:
16102 17199 * Kernel/Interrupt context.
16103 17200 */
16104 17201 static int
16105 17202 ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
16106 17203 {
16107 17204 int mul, num, cnt, pos;
16108 17205 char *str;
16109 17206
16110 17207 /* Calculate size of number. */
16111 17208 if (size == 0) {
16112 17209 for (str = s; *str >= '0' && *str <= '9'; str++) {
16113 17210 size++;
16114 17211 }
16115 17212 }
16116 17213
16117 17214 *ans = 0;
16118 17215 for (cnt = 0; *s != '\0' && size; size--, cnt++) {
16119 17216 if (*s >= '0' && *s <= '9') {
16120 17217 num = *s++ - '0';
16121 17218 } else {
16122 17219 break;
16123 17220 }
16124 17221
16125 17222 for (mul = 1, pos = 1; pos < size; pos++) {
16126 17223 mul *= 10;
16127 17224 }
16128 17225 *ans += num * mul;
16129 17226 }
16130 17227
16131 17228 return (cnt);
16132 17229 }
16133 17230
16134 17231 /*
16135 17232 * ql_delay
16136 17233 * Calls delay routine if threads are not suspended, otherwise, busy waits
16137 17234 * Minimum = 1 tick = 10ms
|
↓ open down ↓ |
49 lines elided |
↑ open up ↑ |
16138 17235 *
16139 17236 * Input:
16140 17237 * dly = delay time in microseconds.
16141 17238 *
16142 17239 * Context:
16143 17240 * Kernel or Interrupt context, no mailbox commands allowed.
16144 17241 */
16145 17242 void
16146 17243 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16147 17244 {
16148 - if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
17245 + if (ha->flags & ADAPTER_SUSPENDED || ddi_in_panic() ||
17246 + curthread->t_flag & T_INTR_THREAD) {
16149 17247 drv_usecwait(usecs);
16150 17248 } else {
16151 17249 delay(drv_usectohz(usecs));
16152 17250 }
16153 17251 }
16154 17252
16155 17253 /*
16156 17254 * ql_stall_drv
16157 17255 * Stalls one or all driver instances, waits for 30 seconds.
16158 17256 *
16159 17257 * Input:
16160 17258 * ha: adapter state pointer or NULL for all.
16161 17259 * options: BIT_0 --> leave driver stalled on exit if
16162 17260 * failed.
16163 17261 *
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
16164 17262 * Returns:
16165 17263 * ql local function return status code.
16166 17264 *
16167 17265 * Context:
16168 17266 * Kernel context.
16169 17267 */
16170 17268 int
16171 17269 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16172 17270 {
16173 17271 ql_link_t *link;
16174 - ql_adapter_state_t *ha2;
17272 + ql_adapter_state_t *ha2 = NULL;
16175 17273 uint32_t timer;
16176 17274
16177 - QL_PRINT_3(CE_CONT, "started\n");
17275 + QL_PRINT_3(ha, "started\n");
16178 17276
16179 - /* Wait for 30 seconds for daemons unstall. */
16180 - timer = 3000;
17277 + /* Tell all daemons to stall. */
16181 17278 link = ha == NULL ? ql_hba.first : &ha->hba;
16182 - while (link != NULL && timer) {
17279 + while (link != NULL) {
16183 17280 ha2 = link->base_address;
16184 17281
16185 17282 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16186 17283
17284 + link = ha == NULL ? link->next : NULL;
17285 + }
17286 +
17287 + /* Wait for 30 seconds for daemons stall. */
17288 + timer = 3000;
17289 + link = ha == NULL ? ql_hba.first : &ha->hba;
17290 + while (link != NULL && timer) {
17291 + ha2 = link->base_address;
17292 +
16187 17293 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16188 17294 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
17295 + (ha2->task_daemon_flags & FIRMWARE_UP) == 0 ||
16189 17296 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16190 - ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
17297 + ql_wait_outstanding(ha2) == ha2->pha->osc_max_cnt)) {
16191 17298 link = ha == NULL ? link->next : NULL;
16192 17299 continue;
16193 17300 }
16194 17301
17302 + QL_PRINT_2(ha2, "status, dtf=%xh, stf=%xh\n",
17303 + ha2->task_daemon_flags, ha2->flags);
17304 +
16195 17305 ql_delay(ha2, 10000);
16196 17306 timer--;
16197 17307 link = ha == NULL ? ql_hba.first : &ha->hba;
16198 17308 }
16199 17309
16200 17310 if (ha2 != NULL && timer == 0) {
16201 17311 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16202 17312 ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16203 17313 "unstalled"));
16204 17314 if (options & BIT_0) {
16205 17315 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16206 17316 }
16207 17317 return (QL_FUNCTION_TIMEOUT);
16208 17318 }
16209 17319
16210 - QL_PRINT_3(CE_CONT, "done\n");
17320 + QL_PRINT_3(ha, "done\n");
16211 17321
16212 17322 return (QL_SUCCESS);
16213 17323 }
16214 17324
16215 17325 /*
16216 17326 * ql_restart_driver
16217 17327 * Restarts one or all driver instances.
16218 17328 *
16219 17329 * Input:
16220 17330 * ha: adapter state pointer or NULL for all.
16221 17331 *
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
16222 17332 * Context:
16223 17333 * Kernel context.
16224 17334 */
16225 17335 void
16226 17336 ql_restart_driver(ql_adapter_state_t *ha)
16227 17337 {
16228 17338 ql_link_t *link;
16229 17339 ql_adapter_state_t *ha2;
16230 17340 uint32_t timer;
16231 17341
16232 - QL_PRINT_3(CE_CONT, "started\n");
17342 + QL_PRINT_3(ha, "started\n");
16233 17343
16234 17344 /* Tell all daemons to unstall. */
16235 17345 link = ha == NULL ? ql_hba.first : &ha->hba;
16236 17346 while (link != NULL) {
16237 17347 ha2 = link->base_address;
16238 17348
16239 17349 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16240 17350
16241 17351 link = ha == NULL ? link->next : NULL;
16242 17352 }
16243 17353
16244 17354 /* Wait for 30 seconds for all daemons unstall. */
16245 17355 timer = 3000;
16246 17356 link = ha == NULL ? ql_hba.first : &ha->hba;
16247 17357 while (link != NULL && timer) {
16248 17358 ha2 = link->base_address;
16249 17359
16250 17360 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16251 17361 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16252 17362 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
16253 - QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
16254 - ha2->instance, ha2->vp_index);
17363 + QL_PRINT_2(ha2, "restarted\n");
16255 17364 ql_restart_queues(ha2);
16256 17365 link = ha == NULL ? link->next : NULL;
16257 17366 continue;
16258 17367 }
16259 17368
16260 - QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
16261 - ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
17369 + QL_PRINT_2(ha2, "status, tdf=%xh\n", ha2->task_daemon_flags);
16262 17370
16263 17371 ql_delay(ha2, 10000);
16264 17372 timer--;
16265 17373 link = ha == NULL ? ql_hba.first : &ha->hba;
16266 17374 }
16267 17375
16268 - QL_PRINT_3(CE_CONT, "done\n");
17376 + QL_PRINT_3(ha, "done\n");
16269 17377 }
16270 17378
16271 17379 /*
16272 17380 * ql_setup_interrupts
16273 17381 * Sets up interrupts based on the HBA's and platform's
16274 17382 * capabilities (e.g., legacy / MSI / FIXED).
16275 17383 *
16276 17384 * Input:
16277 17385 * ha = adapter state pointer.
16278 17386 *
16279 17387 * Returns:
16280 17388 * DDI_SUCCESS or DDI_FAILURE.
16281 17389 *
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
16282 17390 * Context:
16283 17391 * Kernel context.
16284 17392 */
16285 17393 static int
16286 17394 ql_setup_interrupts(ql_adapter_state_t *ha)
16287 17395 {
16288 17396 int32_t rval = DDI_FAILURE;
16289 17397 int32_t i;
16290 17398 int32_t itypes = 0;
16291 17399
16292 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17400 + QL_PRINT_3(ha, "started\n");
16293 17401
16294 17402 /*
16295 17403 * The Solaris Advanced Interrupt Functions (aif) are only
16296 17404 * supported on s10U1 or greater.
16297 17405 */
16298 17406 if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16299 17407 EL(ha, "interrupt framework is not supported or is "
16300 17408 "disabled, using legacy\n");
16301 17409 return (ql_legacy_intr(ha));
16302 17410 } else if (ql_os_release_level == 10) {
16303 17411 /*
16304 17412 * See if the advanced interrupt functions (aif) are
16305 17413 * in the kernel
16306 17414 */
16307 17415 void *fptr = (void *)&ddi_intr_get_supported_types;
16308 17416
16309 17417 if (fptr == NULL) {
16310 17418 EL(ha, "aif is not supported, using legacy "
16311 17419 "interrupts (rev)\n");
16312 17420 return (ql_legacy_intr(ha));
16313 17421 }
16314 17422 }
16315 17423
16316 17424 /* See what types of interrupts this HBA and platform support */
16317 17425 if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16318 17426 DDI_SUCCESS) {
16319 17427 EL(ha, "get supported types failed, rval=%xh, "
16320 17428 "assuming FIXED\n", i);
16321 17429 itypes = DDI_INTR_TYPE_FIXED;
16322 17430 }
16323 17431
16324 17432 EL(ha, "supported types are: %xh\n", itypes);
16325 17433
16326 17434 if ((itypes & DDI_INTR_TYPE_MSIX) &&
16327 17435 (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16328 17436 EL(ha, "successful MSI-X setup\n");
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
16329 17437 } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16330 17438 (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16331 17439 EL(ha, "successful MSI setup\n");
16332 17440 } else {
16333 17441 rval = ql_setup_fixed(ha);
16334 17442 }
16335 17443
16336 17444 if (rval != DDI_SUCCESS) {
16337 17445 EL(ha, "failed, aif, rval=%xh\n", rval);
16338 17446 } else {
16339 - /*EMPTY*/
16340 - QL_PRINT_3(CE_CONT, "(%d): done\n");
17447 + /* Setup mutexes */
17448 + if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
17449 + EL(ha, "failed, mutex init ret=%xh\n", rval);
17450 + ql_release_intr(ha);
17451 + }
17452 + QL_PRINT_3(ha, "done\n");
16341 17453 }
16342 17454
16343 17455 return (rval);
16344 17456 }
16345 17457
16346 17458 /*
16347 17459 * ql_setup_msi
16348 17460 * Set up aif MSI interrupts
16349 17461 *
16350 17462 * Input:
16351 17463 * ha = adapter state pointer.
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
16352 17464 *
16353 17465 * Returns:
16354 17466 * DDI_SUCCESS or DDI_FAILURE.
16355 17467 *
16356 17468 * Context:
16357 17469 * Kernel context.
16358 17470 */
16359 17471 static int
16360 17472 ql_setup_msi(ql_adapter_state_t *ha)
16361 17473 {
17474 + uint_t i;
16362 17475 int32_t count = 0;
16363 17476 int32_t avail = 0;
16364 17477 int32_t actual = 0;
16365 17478 int32_t msitype = DDI_INTR_TYPE_MSI;
16366 17479 int32_t ret;
16367 - ql_ifunc_t itrfun[10] = {0};
16368 17480
16369 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17481 + QL_PRINT_3(ha, "started\n");
16370 17482
16371 17483 if (ql_disable_msi != 0) {
16372 17484 EL(ha, "MSI is disabled by user\n");
16373 17485 return (DDI_FAILURE);
16374 17486 }
16375 17487
 16376 17488  	/* MSI support is only supported on 24xx HBA's. */
16377 - if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
17489 + if (!CFG_IST(ha, CFG_MSI_SUPPORT)) {
16378 17490 EL(ha, "HBA does not support MSI\n");
16379 17491 return (DDI_FAILURE);
16380 17492 }
16381 17493
16382 17494 /* Get number of MSI interrupts the system supports */
16383 17495 if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16384 17496 DDI_SUCCESS) || count == 0) {
16385 17497 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16386 17498 return (DDI_FAILURE);
16387 17499 }
16388 17500
16389 17501 /* Get number of available MSI interrupts */
16390 17502 if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16391 17503 DDI_SUCCESS) || avail == 0) {
16392 17504 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16393 17505 return (DDI_FAILURE);
16394 17506 }
16395 17507
16396 17508 /* MSI requires only 1. */
16397 17509 count = 1;
16398 - itrfun[0].ifunc = &ql_isr_aif;
16399 17510
16400 17511 /* Allocate space for interrupt handles */
16401 17512 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16402 17513 ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16403 17514
16404 17515 ha->iflags |= IFLG_INTR_MSI;
16405 17516
16406 17517 /* Allocate the interrupts */
16407 17518 if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
16408 17519 &actual, 0)) != DDI_SUCCESS || actual < count) {
16409 17520 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16410 17521 "actual=%xh\n", ret, count, actual);
16411 17522 ql_release_intr(ha);
16412 17523 return (DDI_FAILURE);
16413 17524 }
16414 -
16415 17525 ha->intr_cnt = actual;
16416 17526
16417 17527 /* Get interrupt priority */
16418 - if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16419 - DDI_SUCCESS) {
17528 + if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
16420 17529 EL(ha, "failed, get_pri ret=%xh\n", ret);
16421 17530 ql_release_intr(ha);
16422 17531 return (ret);
16423 17532 }
17533 + ha->intr_pri = DDI_INTR_PRI(i);
16424 17534
16425 17535 /* Add the interrupt handler */
16426 - if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
17536 + if ((ret = ddi_intr_add_handler(ha->htable[0], ql_isr_aif,
16427 17537 (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
16428 17538 EL(ha, "failed, intr_add ret=%xh\n", ret);
16429 17539 ql_release_intr(ha);
16430 17540 return (ret);
16431 17541 }
16432 17542
16433 - /* Setup mutexes */
16434 - if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16435 - EL(ha, "failed, mutex init ret=%xh\n", ret);
16436 - ql_release_intr(ha);
16437 - return (ret);
16438 - }
16439 -
16440 17543 /* Get the capabilities */
16441 17544 (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16442 17545
16443 17546 /* Enable interrupts */
16444 17547 if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16445 17548 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16446 17549 DDI_SUCCESS) {
16447 17550 EL(ha, "failed, block enable, ret=%xh\n", ret);
16448 - ql_destroy_mutex(ha);
16449 17551 ql_release_intr(ha);
16450 17552 return (ret);
16451 17553 }
16452 17554 } else {
16453 - if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
16454 - EL(ha, "failed, intr enable, ret=%xh\n", ret);
16455 - ql_destroy_mutex(ha);
16456 - ql_release_intr(ha);
16457 - return (ret);
17555 + for (i = 0; i < actual; i++) {
17556 + if ((ret = ddi_intr_enable(ha->htable[i])) !=
17557 + DDI_SUCCESS) {
17558 + EL(ha, "failed, intr enable, ret=%xh\n", ret);
17559 + ql_release_intr(ha);
17560 + return (ret);
17561 + }
16458 17562 }
16459 17563 }
16460 17564
16461 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17565 + QL_PRINT_3(ha, "done\n");
16462 17566
16463 17567 return (DDI_SUCCESS);
16464 17568 }
16465 17569
16466 17570 /*
16467 17571 * ql_setup_msix
16468 17572 * Set up aif MSI-X interrupts
16469 17573 *
16470 17574 * Input:
16471 17575 * ha = adapter state pointer.
16472 17576 *
16473 17577 * Returns:
16474 17578 * DDI_SUCCESS or DDI_FAILURE.
16475 17579 *
16476 17580 * Context:
16477 17581 * Kernel context.
16478 17582 */
16479 17583 static int
16480 17584 ql_setup_msix(ql_adapter_state_t *ha)
16481 17585 {
16482 - uint16_t hwvect;
17586 + int hwvect;
16483 17587 int32_t count = 0;
16484 17588 int32_t avail = 0;
16485 17589 int32_t actual = 0;
16486 17590 int32_t msitype = DDI_INTR_TYPE_MSIX;
16487 17591 int32_t ret;
16488 - uint32_t i;
16489 - ql_ifunc_t itrfun[QL_MSIX_MAXAIF] = {0};
17592 + uint_t i;
16490 17593
16491 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17594 + QL_PRINT_3(ha, "started\n");
16492 17595
16493 17596 if (ql_disable_msix != 0) {
16494 17597 EL(ha, "MSI-X is disabled by user\n");
16495 17598 return (DDI_FAILURE);
16496 17599 }
16497 17600
17601 +#ifdef __x86
17602 + if (get_hwenv() == HW_VMWARE) {
17603 + EL(ha, "running under hypervisor, disabling MSI-X\n");
17604 + return (DDI_FAILURE);
17605 + }
17606 +#endif
17607 +
16498 17608 /*
16499 17609 * MSI-X support is only available on 24xx HBA's that have
16500 17610 * rev A2 parts (revid = 3) or greater.
16501 17611 */
16502 - if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
16503 - (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
16504 - (ha->device_id == 0x8021))) {
17612 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) ||
17613 + (CFG_IST(ha, CFG_CTRL_24XX) && ha->rev_id < 3)) {
16505 17614 EL(ha, "HBA does not support MSI-X\n");
16506 17615 return (DDI_FAILURE);
16507 17616 }
16508 17617
16509 - if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
16510 - EL(ha, "HBA does not support MSI-X (revid)\n");
16511 - return (DDI_FAILURE);
16512 - }
16513 -
16514 17618 /* Per HP, these HP branded HBA's are not supported with MSI-X */
16515 17619 if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
16516 17620 ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
16517 17621 EL(ha, "HBA does not support MSI-X (subdevid)\n");
16518 17622 return (DDI_FAILURE);
16519 17623 }
16520 17624
16521 - /* Get the number of 24xx/25xx MSI-X h/w vectors */
16522 - hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
16523 - ql_pci_config_get16(ha, 0x7e) :
16524 - ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);
16525 -
16526 - EL(ha, "pcie config space hwvect = %d\n", hwvect);
16527 -
16528 - if (hwvect < QL_MSIX_MAXAIF) {
16529 - EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
16530 - QL_MSIX_MAXAIF, hwvect);
16531 - return (DDI_FAILURE);
16532 - }
16533 -
16534 17625 /* Get number of MSI-X interrupts the platform h/w supports */
16535 - if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16536 - DDI_SUCCESS) || count == 0) {
16537 - EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
17626 + if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &hwvect)) !=
17627 + DDI_SUCCESS) || hwvect == 0) {
17628 + EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, hwvect);
16538 17629 return (DDI_FAILURE);
16539 17630 }
17631 + QL_PRINT_10(ha, "ddi_intr_get_nintrs, hwvect=%d\n", hwvect);
16540 17632
16541 17633 /* Get number of available system interrupts */
16542 17634 if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16543 17635 DDI_SUCCESS) || avail == 0) {
16544 17636 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16545 17637 return (DDI_FAILURE);
16546 17638 }
17639 + QL_PRINT_10(ha, "ddi_intr_get_navail, avail=%d\n", avail);
16547 17640
16548 17641 /* Fill out the intr table */
16549 - count = QL_MSIX_MAXAIF;
16550 - itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
16551 - itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;
17642 + count = ha->interrupt_count;
17643 + if (ha->flags & MULTI_QUEUE && count < ha->mq_msix_vectors) {
17644 + count = ha->mq_msix_vectors;
17645 + /* don't exceed the h/w capability */
17646 + if (count > hwvect) {
17647 + count = hwvect;
17648 + }
17649 + }
16552 17650
16553 17651 /* Allocate space for interrupt handles */
16554 17652 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
16555 - if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
16556 - ha->hsize = 0;
16557 - EL(ha, "failed, unable to allocate htable space\n");
16558 - return (DDI_FAILURE);
16559 - }
17653 + ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16560 17654
16561 17655 ha->iflags |= IFLG_INTR_MSIX;
16562 17656
16563 17657 /* Allocate the interrupts */
16564 17658 if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
16565 17659 DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
16566 - actual < QL_MSIX_MAXAIF) {
17660 + actual < ha->interrupt_count) {
16567 17661 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16568 17662 "actual=%xh\n", ret, count, actual);
16569 17663 ql_release_intr(ha);
16570 17664 return (DDI_FAILURE);
16571 17665 }
16572 -
16573 17666 ha->intr_cnt = actual;
17667 + EL(ha, "min=%d, multi-q=%d, req=%d, rcv=%d\n",
17668 + ha->interrupt_count, ha->mq_msix_vectors, count,
17669 + ha->intr_cnt);
16574 17670
16575 17671 /* Get interrupt priority */
16576 - if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16577 - DDI_SUCCESS) {
17672 + if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
16578 17673 EL(ha, "failed, get_pri ret=%xh\n", ret);
16579 17674 ql_release_intr(ha);
16580 17675 return (ret);
16581 17676 }
17677 + ha->intr_pri = DDI_INTR_PRI(i);
16582 17678
16583 17679 /* Add the interrupt handlers */
16584 17680 for (i = 0; i < actual; i++) {
16585 - if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
17681 + if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
16586 17682 (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
16587 17683 EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
16588 17684 actual, ret);
16589 17685 ql_release_intr(ha);
16590 17686 return (ret);
16591 17687 }
16592 17688 }
16593 17689
16594 17690 /*
16595 17691 * duplicate the rest of the intr's
16596 17692 * ddi_intr_dup_handler() isn't working on x86 just yet...
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
16597 17693 */
16598 17694 #ifdef __sparc
16599 17695 for (i = actual; i < hwvect; i++) {
16600 17696 if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
16601 17697 &ha->htable[i])) != DDI_SUCCESS) {
16602 17698 EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
16603 17699 i, actual, ret);
16604 17700 ql_release_intr(ha);
16605 17701 return (ret);
16606 17702 }
17703 + if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
17704 + EL(ha, "failed, intr enable, ret=%xh\n", ret);
17705 + ql_release_intr(ha);
17706 + return (ret);
17707 + }
16607 17708 }
16608 17709 #endif
16609 17710
16610 - /* Setup mutexes */
16611 - if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16612 - EL(ha, "failed, mutex init ret=%xh\n", ret);
16613 - ql_release_intr(ha);
16614 - return (ret);
16615 - }
16616 -
16617 17711 /* Get the capabilities */
16618 17712 (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16619 17713
16620 17714 /* Enable interrupts */
16621 17715 if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16622 - if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
17716 + if ((ret = ddi_intr_block_enable(ha->htable, actual)) !=
16623 17717 DDI_SUCCESS) {
16624 17718 EL(ha, "failed, block enable, ret=%xh\n", ret);
16625 - ql_destroy_mutex(ha);
16626 17719 ql_release_intr(ha);
16627 17720 return (ret);
16628 17721 }
17722 + QL_PRINT_10(ha, "intr_block_enable %d\n", actual);
16629 17723 } else {
16630 - for (i = 0; i < ha->intr_cnt; i++) {
17724 + for (i = 0; i < actual; i++) {
16631 17725 if ((ret = ddi_intr_enable(ha->htable[i])) !=
16632 17726 DDI_SUCCESS) {
16633 17727 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16634 - ql_destroy_mutex(ha);
16635 17728 ql_release_intr(ha);
16636 17729 return (ret);
16637 17730 }
17731 + QL_PRINT_10(ha, "intr_enable %d\n", i);
16638 17732 }
16639 17733 }
16640 17734
16641 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17735 + QL_PRINT_3(ha, "done\n");
16642 17736
16643 17737 return (DDI_SUCCESS);
16644 17738 }
16645 17739
16646 17740 /*
16647 17741 * ql_setup_fixed
16648 17742 * Sets up aif FIXED interrupts
16649 17743 *
16650 17744 * Input:
16651 17745 * ha = adapter state pointer.
16652 17746 *
16653 17747 * Returns:
16654 17748 * DDI_SUCCESS or DDI_FAILURE.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
16655 17749 *
16656 17750 * Context:
16657 17751 * Kernel context.
16658 17752 */
16659 17753 static int
16660 17754 ql_setup_fixed(ql_adapter_state_t *ha)
16661 17755 {
16662 17756 int32_t count = 0;
16663 17757 int32_t actual = 0;
16664 17758 int32_t ret;
16665 - uint32_t i;
17759 + uint_t i;
16666 17760
16667 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17761 + QL_PRINT_3(ha, "started\n");
16668 17762
17763 + if (ql_disable_intx != 0) {
17764 + EL(ha, "INT-X is disabled by user\n");
17765 + return (DDI_FAILURE);
17766 + }
17767 +
16669 17768 /* Get number of fixed interrupts the system supports */
16670 17769 if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
16671 17770 &count)) != DDI_SUCCESS) || count == 0) {
16672 17771 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16673 17772 return (DDI_FAILURE);
16674 17773 }
16675 17774
16676 - ha->iflags |= IFLG_INTR_FIXED;
16677 -
16678 17775 /* Allocate space for interrupt handles */
16679 17776 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16680 17777 ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16681 17778
17779 + ha->iflags |= IFLG_INTR_FIXED;
17780 +
16682 17781 /* Allocate the interrupts */
16683 17782 if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
16684 17783 0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
16685 17784 actual < count) {
16686 17785 EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
16687 17786 "actual=%xh\n", ret, count, actual);
16688 17787 ql_release_intr(ha);
16689 17788 return (DDI_FAILURE);
16690 17789 }
16691 -
16692 17790 ha->intr_cnt = actual;
16693 17791
16694 17792 /* Get interrupt priority */
16695 - if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16696 - DDI_SUCCESS) {
17793 + if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
16697 17794 EL(ha, "failed, get_pri ret=%xh\n", ret);
16698 17795 ql_release_intr(ha);
16699 17796 return (ret);
16700 17797 }
17798 + ha->intr_pri = DDI_INTR_PRI(i);
16701 17799
16702 17800 /* Add the interrupt handlers */
16703 - for (i = 0; i < ha->intr_cnt; i++) {
16704 - if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
17801 + for (i = 0; i < actual; i++) {
17802 + if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
16705 17803 (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
16706 17804 EL(ha, "failed, intr_add ret=%xh\n", ret);
16707 17805 ql_release_intr(ha);
16708 17806 return (ret);
16709 17807 }
16710 17808 }
16711 17809
16712 - /* Setup mutexes */
16713 - if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16714 - EL(ha, "failed, mutex init ret=%xh\n", ret);
16715 - ql_release_intr(ha);
16716 - return (ret);
16717 - }
16718 -
16719 17810 /* Enable interrupts */
16720 - for (i = 0; i < ha->intr_cnt; i++) {
17811 + for (i = 0; i < actual; i++) {
16721 17812 if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
16722 17813 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16723 - ql_destroy_mutex(ha);
16724 17814 ql_release_intr(ha);
16725 17815 return (ret);
16726 17816 }
16727 17817 }
16728 17818
16729 17819 EL(ha, "using FIXED interupts\n");
16730 17820
16731 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17821 + QL_PRINT_3(ha, "done\n");
16732 17822
16733 17823 return (DDI_SUCCESS);
16734 17824 }
16735 17825
16736 17826 /*
16737 - * ql_disable_intr
16738 - * Disables interrupts
16739 - *
16740 - * Input:
16741 - * ha = adapter state pointer.
16742 - *
16743 - * Returns:
16744 - *
16745 - * Context:
16746 - * Kernel context.
16747 - */
16748 -static void
16749 -ql_disable_intr(ql_adapter_state_t *ha)
16750 -{
16751 - uint32_t i, rval;
16752 -
16753 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16754 -
16755 - if (!(ha->iflags & IFLG_INTR_AIF)) {
16756 -
16757 - /* Disable legacy interrupts */
16758 - (void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16759 -
16760 - } else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16761 - (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16762 -
16763 - /* Remove AIF block interrupts (MSI) */
16764 - if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16765 - != DDI_SUCCESS) {
16766 - EL(ha, "failed intr block disable, rval=%x\n", rval);
16767 - }
16768 -
16769 - } else {
16770 -
16771 - /* Remove AIF non-block interrupts (fixed). */
16772 - for (i = 0; i < ha->intr_cnt; i++) {
16773 - if ((rval = ddi_intr_disable(ha->htable[i])) !=
16774 - DDI_SUCCESS) {
16775 - EL(ha, "failed intr disable, intr#=%xh, "
16776 - "rval=%xh\n", i, rval);
16777 - }
16778 - }
16779 - }
16780 -
16781 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16782 -}
16783 -
16784 -/*
16785 17827 * ql_release_intr
16786 17828 * Releases aif legacy interrupt resources
16787 17829 *
16788 17830 * Input:
16789 17831 * ha = adapter state pointer.
16790 17832 *
16791 17833 * Returns:
16792 17834 *
16793 17835 * Context:
16794 17836 * Kernel context.
16795 17837 */
16796 17838 static void
16797 17839 ql_release_intr(ql_adapter_state_t *ha)
16798 17840 {
16799 - int32_t i;
17841 + int32_t i, x;
16800 17842
16801 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17843 + QL_PRINT_3(ha, "started\n");
16802 17844
16803 17845 if (!(ha->iflags & IFLG_INTR_AIF)) {
16804 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16805 - return;
16806 - }
17846 + ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
17847 + } else {
17848 + ha->iflags &= ~(IFLG_INTR_AIF);
17849 + if (ha->htable != NULL && ha->hsize > 0) {
17850 + i = x = (int32_t)ha->hsize /
17851 + (int32_t)sizeof (ddi_intr_handle_t);
17852 + if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
17853 + (void) ddi_intr_block_disable(ha->htable,
17854 + ha->intr_cnt);
17855 + } else {
17856 + while (i-- > 0) {
17857 + if (ha->htable[i] == 0) {
17858 + EL(ha, "htable[%x]=0h\n", i);
17859 + continue;
17860 + }
16807 17861
16808 - ha->iflags &= ~(IFLG_INTR_AIF);
16809 - if (ha->htable != NULL && ha->hsize > 0) {
16810 - i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
16811 - while (i-- > 0) {
16812 - if (ha->htable[i] == 0) {
16813 - EL(ha, "htable[%x]=0h\n", i);
16814 - continue;
17862 + (void) ddi_intr_disable(ha->htable[i]);
17863 + }
16815 17864 }
16816 17865
16817 - (void) ddi_intr_disable(ha->htable[i]);
16818 -
16819 - if (i < ha->intr_cnt) {
16820 - (void) ddi_intr_remove_handler(ha->htable[i]);
17866 + i = x;
17867 + while (i-- > 0) {
17868 + if (i < ha->intr_cnt) {
17869 + (void) ddi_intr_remove_handler(
17870 + ha->htable[i]);
17871 + }
17872 + (void) ddi_intr_free(ha->htable[i]);
16821 17873 }
16822 17874
16823 - (void) ddi_intr_free(ha->htable[i]);
16824 - }
17875 + ha->intr_cnt = 0;
17876 + ha->intr_cap = 0;
16825 17877
16826 - kmem_free(ha->htable, ha->hsize);
16827 - ha->htable = NULL;
17878 + kmem_free(ha->htable, ha->hsize);
17879 + ha->htable = NULL;
17880 + ha->hsize = 0;
17881 + }
16828 17882 }
16829 17883
16830 - ha->hsize = 0;
16831 - ha->intr_cnt = 0;
16832 - ha->intr_pri = 0;
16833 - ha->intr_cap = 0;
17884 + ha->intr_pri = NULL;
16834 17885
16835 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17886 + QL_PRINT_3(ha, "done\n");
16836 17887 }
16837 17888
16838 17889 /*
16839 17890 * ql_legacy_intr
16840 17891 * Sets up legacy interrupts.
16841 17892 *
 16842 17893   * NB: Only to be used if AIF (Advanced Interrupt Framework)
 16843 17894   * is NOT in the kernel.
16844 17895 *
16845 17896 * Input:
16846 17897 * ha = adapter state pointer.
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
16847 17898 *
16848 17899 * Returns:
16849 17900 * DDI_SUCCESS or DDI_FAILURE.
16850 17901 *
16851 17902 * Context:
16852 17903 * Kernel context.
16853 17904 */
16854 17905 static int
16855 17906 ql_legacy_intr(ql_adapter_state_t *ha)
16856 17907 {
16857 - int rval = DDI_SUCCESS;
17908 + int rval;
16858 17909
16859 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17910 + QL_PRINT_3(ha, "started\n");
16860 17911
16861 - /* Setup mutexes */
16862 - if (ql_init_mutex(ha) != DDI_SUCCESS) {
16863 - EL(ha, "failed, mutex init\n");
16864 - return (DDI_FAILURE);
17912 + /* Get iblock cookies to initialize mutexes */
17913 + if ((rval = ddi_get_iblock_cookie(ha->dip, 0, &ha->iblock_cookie)) !=
17914 + DDI_SUCCESS) {
17915 + EL(ha, "failed, get_iblock: %xh\n", rval);
17916 + return (rval);
16865 17917 }
17918 + ha->intr_pri = (void *)ha->iblock_cookie;
16866 17919
16867 17920 /* Setup standard/legacy interrupt handler */
16868 17921 if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16869 17922 (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16870 17923 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16871 17924 QL_NAME, ha->instance);
16872 - ql_destroy_mutex(ha);
16873 - rval = DDI_FAILURE;
17925 + return (rval);
16874 17926 }
17927 + ha->iflags |= IFLG_INTR_LEGACY;
16875 17928
16876 - if (rval == DDI_SUCCESS) {
16877 - ha->iflags |= IFLG_INTR_LEGACY;
17929 + /* Setup mutexes */
17930 + if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
17931 + EL(ha, "failed, mutex init ret=%xh\n", rval);
17932 + ql_release_intr(ha);
17933 + } else {
16878 17934 EL(ha, "using legacy interrupts\n");
16879 17935 }
16880 -
16881 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16882 -
16883 17936 return (rval);
16884 17937 }
16885 17938
16886 17939 /*
16887 17940 * ql_init_mutex
16888 17941 * Initializes mutex's
16889 17942 *
16890 17943 * Input:
16891 17944 * ha = adapter state pointer.
16892 17945 *
16893 17946 * Returns:
16894 17947 * DDI_SUCCESS or DDI_FAILURE.
16895 17948 *
16896 17949 * Context:
16897 17950 * Kernel context.
16898 17951 */
16899 17952 static int
16900 17953 ql_init_mutex(ql_adapter_state_t *ha)
16901 17954 {
16902 - int ret;
16903 - void *intr;
17955 + QL_PRINT_3(ha, "started\n");
16904 17956
16905 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16906 -
16907 - if (ha->iflags & IFLG_INTR_AIF) {
16908 - intr = (void *)(uintptr_t)ha->intr_pri;
16909 - } else {
16910 - /* Get iblock cookies to initialize mutexes */
16911 - if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16912 - &ha->iblock_cookie)) != DDI_SUCCESS) {
16913 - EL(ha, "failed, get_iblock: %xh\n", ret);
16914 - return (DDI_FAILURE);
16915 - }
16916 - intr = (void *)ha->iblock_cookie;
16917 - }
16918 -
16919 17957 /* mutexes to protect the adapter state structure. */
16920 - mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
17958 + mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
16921 17959
16922 - /* mutex to protect the ISP response ring. */
16923 - mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
17960 + /* mutex to protect the ISP request ring. */
17961 + mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
16924 17962
17963 + /* I/O completion queue protection. */
17964 + mutex_init(&ha->comp_q_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17965 + cv_init(&ha->cv_comp_thread, NULL, CV_DRIVER, NULL);
17966 +
16925 17967 /* mutex to protect the mailbox registers. */
16926 - mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
17968 + mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
16927 17969
16928 - /* power management protection */
16929 - mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16930 -
16931 17970 /* Mailbox wait and interrupt conditional variable. */
16932 17971 cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16933 17972 cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16934 17973
16935 - /* mutex to protect the ISP request ring. */
16936 - mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
17974 + /* power management protection */
17975 + mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
16937 17976
16938 17977 /* Unsolicited buffer conditional variable. */
17978 + mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
16939 17979 cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16940 17980
16941 - mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16942 - mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
17981 + /* mutex to protect task daemon context. */
17982 + mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
17983 + cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16943 17984
16944 17985 /* Suspended conditional variable. */
16945 17986 cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16946 17987
16947 - /* mutex to protect task daemon context. */
16948 - mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16949 -
16950 - /* Task_daemon thread conditional variable. */
16951 - cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16952 -
16953 - /* mutex to protect diag port manage interface */
16954 - mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16955 -
16956 17988 /* mutex to protect per instance f/w dump flags and buffer */
16957 - mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
17989 + mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
16958 17990
16959 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17991 + QL_PRINT_3(ha, "done\n");
16960 17992
16961 17993 return (DDI_SUCCESS);
16962 17994 }
16963 17995
16964 17996 /*
16965 17997 * ql_destroy_mutex
16966 17998 * Destroys mutex's
16967 17999 *
16968 18000 * Input:
16969 18001 * ha = adapter state pointer.
16970 18002 *
16971 18003 * Returns:
16972 18004 *
16973 18005 * Context:
16974 18006 * Kernel context.
16975 18007 */
16976 18008 static void
16977 18009 ql_destroy_mutex(ql_adapter_state_t *ha)
16978 18010 {
16979 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
18011 + QL_PRINT_3(ha, "started\n");
16980 18012
16981 18013 mutex_destroy(&ha->dump_mutex);
16982 - mutex_destroy(&ha->portmutex);
18014 + cv_destroy(&ha->cv_dr_suspended);
16983 18015 cv_destroy(&ha->cv_task_daemon);
16984 18016 mutex_destroy(&ha->task_daemon_mutex);
16985 - cv_destroy(&ha->cv_dr_suspended);
16986 - mutex_destroy(&ha->cache_mutex);
16987 - mutex_destroy(&ha->ub_mutex);
16988 18017 cv_destroy(&ha->cv_ub);
16989 - mutex_destroy(&ha->req_ring_mutex);
18018 + mutex_destroy(&ha->ub_mutex);
18019 + mutex_destroy(&ha->pm_mutex);
16990 18020 cv_destroy(&ha->cv_mbx_intr);
16991 18021 cv_destroy(&ha->cv_mbx_wait);
16992 - mutex_destroy(&ha->pm_mutex);
16993 18022 mutex_destroy(&ha->mbx_mutex);
16994 - mutex_destroy(&ha->intr_mutex);
18023 + cv_destroy(&ha->cv_comp_thread);
18024 + mutex_destroy(&ha->comp_q_mutex);
18025 + mutex_destroy(&ha->req_ring_mutex);
16995 18026 mutex_destroy(&ha->mutex);
16996 18027
16997 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
18028 + QL_PRINT_3(ha, "done\n");
16998 18029 }
16999 18030
17000 18031 /*
17001 18032 * ql_fwmodule_resolve
17002 18033 * Loads and resolves external firmware module and symbols
17003 18034 *
17004 18035 * Input:
17005 18036 * ha: adapter state pointer.
17006 18037 *
17007 18038 * Returns:
17008 18039 * ql local function return status code:
17009 18040 * QL_SUCCESS - external f/w module module and symbols resolved
17010 18041 * QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17011 18042 * QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17012 18043 * QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17013 18044 * Context:
17014 18045 * Kernel context.
17015 18046 *
17016 18047 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time. We
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
 17017 18048   * could switch to a tighter scope around actual download (and add an extra
17018 18049 * ddi_modopen for module opens that occur before root is mounted).
17019 18050 *
17020 18051 */
17021 18052 uint32_t
17022 18053 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17023 18054 {
17024 18055 int8_t module[128];
17025 18056 int8_t fw_version[128];
17026 18057 uint32_t rval = QL_SUCCESS;
17027 - caddr_t code, code02;
18058 + caddr_t code, code02, code03;
17028 18059 uint8_t *p_ucfw;
17029 18060 uint16_t *p_usaddr, *p_uslen;
17030 18061 uint32_t *p_uiaddr, *p_uilen, *p_uifw;
17031 - uint32_t *p_uiaddr02, *p_uilen02;
18062 + uint32_t *p_uiaddr02, *p_uilen02, *p_uilen03;
17032 18063 struct fw_table *fwt;
17033 18064 extern struct fw_table fw_table[];
17034 18065
17035 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
18066 + QL_PRINT_3(ha, "started\n");
17036 18067
17037 18068 if (ha->fw_module != NULL) {
17038 18069 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17039 18070 ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17040 18071 ha->fw_subminor_version);
17041 18072 return (rval);
17042 18073 }
17043 18074
17044 18075 /* make sure the fw_class is in the fw_table of supported classes */
17045 18076 for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17046 18077 if (fwt->fw_class == ha->fw_class)
17047 18078 break; /* match */
17048 18079 }
17049 18080 if (fwt->fw_version == NULL) {
17050 18081 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17051 18082 "in driver's fw_table", QL_NAME, ha->instance,
17052 18083 ha->fw_class);
17053 18084 return (QL_FW_NOT_SUPPORTED);
17054 18085 }
17055 18086
17056 18087 /*
17057 18088 * open the module related to the fw_class
17058 18089 */
17059 18090 (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17060 18091 ha->fw_class);
17061 18092
17062 18093 ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17063 18094 if (ha->fw_module == NULL) {
17064 18095 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17065 18096 QL_NAME, ha->instance, module);
17066 18097 return (QL_FWMODLOAD_FAILED);
17067 18098 }
17068 18099
17069 18100 /*
17070 18101 * resolve the fw module symbols, data types depend on fw_class
17071 18102 */
17072 18103
17073 18104 switch (ha->fw_class) {
17074 18105 case 0x2200:
17075 18106 case 0x2300:
17076 18107 case 0x6322:
17077 18108
17078 18109 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17079 18110 NULL)) == NULL) {
17080 18111 rval = QL_FWSYM_NOT_FOUND;
17081 18112 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17082 18113 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
17083 18114 "risc_code_addr01", NULL)) == NULL) {
17084 18115 rval = QL_FWSYM_NOT_FOUND;
17085 18116 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17086 18117 } else if ((p_uslen = ddi_modsym(ha->fw_module,
17087 18118 "risc_code_length01", NULL)) == NULL) {
17088 18119 rval = QL_FWSYM_NOT_FOUND;
17089 18120 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17090 18121 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17091 18122 "firmware_version", NULL)) == NULL) {
17092 18123 rval = QL_FWSYM_NOT_FOUND;
17093 18124 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17094 18125 }
17095 18126
17096 18127 if (rval == QL_SUCCESS) {
17097 18128 ha->risc_fw[0].code = code;
|
↓ open down ↓ |
52 lines elided |
↑ open up ↑ |
17098 18129 ha->risc_fw[0].addr = *p_usaddr;
17099 18130 ha->risc_fw[0].length = *p_uslen;
17100 18131
17101 18132 (void) snprintf(fw_version, sizeof (fw_version),
17102 18133 "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17103 18134 }
17104 18135 break;
17105 18136
17106 18137 case 0x2400:
17107 18138 case 0x2500:
18139 + case 0x2700:
17108 18140 case 0x8100:
18141 + case 0x8301fc:
17109 18142
17110 18143 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17111 18144 NULL)) == NULL) {
17112 18145 rval = QL_FWSYM_NOT_FOUND;
17113 18146 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17114 18147 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17115 18148 "risc_code_addr01", NULL)) == NULL) {
17116 18149 rval = QL_FWSYM_NOT_FOUND;
17117 18150 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17118 18151 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17119 18152 "risc_code_length01", NULL)) == NULL) {
17120 18153 rval = QL_FWSYM_NOT_FOUND;
17121 18154 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17122 18155 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17123 18156 "firmware_version", NULL)) == NULL) {
17124 18157 rval = QL_FWSYM_NOT_FOUND;
17125 18158 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17126 18159 }
17127 18160
17128 18161 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17129 18162 NULL)) == NULL) {
17130 18163 rval = QL_FWSYM_NOT_FOUND;
17131 18164 EL(ha, "failed, f/w module %d rc02 symbol\n", module);
17132 18165 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
17133 18166 "risc_code_addr02", NULL)) == NULL) {
17134 18167 rval = QL_FWSYM_NOT_FOUND;
17135 18168 EL(ha, "failed, f/w module %d rca02 symbol\n", module);
17136 18169 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17137 18170 "risc_code_length02", NULL)) == NULL) {
17138 18171 rval = QL_FWSYM_NOT_FOUND;
17139 18172 EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
17140 18173 }
17141 18174
17142 18175 if (rval == QL_SUCCESS) {
18176 + if (ha->fw_class == 0x2700) {
18177 + if ((code03 = ddi_modsym(ha->fw_module,
18178 + "tmplt_code01", NULL)) == NULL) {
18179 + EL(ha, "failed, f/w module %d "
18180 + "tmplt_code01 symbol\n", module);
18181 + } else if ((p_uilen03 = ddi_modsym(
18182 + ha->fw_module, "tmplt_code_length01",
18183 + NULL)) == NULL) {
18184 + code03 = NULL;
18185 + EL(ha, "failed, f/w module %d "
18186 + "tmplt_code_length01 symbol\n",
18187 + module);
18188 + }
18189 + ha->risc_fw[2].code = code03;
18190 + if ((ha->risc_fw[2].code = code03) != NULL) {
18191 + ha->risc_fw[2].length = *p_uilen03;
18192 + }
18193 + }
17143 18194 ha->risc_fw[0].code = code;
17144 18195 ha->risc_fw[0].addr = *p_uiaddr;
17145 18196 ha->risc_fw[0].length = *p_uilen;
17146 18197 ha->risc_fw[1].code = code02;
17147 18198 ha->risc_fw[1].addr = *p_uiaddr02;
17148 18199 ha->risc_fw[1].length = *p_uilen02;
17149 18200
17150 18201 (void) snprintf(fw_version, sizeof (fw_version),
17151 18202 "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17152 18203 }
17153 18204 break;
17154 18205
17155 18206 default:
17156 18207 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17157 18208 rval = QL_FW_NOT_SUPPORTED;
17158 18209 }
17159 18210
17160 18211 if (rval != QL_SUCCESS) {
17161 18212 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17162 18213 "module %s (%x)", QL_NAME, ha->instance, module, rval);
17163 18214 if (ha->fw_module != NULL) {
17164 18215 (void) ddi_modclose(ha->fw_module);
17165 18216 ha->fw_module = NULL;
17166 18217 }
17167 18218 } else {
17168 18219 /*
17169 18220 * check for firmware version mismatch between module and
17170 18221 * compiled in fw_table version.
17171 18222 */
17172 18223
17173 18224 if (strcmp(fwt->fw_version, fw_version) != 0) {
17174 18225
|
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
17175 18226 /*
17176 18227 * If f/w / driver version mismatches then
17177 18228 * return a successful status -- however warn
17178 18229 * the user that this is NOT recommended.
17179 18230 */
17180 18231
17181 18232 cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17182 18233 "mismatch for %x: driver-%s module-%s", QL_NAME,
17183 18234 ha->instance, ha->fw_class, fwt->fw_version,
17184 18235 fw_version);
17185 -
17186 - ha->cfg_flags |= CFG_FW_MISMATCH;
17187 - } else {
17188 - ha->cfg_flags &= ~CFG_FW_MISMATCH;
17189 18236 }
17190 18237 }
17191 18238
17192 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
18239 + QL_PRINT_3(ha, "done\n");
17193 18240
17194 18241 return (rval);
17195 18242 }
17196 18243
17197 18244 /*
17198 18245 * ql_port_state
17199 18246 * Set the state on all adapter ports.
17200 18247 *
17201 18248 * Input:
17202 18249 * ha: parent adapter state pointer.
17203 18250 * state: port state.
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
17204 18251 * flags: task daemon flags to set.
17205 18252 *
17206 18253 * Context:
17207 18254 * Interrupt or Kernel context, no mailbox commands allowed.
17208 18255 */
17209 18256 void
17210 18257 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17211 18258 {
17212 18259 ql_adapter_state_t *vha;
17213 18260
17214 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
18261 + QL_PRINT_3(ha, "started\n");
17215 18262
17216 18263 TASK_DAEMON_LOCK(ha);
17217 18264 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17218 18265 if (FC_PORT_STATE_MASK(vha->state) != state) {
17219 18266 vha->state = state != FC_STATE_OFFLINE ?
17220 18267 (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17221 18268 vha->task_daemon_flags |= flags;
17222 18269 }
17223 18270 }
17224 18271 ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17225 18272 TASK_DAEMON_UNLOCK(ha);
17226 18273
17227 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
18274 + QL_PRINT_3(ha, "done\n");
17228 18275 }
17229 18276
17230 18277 /*
17231 - * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
18278 + * ql_el_trace_alloc - Construct an extended logging trace descriptor.
17232 18279 *
17233 18280 * Input: Pointer to the adapter state structure.
17234 - * Returns: Success or Failure.
17235 18281 * Context: Kernel context.
17236 18282 */
17237 -int
17238 -ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
18283 +void
18284 +ql_el_trace_alloc(ql_adapter_state_t *ha)
17239 18285 {
17240 - int rval = DDI_SUCCESS;
18286 + ql_trace_entry_t *entry;
18287 + size_t maxsize;
17241 18288
17242 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
18289 + ha->ql_trace_desc =
18290 + (ql_trace_desc_t *)kmem_zalloc(
18291 + sizeof (ql_trace_desc_t), KM_SLEEP);
17243 18292
17244 - ha->el_trace_desc =
17245 - (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
18293 + /* ql_log_entries could be adjusted in /etc/system */
18294 + maxsize = ql_log_entries * sizeof (ql_trace_entry_t);
18295 + entry = kmem_zalloc(maxsize, KM_SLEEP);
17246 18296
17247 - if (ha->el_trace_desc == NULL) {
17248 - cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17249 - QL_NAME, ha->instance);
17250 - rval = DDI_FAILURE;
17251 - } else {
17252 - ha->el_trace_desc->next = 0;
17253 - ha->el_trace_desc->trace_buffer =
17254 - (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
18297 + mutex_init(&ha->ql_trace_desc->mutex, NULL,
18298 + MUTEX_DRIVER, NULL);
17255 18299
17256 - if (ha->el_trace_desc->trace_buffer == NULL) {
17257 - cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17258 - QL_NAME, ha->instance);
17259 - kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17260 - rval = DDI_FAILURE;
17261 - } else {
17262 - ha->el_trace_desc->trace_buffer_size =
17263 - EL_TRACE_BUF_SIZE;
17264 - mutex_init(&ha->el_trace_desc->mutex, NULL,
17265 - MUTEX_DRIVER, NULL);
17266 - }
17267 - }
18300 + ha->ql_trace_desc->trace_buffer = entry;
18301 + ha->ql_trace_desc->trace_buffer_size = maxsize;
18302 + ha->ql_trace_desc->nindex = 0;
17268 18303
17269 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17270 -
17271 - return (rval);
18304 + ha->ql_trace_desc->nentries = ql_log_entries;
18305 + ha->ql_trace_desc->start = ha->ql_trace_desc->end = 0;
18306 + ha->ql_trace_desc->csize = 0;
18307 + ha->ql_trace_desc->count = 0;
17272 18308 }
17273 18309
17274 18310 /*
17275 - * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
18311 + * ql_el_trace_dealloc - Destroy an extended logging trace descriptor.
17276 18312 *
17277 18313 * Input: Pointer to the adapter state structure.
17278 - * Returns: Success or Failure.
17279 18314 * Context: Kernel context.
17280 18315 */
17281 -int
17282 -ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
18316 +void
18317 +ql_el_trace_dealloc(ql_adapter_state_t *ha)
17283 18318 {
17284 - int rval = DDI_SUCCESS;
17285 -
17286 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17287 -
17288 - if (ha->el_trace_desc == NULL) {
17289 - cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17290 - QL_NAME, ha->instance);
17291 - rval = DDI_FAILURE;
17292 - } else {
17293 - if (ha->el_trace_desc->trace_buffer != NULL) {
17294 - kmem_free(ha->el_trace_desc->trace_buffer,
17295 - ha->el_trace_desc->trace_buffer_size);
18319 + if (ha->ql_trace_desc != NULL) {
18320 + if (ha->ql_trace_desc->trace_buffer != NULL) {
18321 + kmem_free(ha->ql_trace_desc->trace_buffer,
18322 + ha->ql_trace_desc->trace_buffer_size);
17296 18323 }
17297 - mutex_destroy(&ha->el_trace_desc->mutex);
17298 - kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
18324 + mutex_destroy(&ha->ql_trace_desc->mutex);
18325 + kmem_free(ha->ql_trace_desc,
18326 + sizeof (ql_trace_desc_t));
17299 18327 }
17300 -
17301 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17302 -
17303 - return (rval);
17304 18328 }
17305 18329
17306 18330 /*
17307 18331 * els_cmd_text - Return a pointer to a string describing the command
17308 18332 *
17309 18333 * Input: els_cmd = the els command opcode.
17310 18334 * Returns: pointer to a string.
17311 18335 * Context: Kernel context.
17312 18336 */
17313 18337 char *
17314 18338 els_cmd_text(int els_cmd)
17315 18339 {
17316 18340 cmd_table_t *entry = &els_cmd_tbl[0];
17317 18341
17318 18342 return (cmd_text(entry, els_cmd));
17319 18343 }
17320 18344
17321 18345 /*
17322 18346 * mbx_cmd_text - Return a pointer to a string describing the command
17323 18347 *
17324 18348 * Input: mbx_cmd = the mailbox command opcode.
17325 18349 * Returns: pointer to a string.
17326 18350 * Context: Kernel context.
17327 18351 */
17328 18352 char *
17329 18353 mbx_cmd_text(int mbx_cmd)
17330 18354 {
17331 18355 cmd_table_t *entry = &mbox_cmd_tbl[0];
17332 18356
17333 18357 return (cmd_text(entry, mbx_cmd));
17334 18358 }
17335 18359
17336 18360 /*
17337 18361 * cmd_text Return a pointer to a string describing the command
17338 18362 *
17339 18363 * Input: entry = the command table
17340 18364 * cmd = the command.
17341 18365 * Returns: pointer to a string.
17342 18366 * Context: Kernel context.
17343 18367 */
17344 18368 char *
17345 18369 cmd_text(cmd_table_t *entry, int cmd)
|
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
17346 18370 {
17347 18371 for (; entry->cmd != 0; entry++) {
17348 18372 if (entry->cmd == cmd) {
17349 18373 break;
17350 18374 }
17351 18375 }
17352 18376 return (entry->string);
17353 18377 }
17354 18378
17355 18379 /*
17356 - * ql_els_24xx_mbox_cmd_iocb - els request indication.
18380 + * ql_els_24xx_iocb
18381 + * els request indication.
17357 18382 *
17358 - * Input: ha = adapter state pointer.
17359 - * srb = scsi request block pointer.
17360 - * arg = els passthru entry iocb pointer.
18383 + * Input:
18384 + * ha: adapter state pointer.
18385 + * req_q: request queue structure pointer.
18386 + * srb: scsi request block pointer.
18387 + * arg: els passthru entry iocb pointer.
18388 + *
17361 18389 * Returns:
18390 + *
17362 18391 * Context: Kernel context.
17363 18392 */
17364 18393 void
17365 -ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
18394 +ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *srb,
18395 + void *arg)
17366 18396 {
17367 18397 els_descriptor_t els_desc;
17368 18398
17369 18399 /* Extract the ELS information */
17370 - ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
18400 + ql_fca_isp_els_request(ha, req_q, (fc_packet_t *)srb->pkt,
18401 + &els_desc);
17371 18402
17372 18403 /* Construct the passthru entry */
17373 18404 ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17374 18405
17375 18406 /* Ensure correct endianness */
17376 18407 ql_isp_els_handle_cmd_endian(ha, srb);
17377 18408 }
17378 18409
17379 18410 /*
17380 - * ql_fca_isp_els_request - Extract into an els descriptor the info required
17381 - * to build an els_passthru iocb from an fc packet.
18411 + * ql_fca_isp_els_request
18412 + * Extract into an els descriptor the info required
18413 + * to build an els_passthru iocb from an fc packet.
17382 18414 *
17383 - * Input: ha = adapter state pointer.
17384 - * pkt = fc packet pointer
17385 - * els_desc = els descriptor pointer
17386 - * Returns:
17387 - * Context: Kernel context.
18415 + * Input:
18416 + * ha: adapter state pointer.
18417 + * req_q: request queue structure pointer.
18418 + * pkt: fc packet pointer
18419 + * els_desc: els descriptor pointer
18420 + *
18421 + * Context:
18422 + * Kernel context.
17388 18423 */
17389 18424 static void
17390 -ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
17391 - els_descriptor_t *els_desc)
18425 +ql_fca_isp_els_request(ql_adapter_state_t *ha, ql_request_q_t *req_q,
18426 + fc_packet_t *pkt, els_descriptor_t *els_desc)
17392 18427 {
17393 18428 ls_code_t els;
17394 18429
17395 18430 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17396 18431 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17397 18432
17398 18433 els_desc->els = els.ls_code;
17399 18434
17400 - els_desc->els_handle = ha->hba_buf.acc_handle;
18435 + els_desc->els_handle = req_q->req_ring.acc_handle;
17401 18436 els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
17402 18437 els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
17403 18438 /* if n_port_handle is not < 0x7d use 0 */
17404 18439 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17405 18440 els_desc->n_port_handle = ha->n_port->n_port_handle;
17406 18441 } else {
17407 18442 els_desc->n_port_handle = 0;
17408 18443 }
17409 18444 els_desc->control_flags = 0;
17410 18445 els_desc->cmd_byte_count = pkt->pkt_cmdlen;
17411 18446 /*
17412 18447 * Transmit DSD. This field defines the Fibre Channel Frame payload
17413 18448 * (without the frame header) in system memory.
17414 18449 */
17415 18450 els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
17416 18451 els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
17417 18452 els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
17418 18453
17419 18454 els_desc->rsp_byte_count = pkt->pkt_rsplen;
17420 18455 /*
17421 18456 * Receive DSD. This field defines the ELS response payload buffer
17422 18457 * for the ISP24xx firmware transferring the received ELS
17423 18458 * response frame to a location in host memory.
17424 18459 */
17425 18460 els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
17426 18461 els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
17427 18462 els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
17428 18463 }
17429 18464
/*
 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
 * using the els descriptor.
 *
 * Input:	els_desc = els descriptor pointer.
 *		els_entry = els passthru entry iocb pointer.
 * Returns:
 * Context:	Kernel context.
 */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.
	 * All stores go through the descriptor's DMA access handle
	 * (ddi_put*) so the entry lands in device endianness.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination and source port ids, one byte at a time. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/* Load transmit data segments and count. */
	/*
	 * ptr32 walks the dseg area in place: three words of transmit
	 * DSD (addr lo/hi, length) followed by three words of receive
	 * DSD — the increments below rely on that exact layout.
	 */
	ptr32 = (uint32_t *)&els_entry->dseg;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17487 18522
17488 18523 /*
17489 18524 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17490 18525 * in host memory.
17491 18526 *
17492 18527 * Input: ha = adapter state pointer.
17493 18528 * srb = scsi request block
17494 18529 * Returns:
17495 18530 * Context: Kernel context.
17496 18531 */
17497 18532 void
17498 18533 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17499 18534 {
17500 18535 ls_code_t els;
17501 18536 fc_packet_t *pkt;
17502 18537 uint8_t *ptr;
17503 18538
17504 18539 pkt = srb->pkt;
17505 18540
17506 18541 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17507 18542 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17508 18543
17509 18544 ptr = (uint8_t *)pkt->pkt_cmd;
17510 18545
17511 18546 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17512 18547 }
17513 18548
/*
 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
 *				  in host memory.
 * Input:	ha = adapter state pointer.
 *		srb = scsi request block
 * Returns:
 * Context:	Kernel context.
 */
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t els;
	fc_packet_t *pkt;
	uint8_t *ptr;

	pkt = srb->pkt;

	/* The opcode is taken from the command buffer, not the response. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	ptr = (uint8_t *)pkt->pkt_resp;
	/*
	 * NOTE(review): unlike the cmd path, the opcode word is byte
	 * swapped before use — presumably because the command buffer
	 * was already swapped by ql_isp_els_handle_cmd_endian() when
	 * the request was issued; confirm against the issue path.
	 */
	BIG_ENDIAN_32(&els);
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17538 18573
/*
 * ql_isp_els_handle_endian - els requests/responses must be in big endian
 *			      in host memory.
 *
 * Walks the payload buffer in place, byte swapping each multi-byte
 * field according to the fixed layout of the given ELS command.
 * Fields that are byte arrays (names, class parameters) are skipped.
 *
 * Input:	ha = adapter state pointer.
 *		ptr = els request/response buffer pointer.
 *		ls_code = els command code.
 * Returns:
 * Context:	Kernel context.
 */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Other ELS commands pass through unswapped. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17606 18641
/*
 * ql_n_port_plogi
 *	In N port 2 N port topology where an N Port has logged in with the
 *	firmware because it has the N_Port login initiative, we send up
 *	a plogi by proxy which stimulates the login procedure to continue.
 *
 * Input:
 *	ha = adapter state pointer.
 * Returns:
 *	QL_SUCCESS (always — lookup failures are only logged via EL).
 *
 * Context:
 *	Kernel context.
 */
static int
ql_n_port_plogi(ql_adapter_state_t *ha)
{
	int		rval;
	ql_tgt_t	*tq = NULL;
	ql_head_t done_q = { NULL, NULL };

	rval = QL_SUCCESS;

	if (ha->topology & QL_N_PORT) {
		/* if we're doing this the n_port_handle must be good */
		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
			tq = ql_loop_id_to_queue(ha,
			    ha->n_port->n_port_handle);
			if (tq != NULL) {
				(void) ql_send_plogi(ha, tq, &done_q);
			} else {
				/*
				 * NOTE(review): tq (a pointer) is logged
				 * with %x — verify EL()'s format handling
				 * on 64-bit; %p would be conventional.
				 */
				EL(ha, "n_port_handle = %x, tq = %x\n",
				    ha->n_port->n_port_handle, tq);
			}
		} else {
			EL(ha, "n_port_handle = %x, tq = %x\n",
			    ha->n_port->n_port_handle, tq);
		}
		/* Complete any commands queued by ql_send_plogi(). */
		if (done_q.first != NULL) {
			ql_done(done_q.first, B_FALSE);
		}
	}
	return (rval);
}
17650 18685
17651 18686 /*
17652 18687 * Compare two WWNs. The NAA is omitted for comparison.
17653 18688 *
17654 18689 * Note particularly that the indentation used in this
17655 18690 * function isn't according to Sun recommendations. It
17656 18691 * is indented to make reading a bit easy.
17657 18692 *
17658 18693 * Return Values:
17659 18694 * if first == second return 0
17660 18695 * if first > second return 1
17661 18696 * if first < second return -1
17662 18697 */
18698 +/* ARGSUSED */
17663 18699 int
17664 18700 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17665 18701 {
17666 18702 la_wwn_t t1, t2;
17667 18703 int rval;
17668 18704
17669 - EL(ha, "WWPN=%08x%08x\n",
17670 - BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17671 - EL(ha, "WWPN=%08x%08x\n",
17672 - BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17673 18705 /*
17674 18706 * Fibre Channel protocol is big endian, so compare
17675 18707 * as big endian values
17676 18708 */
17677 18709 t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17678 18710 t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17679 18711
17680 18712 t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17681 18713 t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17682 18714
17683 18715 if (t1.i_wwn[0] == t2.i_wwn[0]) {
17684 18716 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17685 18717 rval = 0;
17686 18718 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17687 18719 rval = 1;
17688 18720 } else {
17689 18721 rval = -1;
17690 18722 }
17691 18723 } else {
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
17692 18724 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17693 18725 rval = 1;
17694 18726 } else {
17695 18727 rval = -1;
17696 18728 }
17697 18729 }
17698 18730 return (rval);
17699 18731 }
17700 18732
17701 18733 /*
17702 - * ql_wait_for_td_stop
17703 - * Wait for task daemon to stop running. Internal command timeout
17704 - * is approximately 30 seconds, so it may help in some corner
17705 - * cases to wait that long
17706 - *
17707 - * Input:
17708 - * ha = adapter state pointer.
17709 - *
17710 - * Returns:
17711 - * DDI_SUCCESS or DDI_FAILURE.
17712 - *
17713 - * Context:
17714 - * Kernel context.
17715 - */
17716 -
17717 -static int
17718 -ql_wait_for_td_stop(ql_adapter_state_t *ha)
17719 -{
17720 - int rval = DDI_FAILURE;
17721 - UINT16 wait_cnt;
17722 -
17723 - for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17724 - /* The task daemon clears the stop flag on exit. */
17725 - if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17726 - if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17727 - ddi_in_panic()) {
17728 - drv_usecwait(10000);
17729 - } else {
17730 - delay(drv_usectohz(10000));
17731 - }
17732 - } else {
17733 - rval = DDI_SUCCESS;
17734 - break;
17735 - }
17736 - }
17737 - return (rval);
17738 -}
17739 -
17740 -/*
17741 18734 * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17742 18735 *
17743 18736 * Input: Pointer to the adapter state structure.
17744 18737 * Returns: Success or Failure.
17745 18738 * Context: Kernel context.
17746 18739 */
17747 18740 int
17748 18741 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17749 18742 {
17750 18743 int rval = DDI_SUCCESS;
17751 18744
17752 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
18745 + QL_PRINT_3(ha, "started\n");
17753 18746
17754 18747 ha->nvram_cache =
17755 18748 (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17756 18749 KM_SLEEP);
17757 18750
17758 18751 if (ha->nvram_cache == NULL) {
17759 18752 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17760 18753 " descriptor", QL_NAME, ha->instance);
17761 18754 rval = DDI_FAILURE;
17762 18755 } else {
17763 - if (CFG_IST(ha, CFG_CTRL_24258081)) {
18756 + if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
17764 18757 ha->nvram_cache->size = sizeof (nvram_24xx_t);
17765 18758 } else {
17766 18759 ha->nvram_cache->size = sizeof (nvram_t);
17767 18760 }
17768 18761 ha->nvram_cache->cache =
17769 18762 (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17770 18763 if (ha->nvram_cache->cache == NULL) {
17771 18764 cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17772 18765 QL_NAME, ha->instance);
17773 18766 kmem_free(ha->nvram_cache,
17774 18767 sizeof (nvram_cache_desc_t));
17775 18768 ha->nvram_cache = 0;
17776 18769 rval = DDI_FAILURE;
17777 18770 } else {
17778 - mutex_init(&ha->nvram_cache->mutex, NULL,
17779 - MUTEX_DRIVER, NULL);
17780 18771 ha->nvram_cache->valid = 0;
17781 18772 }
17782 18773 }
17783 18774
17784 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
18775 + QL_PRINT_3(ha, "done\n");
17785 18776
17786 18777 return (rval);
17787 18778 }
17788 18779
17789 18780 /*
17790 18781 * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17791 18782 *
17792 18783 * Input: Pointer to the adapter state structure.
17793 18784 * Returns: Success or Failure.
17794 18785 * Context: Kernel context.
17795 18786 */
17796 18787 int
17797 18788 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17798 18789 {
17799 18790 int rval = DDI_SUCCESS;
17800 18791
17801 - QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
18792 + QL_PRINT_3(ha, "started\n");
17802 18793
17803 18794 if (ha->nvram_cache == NULL) {
17804 18795 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17805 18796 QL_NAME, ha->instance);
17806 18797 rval = DDI_FAILURE;
17807 18798 } else {
17808 18799 if (ha->nvram_cache->cache != NULL) {
17809 18800 kmem_free(ha->nvram_cache->cache,
17810 18801 ha->nvram_cache->size);
17811 18802 }
17812 - mutex_destroy(&ha->nvram_cache->mutex);
17813 18803 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17814 18804 }
17815 18805
17816 - QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
18806 + QL_PRINT_3(ha, "done\n");
17817 18807
17818 18808 return (rval);
17819 18809 }
17820 18810
17821 18811 /*
17822 - * ql_process_idc_event - Handle an Inter-Driver Communication async event.
18812 + * ql_plogi_params_desc_ctor - Construct an plogi retry params descriptor.
17823 18813 *
17824 18814 * Input: Pointer to the adapter state structure.
17825 - * Returns: void
18815 + * Returns: Success or Failure.
17826 18816 * Context: Kernel context.
17827 18817 */
18818 +int
18819 +ql_plogi_params_desc_ctor(ql_adapter_state_t *ha)
18820 +{
18821 + int rval = DDI_SUCCESS;
18822 +
18823 + QL_PRINT_3(ha, "started\n");
18824 +
18825 + ha->plogi_params =
18826 + (plogi_params_desc_t *)kmem_zalloc(sizeof (plogi_params_desc_t),
18827 + KM_SLEEP);
18828 +
18829 + if (ha->plogi_params == NULL) {
18830 + cmn_err(CE_WARN, "%s(%d): can't construct plogi params"
18831 + " descriptor", QL_NAME, ha->instance);
18832 + rval = DDI_FAILURE;
18833 + } else {
18834 + /* default initializers. */
18835 + ha->plogi_params->retry_cnt = QL_PLOGI_RETRY_CNT;
18836 + ha->plogi_params->retry_dly_usec = QL_PLOGI_RETRY_DLY_USEC;
18837 + }
18838 +
18839 + QL_PRINT_3(ha, "done\n");
18840 +
18841 + return (rval);
18842 +}
18843 +
18844 +/*
18845 + * ql_plogi_params_desc_dtor - Destroy an plogi retry params descriptor.
18846 + *
18847 + * Input: Pointer to the adapter state structure.
18848 + * Returns: Success or Failure.
18849 + * Context: Kernel context.
18850 + */
18851 +int
18852 +ql_plogi_params_desc_dtor(ql_adapter_state_t *ha)
18853 +{
18854 + int rval = DDI_SUCCESS;
18855 +
18856 + QL_PRINT_3(ha, "started\n");
18857 +
18858 + if (ha->plogi_params == NULL) {
18859 + cmn_err(CE_WARN, "%s(%d): can't destroy plogi params"
18860 + " descriptor", QL_NAME, ha->instance);
18861 + rval = DDI_FAILURE;
18862 + } else {
18863 + kmem_free(ha->plogi_params, sizeof (plogi_params_desc_t));
18864 + }
18865 +
18866 + QL_PRINT_3(ha, "done\n");
18867 +
18868 + return (rval);
18869 +}
18870 +
18871 +/*
18872 + * ql_toggle_loop_state
18873 + * Changes looop state to offline and then online.
18874 + *
18875 + * Input:
18876 + * ha: adapter state pointer.
18877 + *
18878 + * Context:
18879 + * Kernel context.
18880 + */
18881 +void
18882 +ql_toggle_loop_state(ql_adapter_state_t *ha)
18883 +{
18884 + uint32_t timer;
18885 +
18886 + if (LOOP_READY(ha)) {
18887 + ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
18888 + ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
18889 + for (timer = 30; timer; timer--) {
18890 + if (!(ha->task_daemon_flags & FC_STATE_CHANGE)) {
18891 + break;
18892 + }
18893 + delay(100);
18894 + }
18895 + ql_loop_online(ha);
18896 + }
18897 +}
18898 +
18899 +/*
18900 + * ql_create_queues
18901 + * Allocate request/response queues.
18902 + *
18903 + * Input:
18904 + * ha: adapter state pointer.
18905 + *
18906 + * Returns:
18907 + * ql driver local function return status codes
18908 + *
18909 + * Context:
18910 + * Kernel context.
18911 + */
18912 +static int
18913 +ql_create_queues(ql_adapter_state_t *ha)
18914 +{
18915 + int rval;
18916 + uint16_t cnt;
18917 +
18918 + QL_PRINT_10(ha, "started\n");
18919 +
18920 + if (ha->req_q[0] != NULL) {
18921 + QL_PRINT_10(ha, "done, queues already exist\n");
18922 + return (QL_SUCCESS);
18923 + }
18924 + if (ha->vp_index != 0) {
18925 + QL_PRINT_10(ha, "done, no multi-req-q \n");
18926 + ha->req_q[0] = ha->pha->req_q[0];
18927 + ha->req_q[1] = ha->pha->req_q[1];
18928 + ha->rsp_queues = ha->pha->rsp_queues;
18929 + return (QL_SUCCESS);
18930 + }
18931 +
18932 + /* Setup request queue buffer pointers. */
18933 + ha->req_q[0] = kmem_zalloc(sizeof (ql_request_q_t), KM_SLEEP);
18934 +
18935 + /* Allocate request queue. */
18936 + ha->req_q[0]->req_entry_cnt = REQUEST_ENTRY_CNT;
18937 + ha->req_q[0]->req_ring.size = ha->req_q[0]->req_entry_cnt *
18938 + REQUEST_ENTRY_SIZE;
18939 + if (ha->flags & QUEUE_SHADOW_PTRS) {
18940 + ha->req_q[0]->req_ring.size += SHADOW_ENTRY_SIZE;
18941 + }
18942 + ha->req_q[0]->req_ring.type = LITTLE_ENDIAN_DMA;
18943 + ha->req_q[0]->req_ring.max_cookie_count = 1;
18944 + ha->req_q[0]->req_ring.alignment = 64;
18945 + if ((rval = ql_alloc_phys(ha, &ha->req_q[0]->req_ring, KM_SLEEP)) !=
18946 + QL_SUCCESS) {
18947 + EL(ha, "request queue status=%xh", rval);
18948 + ql_delete_queues(ha);
18949 + return (rval);
18950 + }
18951 + if (ha->flags & QUEUE_SHADOW_PTRS) {
18952 + ha->req_q[0]->req_out_shadow_ofst =
18953 + ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18954 + ha->req_q[0]->req_out_shadow_ptr = (uint32_t *)
18955 + ((caddr_t)ha->req_q[0]->req_ring.bp +
18956 + ha->req_q[0]->req_out_shadow_ofst);
18957 + }
18958 + ha->fw_transfer_size = ha->req_q[0]->req_ring.size;
18959 + if (ha->flags & MULTI_QUEUE) {
18960 + ha->req_q[0]->mbar_req_in = MBAR2_REQ_IN;
18961 + ha->req_q[0]->mbar_req_out = MBAR2_REQ_OUT;
18962 + if (ha->req_q[0]->mbar_req_in >= ha->mbar_size) {
18963 + EL(ha, "req_q index=0 exceeds mbar size=%xh",
18964 + ha->mbar_size);
18965 + ql_delete_queues(ha);
18966 + return (QL_FUNCTION_PARAMETER_ERROR);
18967 + }
18968 + }
18969 +
18970 + /* Allocate response queues. */
18971 + if (ha->rsp_queues == NULL) {
18972 + if (ha->intr_cnt > 1) {
18973 + ha->rsp_queues_cnt = (uint8_t)(ha->intr_cnt - 1);
18974 + } else {
18975 + ha->rsp_queues_cnt = 1;
18976 + }
18977 + ha->io_min_rsp_q_number = 0;
18978 + if (ha->rsp_queues_cnt > 1) {
18979 + /* Setup request queue buffer pointers. */
18980 + ha->req_q[1] = kmem_zalloc(sizeof (ql_request_q_t),
18981 + KM_SLEEP);
18982 +
18983 + /* Allocate request queue. */
18984 + ha->req_q[1]->req_entry_cnt = REQUEST_ENTRY_CNT;
18985 + ha->req_q[1]->req_ring.size =
18986 + ha->req_q[1]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18987 + if (ha->flags & QUEUE_SHADOW_PTRS) {
18988 + ha->req_q[1]->req_ring.size +=
18989 + SHADOW_ENTRY_SIZE;
18990 + }
18991 + ha->req_q[1]->req_ring.type = LITTLE_ENDIAN_DMA;
18992 + ha->req_q[1]->req_ring.max_cookie_count = 1;
18993 + ha->req_q[1]->req_ring.alignment = 64;
18994 + if ((rval = ql_alloc_phys(ha, &ha->req_q[1]->req_ring,
18995 + KM_SLEEP)) != QL_SUCCESS) {
18996 + EL(ha, "ha request queue status=%xh", rval);
18997 + ql_delete_queues(ha);
18998 + return (rval);
18999 + }
19000 + if (ha->flags & QUEUE_SHADOW_PTRS) {
19001 + ha->req_q[1]->req_out_shadow_ofst =
19002 + ha->req_q[1]->req_entry_cnt *
19003 + REQUEST_ENTRY_SIZE;
19004 + ha->req_q[1]->req_out_shadow_ptr = (uint32_t *)
19005 + ((caddr_t)ha->req_q[1]->req_ring.bp +
19006 + ha->req_q[1]->req_out_shadow_ofst);
19007 + }
19008 + ha->req_q[1]->req_q_number = 1;
19009 + if (ha->flags & MULTI_QUEUE) {
19010 + ha->req_q[1]->mbar_req_in =
19011 + ha->mbar_queue_offset + MBAR2_REQ_IN;
19012 + ha->req_q[1]->mbar_req_out =
19013 + ha->mbar_queue_offset + MBAR2_REQ_OUT;
19014 + if (ha->req_q[1]->mbar_req_in >=
19015 + ha->mbar_size) {
19016 + EL(ha, "ha req_q index=1 exceeds mbar "
19017 + "size=%xh", ha->mbar_size);
19018 + ql_delete_queues(ha);
19019 + return (QL_FUNCTION_PARAMETER_ERROR);
19020 + }
19021 + }
19022 + }
19023 +
19024 + /* Allocate enough rsp_queue descriptors for IRM */
19025 + ha->rsp_queues_size = (ha->hsize / sizeof (ddi_intr_handle_t)) *
19026 + sizeof (ql_response_q_t *);
19027 + ha->rsp_queues = kmem_zalloc(ha->rsp_queues_size, KM_SLEEP);
19028 +
19029 + /* Create rsp_queues for the current rsp_queue_cnt */
19030 + for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19031 + rval = ql_create_rsp_queue(ha, cnt);
19032 + if (rval != QL_SUCCESS) {
19033 + ql_delete_queues(ha);
19034 + return (rval);
19035 + }
19036 + }
19037 + }
19038 +
19039 + if (CFG_IST(ha, CFG_FCIP_TYPE_1)) {
19040 + /* Allocate IP receive queue. */
19041 + ha->rcv_ring.size = RCVBUF_QUEUE_SIZE;
19042 + ha->rcv_ring.type = LITTLE_ENDIAN_DMA;
19043 + ha->rcv_ring.max_cookie_count = 1;
19044 + ha->rcv_ring.alignment = 64;
19045 + if ((rval = ql_alloc_phys(ha, &ha->rcv_ring, KM_SLEEP)) !=
19046 + QL_SUCCESS) {
19047 + EL(ha, "receive queue status=%xh", rval);
19048 + ql_delete_queues(ha);
19049 + return (rval);
19050 + }
19051 + }
19052 +
19053 + QL_PRINT_10(ha, "done\n");
19054 +
19055 + return (rval);
19056 +}
19057 +
19058 +/*
19059 + * ql_create_rsp_queue
19060 + * Allocate a response queues.
19061 + *
19062 + * Input:
19063 + * ha: adapter state pointer.
19064 + *
19065 + * Returns:
19066 + * ql driver local function return status codes
19067 + *
19068 + * Context:
19069 + * Kernel context.
19070 + */
19071 +static int
19072 +ql_create_rsp_queue(ql_adapter_state_t *ha, uint16_t rsp_q_indx)
19073 +{
19074 + ql_response_q_t *rsp_q;
19075 + int rval = QL_SUCCESS;
19076 +
19077 + QL_PRINT_3(ha, "started\n");
19078 +
19079 + ha->rsp_queues[rsp_q_indx] = rsp_q =
19080 + kmem_zalloc(sizeof (ql_response_q_t), KM_SLEEP);
19081 + /* ISP response ring and interrupt protection. */
19082 + mutex_init(&rsp_q->intr_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
19083 + rsp_q->rsp_q_number = rsp_q_indx;
19084 + rsp_q->msi_x_vector = (uint16_t)(rsp_q_indx + 1);
19085 + if (ha->flags & MULTI_QUEUE) {
19086 + rsp_q->mbar_rsp_in = rsp_q->rsp_q_number *
19087 + ha->mbar_queue_offset + MBAR2_RESP_IN;
19088 + rsp_q->mbar_rsp_out = rsp_q->rsp_q_number *
19089 + ha->mbar_queue_offset + MBAR2_RESP_OUT;
19090 + if (rsp_q->mbar_rsp_in >= ha->mbar_size) {
19091 + EL(ha, "rsp_q index=%xh exceeds mbar size=%xh",
19092 + rsp_q_indx, ha->mbar_size);
19093 + return (QL_FUNCTION_PARAMETER_ERROR);
19094 + }
19095 + }
19096 +
19097 + rsp_q->rsp_entry_cnt = RESPONSE_ENTRY_CNT;
19098 + rsp_q->rsp_ring.size = rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19099 + if (ha->flags & QUEUE_SHADOW_PTRS) {
19100 + rsp_q->rsp_ring.size += SHADOW_ENTRY_SIZE;
19101 + }
19102 + rsp_q->rsp_ring.type = LITTLE_ENDIAN_DMA;
19103 + rsp_q->rsp_ring.max_cookie_count = 1;
19104 + rsp_q->rsp_ring.alignment = 64;
19105 + rval = ql_alloc_phys(ha, &rsp_q->rsp_ring, KM_SLEEP);
19106 + if (rval != QL_SUCCESS) {
19107 + EL(ha, "response queue status=%xh", rval);
19108 + }
19109 + if (ha->flags & QUEUE_SHADOW_PTRS) {
19110 + rsp_q->rsp_in_shadow_ofst =
19111 + rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19112 + rsp_q->rsp_in_shadow_ptr = (uint32_t *)
19113 + ((caddr_t)rsp_q->rsp_ring.bp +
19114 + rsp_q->rsp_in_shadow_ofst);
19115 + }
19116 +
19117 + QL_PRINT_3(ha, "done\n");
19118 + return (rval);
19119 +}
19120 +
19121 +/*
19122 + * ql_delete_queues
19123 + * Deletes request/response queues.
19124 + *
19125 + * Input:
19126 + * ha = adapter state pointer.
19127 + *
19128 + * Context:
19129 + * Kernel context.
19130 + */
17828 19131 static void
17829 -ql_process_idc_event(ql_adapter_state_t *ha)
19132 +ql_delete_queues(ql_adapter_state_t *ha)
17830 19133 {
17831 - int rval;
19134 + uint32_t cnt;
17832 19135
17833 - switch (ha->idc_mb[0]) {
17834 - case MBA_IDC_NOTIFICATION:
19136 + QL_PRINT_10(ha, "started\n");
19137 +
19138 + if (ha->vp_index != 0) {
19139 + QL_PRINT_10(ha, "done, no multi-req-q \n");
19140 + ha->req_q[0] = ha->req_q[1] = NULL;
19141 + return;
19142 + }
19143 + if (ha->req_q[0] != NULL) {
19144 + ql_free_phys(ha, &ha->req_q[0]->req_ring);
19145 + kmem_free(ha->req_q[0], sizeof (ql_request_q_t));
19146 + ha->req_q[0] = NULL;
19147 + }
19148 + if (ha->req_q[1] != NULL) {
19149 + ql_free_phys(ha, &ha->req_q[1]->req_ring);
19150 + kmem_free(ha->req_q[1], sizeof (ql_request_q_t));
19151 + ha->req_q[1] = NULL;
19152 + }
19153 +
19154 + if (ha->rsp_queues != NULL) {
19155 + ql_response_q_t *rsp_q;
19156 +
19157 + for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19158 + if ((rsp_q = ha->rsp_queues[cnt]) == NULL) {
19159 + continue;
19160 + }
19161 +
19162 + mutex_destroy(&rsp_q->intr_mutex);
19163 + ql_free_phys(ha, &rsp_q->rsp_ring);
19164 + kmem_free(rsp_q, sizeof (ql_response_q_t));
19165 + ha->rsp_queues[cnt] = NULL;
19166 + }
19167 + kmem_free(ha->rsp_queues, ha->rsp_queues_size);
19168 + ha->rsp_queues = NULL;
19169 + }
19170 +
19171 + QL_PRINT_10(ha, "done\n");
19172 +}
19173 +
19174 +/*
19175 + * ql_multi_queue_support
19176 + * Test 2500 or 8100 adapters for support of multi-queue
19177 + *
19178 + * Input:
19179 + * ha: adapter state pointer.
19180 + *
19181 + * Returns:
19182 + * ql local function return status code.
19183 + *
19184 + * Context:
19185 + * Kernel context.
19186 + */
19187 +static int
19188 +ql_multi_queue_support(ql_adapter_state_t *ha)
19189 +{
19190 + uint32_t data;
19191 + int rval;
19192 +
19193 + data = ql_get_cap_ofst(ha, PCI_CAP_ID_MSI_X);
19194 + if ((ql_pci_config_get16(ha, data + PCI_MSIX_CTRL) &
19195 + PCI_MSIX_TBL_SIZE_MASK) > 2) {
19196 + ha->mbar_size = MBAR2_MULTI_Q_MAX * MBAR2_REG_OFFSET;
19197 +
19198 + if (ql_map_mem_bar(ha, &ha->mbar_dev_handle, &ha->mbar,
19199 + PCI_CONF_BASE3, ha->mbar_size) != DDI_SUCCESS) {
19200 + return (QL_FUNCTION_FAILED);
19201 + }
19202 + if ((rval = qlc_fm_check_acc_handle(ha,
19203 + ha->mbar_dev_handle)) != DDI_FM_OK) {
19204 + qlc_fm_report_err_impact(ha,
19205 + QL_FM_EREPORT_ACC_HANDLE_CHECK);
19206 + EL(ha, "fm_check_acc_handle mbar_dev_handle "
19207 + "status=%xh\n", rval);
19208 + return (QL_FUNCTION_FAILED);
19209 + }
19210 + return (QL_SUCCESS);
19211 + }
19212 + return (QL_FUNCTION_FAILED);
19213 +}
19214 +
19215 +/*
19216 + * ql_get_cap_ofst
19217 + * Locates PCI configuration space capability pointer
19218 + *
19219 + * Input:
19220 + * ha: adapter state pointer.
19221 + * cap_id: Capability ID.
19222 + *
19223 + * Returns:
19224 + * capability offset
19225 + *
19226 + * Context:
19227 + * Kernel context.
19228 + */
19229 +int
19230 +ql_get_cap_ofst(ql_adapter_state_t *ha, uint8_t cap_id)
19231 +{
19232 + int cptr = PCI_CAP_NEXT_PTR_NULL;
19233 +
19234 + QL_PRINT_3(ha, "started\n");
19235 +
19236 + if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
19237 + cptr = ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
19238 +
19239 + while (cptr != PCI_CAP_NEXT_PTR_NULL) {
19240 + if (ql_pci_config_get8(ha, cptr) == cap_id) {
19241 + break;
19242 + }
19243 + cptr = ql_pci_config_get8(ha, cptr + PCI_CAP_NEXT_PTR);
19244 + }
19245 + }
19246 +
19247 + QL_PRINT_3(ha, "done\n");
19248 + return (cptr);
19249 +}
19250 +
19251 +/*
19252 + * ql_map_mem_bar
19253 + * Map Mem BAR
19254 + *
19255 + * Input:
19256 + * ha: adapter state pointer.
19257 + * handlep: access handle pointer.
19258 + * addrp: address structure pointer.
19259 + * ofst: BAR offset.
19260 + * len: address space length.
19261 + *
19262 + * Returns:
19263 + * DDI_SUCCESS or DDI_FAILURE.
19264 + *
19265 + * Context:
19266 + * Kernel context.
19267 + */
19268 +static int
19269 +ql_map_mem_bar(ql_adapter_state_t *ha, ddi_acc_handle_t *handlep,
19270 + caddr_t *addrp, uint32_t ofst, uint32_t len)
19271 +{
19272 + caddr_t nreg;
19273 + pci_regspec_t *reg, *reg2;
19274 + int rval;
19275 + uint_t rlen;
19276 + uint32_t rcnt, w32, nreg_size;
19277 +
19278 + QL_PRINT_10(ha, "started\n");
19279 +
19280 + /* Check for Mem BAR */
19281 + w32 = ql_pci_config_get32(ha, ofst);
19282 + if (w32 == 0) {
19283 + EL(ha, "no Mem BAR %xh\n", ofst);
19284 + return (DDI_FAILURE);
19285 + }
19286 +
19287 + /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
19288 + if ((rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ha->dip,
19289 + DDI_PROP_DONTPASS, "reg", (int **)®, &rlen)) !=
19290 + DDI_PROP_SUCCESS) {
19291 + EL(ha, "ddi_prop_lookup_int_array status=%xh\n", rval);
19292 + return (DDI_FAILURE);
19293 + }
19294 + rlen = (uint_t)(rlen * sizeof (int)); /* in bytes */
19295 + rcnt = (uint32_t)(rlen / sizeof (pci_regspec_t));
19296 +
19297 + /* Check if register already added. */
19298 + reg2 = reg;
19299 + for (w32 = 0; w32 < rcnt; w32++) {
19300 + if ((reg2->pci_phys_hi & PCI_REG_REG_M) == ofst) {
19301 + EL(ha, "already mapped\n");
19302 + break;
19303 + }
19304 + reg2++;
19305 + }
19306 + if (w32 == rcnt) {
17835 19307 /*
17836 - * The informational opcode (idc_mb[2]) can be a
17837 - * defined value or the mailbox command being executed
17838 - * on another function which stimulated this IDC message.
19308 + * Allocate memory for the existing reg(s) plus one and then
19309 + * build it.
17839 19310 */
17840 - ADAPTER_STATE_LOCK(ha);
17841 - switch (ha->idc_mb[2]) {
17842 - case IDC_OPC_DRV_START:
17843 - if (ha->idc_flash_acc != 0) {
17844 - ha->idc_flash_acc--;
17845 - if (ha->idc_flash_acc == 0) {
17846 - ha->idc_flash_acc_timer = 0;
17847 - GLOBAL_HW_UNLOCK();
17848 - }
19311 + nreg_size = (uint32_t)(rlen + sizeof (pci_regspec_t));
19312 + nreg = kmem_zalloc(nreg_size, KM_SLEEP);
19313 +
19314 + /*
19315 + * Find a current map memory reg to copy.
19316 + */
19317 + reg2 = reg;
19318 + while ((reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19319 + PCI_ADDR_MEM32 && (reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19320 + PCI_ADDR_MEM64) {
19321 + reg2++;
19322 + if ((caddr_t)reg2 >= (caddr_t)reg + rlen) {
19323 + reg2 = reg;
19324 + break;
17849 19325 }
17850 - if (ha->idc_restart_cnt != 0) {
17851 - ha->idc_restart_cnt--;
17852 - if (ha->idc_restart_cnt == 0) {
17853 - ha->idc_restart_timer = 0;
17854 - ADAPTER_STATE_UNLOCK(ha);
17855 - TASK_DAEMON_LOCK(ha);
17856 - ha->task_daemon_flags &= ~DRIVER_STALL;
17857 - TASK_DAEMON_UNLOCK(ha);
17858 - ql_restart_queues(ha);
17859 - } else {
17860 - ADAPTER_STATE_UNLOCK(ha);
19326 + }
19327 + w32 = (reg2->pci_phys_hi & ~PCI_REG_REG_M) | ofst;
19328 +
19329 + bcopy(reg, nreg, rlen);
19330 + reg2 = (pci_regspec_t *)(nreg + rlen);
19331 +
19332 + reg2->pci_phys_hi = w32;
19333 + reg2->pci_phys_mid = 0;
19334 + reg2->pci_phys_low = 0;
19335 + reg2->pci_size_hi = 0;
19336 + reg2->pci_size_low = len;
19337 +
19338 + /*
19339 + * Write out the new "reg" property
19340 + */
19341 + /*LINTED [Solaris DDI_DEV_T_NONE Lint error]*/
19342 + (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, ha->dip,
19343 + "reg", (int *)nreg, (uint_t)(nreg_size / sizeof (int)));
19344 +
19345 + w32 = (uint_t)(nreg_size / sizeof (pci_regspec_t) - 1);
19346 + kmem_free((caddr_t)nreg, nreg_size);
19347 + }
19348 +
19349 + ddi_prop_free(reg);
19350 +
19351 + /* Map register */
19352 + rval = ddi_regs_map_setup(ha->dip, w32, addrp, 0, len,
19353 + &ql_dev_acc_attr, handlep);
19354 + if (rval != DDI_SUCCESS || *addrp == NULL || *handlep == NULL) {
19355 + EL(ha, "regs_map status=%xh, base=%xh, handle=%xh\n",
19356 + rval, *addrp, *handlep);
19357 + if (*handlep != NULL) {
19358 + ddi_regs_map_free(handlep);
19359 + *handlep = NULL;
19360 + }
19361 + }
19362 +
19363 + QL_PRINT_10(ha, "done\n");
19364 +
19365 + return (rval);
19366 +}
19367 +
19368 +/*
19369 + * ql_intr_lock
19370 + * Acquires all interrupt locks.
19371 + *
19372 + * Input:
19373 + * ha: adapter state pointer.
19374 + *
19375 + * Context:
19376 + * Kernel/Interrupt context.
19377 + */
19378 +void
19379 +ql_intr_lock(ql_adapter_state_t *ha)
19380 +{
19381 + uint16_t cnt;
19382 +
19383 + QL_PRINT_3(ha, "started\n");
19384 +
19385 + if (ha->rsp_queues != NULL) {
19386 + for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19387 + if (ha->rsp_queues[cnt] != NULL) {
19388 + INDX_INTR_LOCK(ha, cnt);
19389 + }
19390 + }
19391 + }
19392 + QL_PRINT_3(ha, "done\n");
19393 +}
19394 +
19395 +/*
19396 + * ql_intr_unlock
19397 + * Releases all interrupt locks.
19398 + *
19399 + * Input:
19400 + * ha: adapter state pointer.
19401 + *
19402 + * Context:
19403 + * Kernel/Interrupt context.
19404 + */
19405 +void
19406 +ql_intr_unlock(ql_adapter_state_t *ha)
19407 +{
19408 + uint16_t cnt;
19409 +
19410 + QL_PRINT_3(ha, "started\n");
19411 +
19412 + if (ha->rsp_queues != NULL) {
19413 + for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19414 + if (ha->rsp_queues[cnt] != NULL) {
19415 + INDX_INTR_UNLOCK(ha, cnt);
19416 + }
19417 + }
19418 + }
19419 + QL_PRINT_3(ha, "done\n");
19420 +}
19421 +
19422 +/*
19423 + * ql_completion_thread
19424 + * I/O completion thread.
19425 + *
19426 + * Input:
19427 + * arg: port info pointer.
19428 + * COMP_Q_LOCK must be acquired prior to call.
19429 + *
19430 + * Context:
19431 + * Kernel context.
19432 + */
19433 +static void
19434 +ql_completion_thread(void *arg)
19435 +{
19436 + ql_srb_t *sp;
19437 + ql_adapter_state_t *ha = arg;
19438 +
19439 + QL_PRINT_3(ha, "started, hsp=%p\n", (void *)&sp);
19440 +
19441 + COMP_Q_LOCK(ha);
19442 + ha->comp_thds_active++;
19443 + ha->comp_thds_awake++;
19444 + while (!(ha->flags & COMP_THD_TERMINATE)) {
19445 + /* process completion queue items */
19446 + while (ha->comp_q.first != NULL) {
19447 + sp = (ha->comp_q.first)->base_address;
19448 + /* Remove command from completion queue */
19449 + ql_remove_link(&ha->comp_q, &sp->cmd);
19450 + COMP_Q_UNLOCK(ha);
19451 + QL_PRINT_3(ha, "pkt_comp, sp=%p, pkt_state=%xh, "
19452 + "hsp=%p\n", (void*)sp, sp->pkt->pkt_state,
19453 + (void *)&sp);
19454 + (sp->pkt->pkt_comp)(sp->pkt);
19455 + COMP_Q_LOCK(ha);
19456 + }
19457 + ha->comp_thds_awake--;
19458 + QL_PRINT_3(ha, "sleep, hsp=%p\n", (void *)&sp);
19459 + cv_wait(&ha->cv_comp_thread, &ha->comp_q_mutex);
19460 + QL_PRINT_3(ha, "awoke, hsp=%p\n", (void *)&sp);
19461 + }
19462 + ha->comp_thds_awake--;
19463 + ha->comp_thds_active--;
19464 + COMP_Q_UNLOCK(ha);
19465 +
19466 + QL_PRINT_3(ha, "done\n");
19467 +}
19468 +
19469 +/*
19470 + * ql_io_comp
19471 + * Transport I/O completion
19472 + *
19473 + * Input:
19474 + * sp: SRB structure pointer
19475 + *
19476 + * Context:
19477 + * Kernel context.
19478 + */
19479 +void
19480 +ql_io_comp(ql_srb_t *sp)
19481 +{
19482 + ql_adapter_state_t *ha = sp->ha->pha;
19483 +
19484 + QL_PRINT_3(ha, "started, sp=%ph, d_id=%xh\n", (void*)sp,
19485 + sp->pkt->pkt_cmd_fhdr.d_id);
19486 +
19487 + if (sp->pkt->pkt_comp && !ddi_in_panic()) {
19488 + QL_PRINT_3(ha, "added to comp_q\n");
19489 + COMP_Q_LOCK(ha);
19490 + ql_add_link_b(&ha->comp_q, &sp->cmd);
19491 + if (ha->comp_thds_awake < ha->comp_thds_active) {
19492 + ha->comp_thds_awake++;
19493 + QL_PRINT_3(ha, "signal\n");
19494 + cv_signal(&ha->cv_comp_thread);
19495 + }
19496 + COMP_Q_UNLOCK(ha);
19497 + }
19498 +
19499 + QL_PRINT_3(ha, "done\n");
19500 +}
19501 +
19502 +/*
19503 + * ql_process_comp_queue
19504 + * Process completion queue entries.
19505 + *
19506 + * Input:
19507 + * arg: adapter state pointer.
19508 + *
19509 + * Context:
19510 + * Kernel context.
19511 + */
19512 +static void
19513 +ql_process_comp_queue(void *arg)
19514 +{
19515 + ql_srb_t *sp;
19516 + ql_adapter_state_t *ha = arg;
19517 +
19518 + QL_PRINT_3(ha, "started\n");
19519 +
19520 + COMP_Q_LOCK(ha);
19521 +
19522 + /* process completion queue items */
19523 + while (ha->comp_q.first != NULL) {
19524 + sp = (ha->comp_q.first)->base_address;
19525 + QL_PRINT_3(ha, "sending comp=0x%p\n", (void *)sp);
19526 + /* Remove command from completion queue */
19527 + ql_remove_link(&ha->comp_q, &sp->cmd);
19528 + COMP_Q_UNLOCK(ha);
19529 + (sp->pkt->pkt_comp)(sp->pkt);
19530 + COMP_Q_LOCK(ha);
19531 + }
19532 +
19533 + COMP_Q_UNLOCK(ha);
19534 +
19535 + QL_PRINT_3(ha, "done\n");
19536 +}
19537 +
19538 +/*
19539 + * ql_abort_io
19540 + * Abort I/O.
19541 + *
19542 + * Input:
19543 + * ha: adapter state pointer.
19544 + * sp: SRB pointer.
19545 + *
19546 + * Returns:
19547 + * ql local function return status code.
19548 + *
19549 + * Context:
19550 + * Kernel context.
19551 + */
19552 +static int
19553 +ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *sp)
19554 +{
19555 + ql_link_t *link;
19556 + ql_srb_t *sp2;
19557 + ql_tgt_t *tq;
19558 + ql_lun_t *lq;
19559 + int rval = QL_FUNCTION_FAILED;
19560 + ql_adapter_state_t *ha = vha->pha;
19561 +
19562 + QL_PRINT_10(ha, "started, sp=%ph, handle=%xh\n", (void *)sp,
19563 + sp->handle);
19564 +
19565 + if ((lq = sp->lun_queue) != NULL) {
19566 + tq = lq->target_queue;
19567 + } else {
19568 + tq = NULL;
19569 + }
19570 +
19571 + /* Acquire target queue lock. */
19572 + if (tq) {
19573 + DEVICE_QUEUE_LOCK(tq);
19574 + }
19575 + REQUEST_RING_LOCK(ha);
19576 +
19577 + /* If command not already started. */
19578 + if (!(sp->flags & SRB_ISP_STARTED)) {
19579 + rval = QL_FUNCTION_PARAMETER_ERROR;
19580 +
19581 + /* Check pending queue for command. */
19582 + for (link = ha->pending_cmds.first; link != NULL;
19583 + link = link->next) {
19584 + sp2 = link->base_address;
19585 + if (sp2 == sp) {
19586 + rval = QL_SUCCESS;
19587 + /* Remove srb from pending command queue */
19588 + ql_remove_link(&ha->pending_cmds, &sp->cmd);
19589 + break;
19590 + }
19591 + }
19592 +
19593 + if (link == NULL && lq) {
19594 + /* Check for cmd on device queue. */
19595 + for (link = lq->cmd.first; link != NULL;
19596 + link = link->next) {
19597 + sp2 = link->base_address;
19598 + if (sp2 == sp) {
19599 + rval = QL_SUCCESS;
19600 + /* Remove srb from device queue. */
19601 + ql_remove_link(&lq->cmd, &sp->cmd);
19602 + sp->flags &= ~SRB_IN_DEVICE_QUEUE;
19603 + break;
17861 19604 }
17862 - } else {
17863 - ADAPTER_STATE_UNLOCK(ha);
17864 19605 }
17865 - break;
17866 - case IDC_OPC_FLASH_ACC:
17867 - ha->idc_flash_acc_timer = 30;
17868 - if (ha->idc_flash_acc == 0) {
17869 - GLOBAL_HW_LOCK();
19606 + }
19607 + }
19608 +
19609 + REQUEST_RING_UNLOCK(ha);
19610 + if (tq) {
19611 + DEVICE_QUEUE_UNLOCK(tq);
19612 + }
19613 +
19614 + if (sp->flags & SRB_ISP_COMPLETED || rval == QL_SUCCESS) {
19615 + rval = QL_SUCCESS;
19616 + } else {
19617 + uint32_t index;
19618 +
19619 + INTR_LOCK(ha);
19620 + sp->flags |= SRB_ABORTING;
19621 + if (sp->handle != 0) {
19622 + index = sp->handle & OSC_INDEX_MASK;
19623 + if (ha->outstanding_cmds[index] == sp) {
19624 + ha->outstanding_cmds[index] =
19625 + QL_ABORTED_SRB(ha);
17870 19626 }
17871 - ha->idc_flash_acc++;
19627 + /* Decrement outstanding commands on device. */
19628 + if (tq != NULL && tq->outcnt != 0) {
19629 + tq->outcnt--;
19630 + }
19631 + if (lq != NULL && sp->flags & SRB_FCP_CMD_PKT &&
19632 + lq->lun_outcnt != 0) {
19633 + lq->lun_outcnt--;
19634 + }
19635 + /* Remove command from watchdog queue. */
19636 + if (sp->flags & SRB_WATCHDOG_ENABLED) {
19637 + if (tq != NULL) {
19638 + ql_remove_link(&tq->wdg, &sp->wdg);
19639 + }
19640 + sp->flags &= ~SRB_WATCHDOG_ENABLED;
19641 + }
19642 + INTR_UNLOCK(ha);
19643 + (void) ql_abort_command(ha, sp);
19644 + sp->handle = 0;
19645 + } else {
19646 + INTR_UNLOCK(ha);
19647 + }
19648 + rval = QL_SUCCESS;
19649 + }
19650 +
19651 + if (rval != QL_SUCCESS) {
19652 + EL(ha, "sp=%p not aborted=%xh\n", (void *)sp, rval);
19653 + } else {
19654 + /*EMPTY*/
19655 + QL_PRINT_10(ha, "done\n");
19656 + }
19657 + return (rval);
19658 +}
19659 +
19660 +/*
19661 + * ql_idc
19662 + * Inter driver communication thread.
19663 + *
19664 + * Input:
19665 + * ha = adapter state pointer.
19666 + *
19667 + * Context:
19668 + * Kernel context.
19669 + */
19670 +static void
19671 +ql_idc(ql_adapter_state_t *ha)
19672 +{
19673 + int rval;
19674 + uint32_t timer = 300;
19675 +
19676 + QL_PRINT_10(ha, "started\n");
19677 +
19678 + for (;;) {
19679 + /* IDC Stall needed. */
19680 + if (ha->flags & IDC_STALL_NEEDED) {
19681 + ADAPTER_STATE_LOCK(ha);
19682 + ha->flags &= ~IDC_STALL_NEEDED;
17872 19683 ADAPTER_STATE_UNLOCK(ha);
17873 - break;
17874 - case IDC_OPC_RESTART_MPI:
17875 - ha->idc_restart_timer = 30;
17876 - ha->idc_restart_cnt++;
17877 - ADAPTER_STATE_UNLOCK(ha);
17878 19684 TASK_DAEMON_LOCK(ha);
17879 19685 ha->task_daemon_flags |= DRIVER_STALL;
17880 19686 TASK_DAEMON_UNLOCK(ha);
17881 - break;
17882 - case IDC_OPC_PORT_RESET_MBC:
17883 - case IDC_OPC_SET_PORT_CONFIG_MBC:
17884 - ha->idc_restart_timer = 30;
17885 - ha->idc_restart_cnt++;
19687 + if (LOOP_READY(ha)) {
19688 + if ((ha->idc_mb[1] & IDC_TIMEOUT_MASK) <
19689 + IDC_TIMEOUT_MASK) {
19690 + ha->idc_mb[1] = (uint16_t)
19691 + (ha->idc_mb[1] | IDC_TIMEOUT_MASK);
19692 + rval = ql_idc_time_extend(ha);
19693 + if (rval != QL_SUCCESS) {
19694 + EL(ha, "idc_time_extend status"
19695 + "=%xh\n", rval);
19696 + }
19697 + }
19698 + (void) ql_wait_outstanding(ha);
19699 + }
19700 + }
19701 +
19702 + /* IDC ACK needed. */
19703 + if (ha->flags & IDC_ACK_NEEDED) {
19704 + ADAPTER_STATE_LOCK(ha);
19705 + ha->flags &= ~IDC_ACK_NEEDED;
17886 19706 ADAPTER_STATE_UNLOCK(ha);
19707 + rval = ql_idc_ack(ha);
19708 + if (rval != QL_SUCCESS) {
19709 + EL(ha, "idc_ack status=%xh\n", rval);
19710 + ADAPTER_STATE_LOCK(ha);
19711 + ha->flags |= IDC_RESTART_NEEDED;
19712 + ADAPTER_STATE_UNLOCK(ha);
19713 + }
19714 + }
19715 +
19716 + /* IDC Restart needed. */
19717 + if (timer-- == 0 || ha->flags & ADAPTER_SUSPENDED ||
19718 + (ha->flags & IDC_RESTART_NEEDED &&
19719 + !(ha->flags & LOOPBACK_ACTIVE))) {
19720 + ADAPTER_STATE_LOCK(ha);
19721 + ha->flags &= ~(IDC_RESTART_NEEDED | IDC_STALL_NEEDED |
19722 + IDC_ACK_NEEDED);
19723 + ADAPTER_STATE_UNLOCK(ha);
17887 19724 TASK_DAEMON_LOCK(ha);
17888 - ha->task_daemon_flags |= DRIVER_STALL;
19725 + ha->task_daemon_flags &= ~DRIVER_STALL;
17889 19726 TASK_DAEMON_UNLOCK(ha);
17890 - (void) ql_wait_outstanding(ha);
19727 + if (LOOP_READY(ha)) {
19728 + ql_restart_queues(ha);
19729 + }
17891 19730 break;
17892 - default:
17893 - ADAPTER_STATE_UNLOCK(ha);
17894 - EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
17895 - ha->idc_mb[2]);
17896 - break;
17897 19731 }
19732 + delay(10);
19733 + }
19734 +
19735 + QL_PRINT_10(ha, "done\n");
19736 +}
19737 +
19738 +/*
19739 + * ql_get_lun_addr
19740 + * get the lunslun address.
19741 + *
19742 + * Input:
19743 + * tq: target queue pointer.
19744 + * lun: the lun number.
19745 + *
19746 + * Returns:
19747 + * the lun address.
19748 + *
19749 + * Context:
19750 + * Interrupt or Kernel context, no mailbox commands allowed.
19751 + */
19752 +uint64_t
19753 +ql_get_lun_addr(ql_tgt_t *tq, uint16_t lun)
19754 +{
19755 + ql_lun_t *lq;
19756 + ql_link_t *link = NULL;
19757 + uint64_t lun_addr = 0;
19758 + fcp_ent_addr_t *fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
19759 +
19760 + /* If the lun queue exists */
19761 + if (tq) {
19762 + for (link = tq->lun_queues.first; link != NULL;
19763 + link = link->next) {
19764 + lq = link->base_address;
19765 + if (lq->lun_no == lun) {
19766 + break;
19767 + }
19768 + }
19769 + }
19770 + if (link == NULL) {
19771 + /* create an fcp_ent_addr from the lun number */
19772 + if (MSB(lun)) {
19773 + fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19774 + (hibyte(lun) | QL_LUN_AM_FLAT));
19775 + } else {
19776 + fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19777 + hibyte(lun));
19778 + }
19779 + } else {
19780 + lun_addr = lq->lun_addr;
19781 + }
19782 +
19783 + return (lun_addr);
19784 +}
19785 +
19786 +
19787 +/*
19788 + * ql_83xx_binary_fw_dump
19789 + *
19790 + * Input:
19791 + * ha: adapter state pointer.
19792 + * fw: firmware dump context pointer.
19793 + *
19794 + * Returns:
19795 + * ql local function return status code.
19796 + *
19797 + * Context:
19798 + * Interrupt or Kernel context, no mailbox commands allowed.
19799 + */
19800 +static int
19801 +ql_83xx_binary_fw_dump(ql_adapter_state_t *ha, ql_83xx_fw_dump_t *fw)
19802 +{
19803 + uint32_t *reg32, cnt, *w32ptr, index, *dp;
19804 + void *bp;
19805 + clock_t timer;
19806 + int rv, rval = QL_SUCCESS;
19807 +
19808 + QL_PRINT_3(ha, "started\n");
19809 +
19810 + fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
19811 + if (ha->req_q[1] != NULL) {
19812 + fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
19813 + }
19814 + fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
19815 +
19816 + fw->hccr = RD32_IO_REG(ha, hccr);
19817 + fw->r2h_status = RD32_IO_REG(ha, risc2host);
19818 + fw->aer_ues = ql_pci_config_get32(ha, 0x104);
19819 +
19820 + /* Disable ISP interrupts. */
19821 + ql_disable_intr(ha);
19822 +
19823 + /* Pause RISC. */
19824 + if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
19825 + WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
19826 + for (timer = 30000;
19827 + (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
19828 + rval == QL_SUCCESS; timer--) {
19829 + if (timer) {
19830 + drv_usecwait(100);
19831 + if (timer % 10000 == 0) {
19832 + EL(ha, "risc pause %d\n", timer);
19833 + }
19834 + } else {
19835 + EL(ha, "risc pause timeout\n");
19836 + rval = QL_FUNCTION_TIMEOUT;
19837 + }
19838 + }
19839 + }
19840 +
19841 + WRT32_IO_REG(ha, io_base_addr, 0x6000);
19842 + WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0);
19843 + WRT_REG_DWORD(ha, ha->iobase + 0xcc, 0);
19844 +
19845 + WRT32_IO_REG(ha, io_base_addr, 0x6010);
19846 + WRT_REG_DWORD(ha, ha->iobase + 0xd4, 0);
19847 +
19848 + WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19849 + WRT_REG_DWORD(ha, ha->iobase + 0xf0, 0x60000000);
19850 +
19851 + /* Host Interface registers */
19852 +
19853 + /* HostRisc registers. */
19854 + WRT32_IO_REG(ha, io_base_addr, 0x7000);
19855 + bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
19856 + 16, 32);
19857 + WRT32_IO_REG(ha, io_base_addr, 0x7010);
19858 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19859 + WRT32_IO_REG(ha, io_base_addr, 0x7040);
19860 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19861 +
19862 + /* PCIe registers. */
19863 + WRT32_IO_REG(ha, io_base_addr, 0x7c00);
19864 + WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
19865 + bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
19866 + 3, 32);
19867 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
19868 + WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
19869 +
19870 + /* Host interface registers. */
19871 + (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
19872 + sizeof (fw->host_reg) / 4, 32);
19873 +
19874 + /* Shadow registers. */
19875 +
19876 + WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19877 + RD32_IO_REG(ha, io_base_addr);
19878 +
19879 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19880 + WRT_REG_DWORD(ha, reg32, 0xB0000000);
19881 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19882 + fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
19883 +
19884 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19885 + WRT_REG_DWORD(ha, reg32, 0xB0100000);
19886 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19887 + fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
19888 +
19889 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19890 + WRT_REG_DWORD(ha, reg32, 0xB0200000);
19891 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19892 + fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
19893 +
19894 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19895 + WRT_REG_DWORD(ha, reg32, 0xB0300000);
19896 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19897 + fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
19898 +
19899 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19900 + WRT_REG_DWORD(ha, reg32, 0xB0400000);
19901 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19902 + fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
19903 +
19904 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19905 + WRT_REG_DWORD(ha, reg32, 0xB0500000);
19906 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19907 + fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
19908 +
19909 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19910 + WRT_REG_DWORD(ha, reg32, 0xB0600000);
19911 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19912 + fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
19913 +
19914 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19915 + WRT_REG_DWORD(ha, reg32, 0xB0700000);
19916 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19917 + fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
19918 +
19919 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19920 + WRT_REG_DWORD(ha, reg32, 0xB0800000);
19921 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19922 + fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
19923 +
19924 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19925 + WRT_REG_DWORD(ha, reg32, 0xB0900000);
19926 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19927 + fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
19928 +
19929 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19930 + WRT_REG_DWORD(ha, reg32, 0xB0A00000);
19931 + reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19932 + fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
19933 +
19934 + /* RISC I/O register. */
19935 +
19936 + WRT32_IO_REG(ha, io_base_addr, 0x0010);
19937 + (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
19938 + 1, 32);
19939 +
19940 + /* Mailbox registers. */
19941 +
19942 + (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
19943 + sizeof (fw->mailbox_reg) / 2, 16);
19944 +
19945 + /* Transfer sequence registers. */
19946 +
19947 + /* XSEQ GP */
19948 + WRT32_IO_REG(ha, io_base_addr, 0xBE00);
19949 + bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
19950 + 16, 32);
19951 + WRT32_IO_REG(ha, io_base_addr, 0xBE10);
19952 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19953 + WRT32_IO_REG(ha, io_base_addr, 0xBE20);
19954 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19955 + WRT32_IO_REG(ha, io_base_addr, 0xBE30);
19956 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19957 + WRT32_IO_REG(ha, io_base_addr, 0xBE40);
19958 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19959 + WRT32_IO_REG(ha, io_base_addr, 0xBE50);
19960 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19961 + WRT32_IO_REG(ha, io_base_addr, 0xBE60);
19962 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19963 + WRT32_IO_REG(ha, io_base_addr, 0xBE70);
19964 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19965 + WRT32_IO_REG(ha, io_base_addr, 0xBF00);
19966 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19967 + WRT32_IO_REG(ha, io_base_addr, 0xBF10);
19968 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19969 + WRT32_IO_REG(ha, io_base_addr, 0xBF20);
19970 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19971 + WRT32_IO_REG(ha, io_base_addr, 0xBF30);
19972 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19973 + WRT32_IO_REG(ha, io_base_addr, 0xBF40);
19974 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19975 + WRT32_IO_REG(ha, io_base_addr, 0xBF50);
19976 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19977 + WRT32_IO_REG(ha, io_base_addr, 0xBF60);
19978 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19979 + WRT32_IO_REG(ha, io_base_addr, 0xBF70);
19980 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19981 +
19982 + /* XSEQ-0 */
19983 + WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
19984 + bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0, 16, 32);
19985 + WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
19986 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19987 + WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
19988 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19989 +
19990 + /* XSEQ-1 */
19991 + WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
19992 + (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
19993 + 16, 32);
19994 +
19995 + /* XSEQ-2 */
19996 + WRT32_IO_REG(ha, io_base_addr, 0xBEF0);
19997 + (void) ql_read_regs(ha, fw->xseq_2_reg, ha->iobase + 0xC0,
19998 + 16, 32);
19999 +
20000 + /* Receive sequence registers. */
20001 +
20002 + /* RSEQ GP */
20003 + WRT32_IO_REG(ha, io_base_addr, 0xFE00);
20004 + bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0, 16, 32);
20005 + WRT32_IO_REG(ha, io_base_addr, 0xFE10);
20006 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20007 + WRT32_IO_REG(ha, io_base_addr, 0xFE20);
20008 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20009 + WRT32_IO_REG(ha, io_base_addr, 0xFE30);
20010 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20011 + WRT32_IO_REG(ha, io_base_addr, 0xFE40);
20012 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20013 + WRT32_IO_REG(ha, io_base_addr, 0xFE50);
20014 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20015 + WRT32_IO_REG(ha, io_base_addr, 0xFE60);
20016 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20017 + WRT32_IO_REG(ha, io_base_addr, 0xFE70);
20018 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20019 + WRT32_IO_REG(ha, io_base_addr, 0xFF00);
20020 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20021 + WRT32_IO_REG(ha, io_base_addr, 0xFF10);
20022 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20023 + WRT32_IO_REG(ha, io_base_addr, 0xFF20);
20024 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20025 + WRT32_IO_REG(ha, io_base_addr, 0xFF30);
20026 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20027 + WRT32_IO_REG(ha, io_base_addr, 0xFF40);
20028 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20029 + WRT32_IO_REG(ha, io_base_addr, 0xFF50);
20030 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20031 + WRT32_IO_REG(ha, io_base_addr, 0xFF60);
20032 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20033 + WRT32_IO_REG(ha, io_base_addr, 0xFF70);
20034 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20035 +
20036 + /* RSEQ-0 */
20037 + WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
20038 + bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
20039 + 16, 32);
20040 + WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
20041 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20042 +
20043 + /* RSEQ-1 */
20044 + WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
20045 + (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
20046 + sizeof (fw->rseq_1_reg) / 4, 32);
20047 +
20048 + /* RSEQ-2 */
20049 + WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
20050 + (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
20051 + sizeof (fw->rseq_2_reg) / 4, 32);
20052 +
20053 + /* RSEQ-3 */
20054 + WRT32_IO_REG(ha, io_base_addr, 0xFEF0);
20055 + (void) ql_read_regs(ha, fw->rseq_3_reg, ha->iobase + 0xC0,
20056 + sizeof (fw->rseq_3_reg) / 4, 32);
20057 +
20058 + /* Auxiliary sequencer registers. */
20059 +
20060 + /* ASEQ GP */
20061 + WRT32_IO_REG(ha, io_base_addr, 0xB000);
20062 + bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0, 16, 32);
20063 + WRT32_IO_REG(ha, io_base_addr, 0xB010);
20064 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20065 + WRT32_IO_REG(ha, io_base_addr, 0xB020);
20066 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20067 + WRT32_IO_REG(ha, io_base_addr, 0xB030);
20068 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20069 + WRT32_IO_REG(ha, io_base_addr, 0xB040);
20070 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20071 + WRT32_IO_REG(ha, io_base_addr, 0xB050);
20072 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20073 + WRT32_IO_REG(ha, io_base_addr, 0xB060);
20074 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20075 + WRT32_IO_REG(ha, io_base_addr, 0xB070);
20076 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20077 + WRT32_IO_REG(ha, io_base_addr, 0xB100);
20078 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20079 + WRT32_IO_REG(ha, io_base_addr, 0xB110);
20080 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20081 + WRT32_IO_REG(ha, io_base_addr, 0xB120);
20082 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20083 + WRT32_IO_REG(ha, io_base_addr, 0xB130);
20084 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20085 + WRT32_IO_REG(ha, io_base_addr, 0xB140);
20086 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20087 + WRT32_IO_REG(ha, io_base_addr, 0xB150);
20088 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20089 + WRT32_IO_REG(ha, io_base_addr, 0xB160);
20090 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20091 + WRT32_IO_REG(ha, io_base_addr, 0xB170);
20092 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20093 +
20094 + /* ASEQ-0 */
20095 + WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
20096 + bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
20097 + 16, 32);
20098 + WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
20099 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20100 +
20101 + /* ASEQ-1 */
20102 + WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
20103 + (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
20104 + 16, 32);
20105 +
20106 + /* ASEQ-2 */
20107 + WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
20108 + (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
20109 + 16, 32);
20110 +
20111 + /* ASEQ-3 */
20112 + WRT32_IO_REG(ha, io_base_addr, 0xB1F0);
20113 + (void) ql_read_regs(ha, fw->aseq_3_reg, ha->iobase + 0xC0,
20114 + 16, 32);
20115 +
20116 + /* Command DMA registers. */
20117 +
20118 + WRT32_IO_REG(ha, io_base_addr, 0x7100);
20119 + bp = ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
20120 + 16, 32);
20121 + WRT32_IO_REG(ha, io_base_addr, 0x7120);
20122 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20123 + WRT32_IO_REG(ha, io_base_addr, 0x7130);
20124 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20125 + WRT32_IO_REG(ha, io_base_addr, 0x71f0);
20126 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20127 +
20128 + /* Queues. */
20129 +
20130 + /* RequestQ0 */
20131 + WRT32_IO_REG(ha, io_base_addr, 0x7200);
20132 + bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
20133 + 8, 32);
20134 + (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20135 +
20136 + /* ResponseQ0 */
20137 + WRT32_IO_REG(ha, io_base_addr, 0x7300);
20138 + bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
20139 + 8, 32);
20140 + (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20141 +
20142 + /* RequestQ1 */
20143 + WRT32_IO_REG(ha, io_base_addr, 0x7400);
20144 + bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
20145 + 8, 32);
20146 + (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20147 +
20148 + /* Transmit DMA registers. */
20149 +
20150 + /* XMT0 */
20151 + WRT32_IO_REG(ha, io_base_addr, 0x7600);
20152 + bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
20153 + 16, 32);
20154 + WRT32_IO_REG(ha, io_base_addr, 0x7610);
20155 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20156 +
20157 + /* XMT1 */
20158 + WRT32_IO_REG(ha, io_base_addr, 0x7620);
20159 + bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
20160 + 16, 32);
20161 + WRT32_IO_REG(ha, io_base_addr, 0x7630);
20162 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20163 +
20164 + /* XMT2 */
20165 + WRT32_IO_REG(ha, io_base_addr, 0x7640);
20166 + bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
20167 + 16, 32);
20168 + WRT32_IO_REG(ha, io_base_addr, 0x7650);
20169 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20170 +
20171 + /* XMT3 */
20172 + WRT32_IO_REG(ha, io_base_addr, 0x7660);
20173 + bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
20174 + 16, 32);
20175 + WRT32_IO_REG(ha, io_base_addr, 0x7670);
20176 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20177 +
20178 + /* XMT4 */
20179 + WRT32_IO_REG(ha, io_base_addr, 0x7680);
20180 + bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
20181 + 16, 32);
20182 + WRT32_IO_REG(ha, io_base_addr, 0x7690);
20183 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20184 +
20185 + /* XMT Common */
20186 + WRT32_IO_REG(ha, io_base_addr, 0x76A0);
20187 + (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
20188 + ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
20189 +
20190 + /* Receive DMA registers. */
20191 +
20192 + /* RCVThread0 */
20193 + WRT32_IO_REG(ha, io_base_addr, 0x7700);
20194 + bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
20195 + ha->iobase + 0xC0, 16, 32);
20196 + WRT32_IO_REG(ha, io_base_addr, 0x7710);
20197 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20198 +
20199 + /* RCVThread1 */
20200 + WRT32_IO_REG(ha, io_base_addr, 0x7720);
20201 + bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
20202 + ha->iobase + 0xC0, 16, 32);
20203 + WRT32_IO_REG(ha, io_base_addr, 0x7730);
20204 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20205 +
20206 + /* RISC registers. */
20207 +
20208 + /* RISC GP */
20209 + WRT32_IO_REG(ha, io_base_addr, 0x0F00);
20210 + bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0, 16, 32);
20211 + WRT32_IO_REG(ha, io_base_addr, 0x0F10);
20212 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20213 + WRT32_IO_REG(ha, io_base_addr, 0x0F20);
20214 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20215 + WRT32_IO_REG(ha, io_base_addr, 0x0F30);
20216 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20217 + WRT32_IO_REG(ha, io_base_addr, 0x0F40);
20218 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20219 + WRT32_IO_REG(ha, io_base_addr, 0x0F50);
20220 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20221 + WRT32_IO_REG(ha, io_base_addr, 0x0F60);
20222 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20223 + WRT32_IO_REG(ha, io_base_addr, 0x0F70);
20224 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20225 +
20226 + /* Local memory controller (LMC) registers. */
20227 +
20228 + /* LMC */
20229 + WRT32_IO_REG(ha, io_base_addr, 0x3000);
20230 + bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0, 16, 32);
20231 + WRT32_IO_REG(ha, io_base_addr, 0x3010);
20232 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20233 + WRT32_IO_REG(ha, io_base_addr, 0x3020);
20234 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20235 + WRT32_IO_REG(ha, io_base_addr, 0x3030);
20236 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20237 + WRT32_IO_REG(ha, io_base_addr, 0x3040);
20238 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20239 + WRT32_IO_REG(ha, io_base_addr, 0x3050);
20240 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20241 + WRT32_IO_REG(ha, io_base_addr, 0x3060);
20242 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20243 + WRT32_IO_REG(ha, io_base_addr, 0x3070);
20244 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20245 +
20246 + /* Fibre Protocol Module registers. */
20247 +
20248 + /* FPM hardware */
20249 + WRT32_IO_REG(ha, io_base_addr, 0x4000);
20250 + bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0, 16, 32);
20251 + WRT32_IO_REG(ha, io_base_addr, 0x4010);
20252 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20253 + WRT32_IO_REG(ha, io_base_addr, 0x4020);
20254 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20255 + WRT32_IO_REG(ha, io_base_addr, 0x4030);
20256 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20257 + WRT32_IO_REG(ha, io_base_addr, 0x4040);
20258 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20259 + WRT32_IO_REG(ha, io_base_addr, 0x4050);
20260 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20261 + WRT32_IO_REG(ha, io_base_addr, 0x4060);
20262 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20263 + WRT32_IO_REG(ha, io_base_addr, 0x4070);
20264 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20265 + WRT32_IO_REG(ha, io_base_addr, 0x4080);
20266 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20267 + WRT32_IO_REG(ha, io_base_addr, 0x4090);
20268 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20269 + WRT32_IO_REG(ha, io_base_addr, 0x40A0);
20270 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20271 + WRT32_IO_REG(ha, io_base_addr, 0x40B0);
20272 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20273 + WRT32_IO_REG(ha, io_base_addr, 0x40C0);
20274 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20275 + WRT32_IO_REG(ha, io_base_addr, 0x40D0);
20276 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20277 + WRT32_IO_REG(ha, io_base_addr, 0x40E0);
20278 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20279 + WRT32_IO_REG(ha, io_base_addr, 0x40F0);
20280 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20281 +
20282 + /* Pointer arrays registers */
20283 +
20284 + /* RQ0 Array registers. */
20285 + WRT32_IO_REG(ha, io_base_addr, 0x5C00);
20286 + bp = ql_read_regs(ha, fw->rq0_array_reg, ha->iobase + 0xC0,
20287 + 16, 32);
20288 + WRT32_IO_REG(ha, io_base_addr, 0x5C10);
20289 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20290 + WRT32_IO_REG(ha, io_base_addr, 0x5C20);
20291 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20292 + WRT32_IO_REG(ha, io_base_addr, 0x5C30);
20293 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20294 + WRT32_IO_REG(ha, io_base_addr, 0x5C40);
20295 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20296 + WRT32_IO_REG(ha, io_base_addr, 0x5C50);
20297 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20298 + WRT32_IO_REG(ha, io_base_addr, 0x5C60);
20299 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20300 + WRT32_IO_REG(ha, io_base_addr, 0x5C70);
20301 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20302 + WRT32_IO_REG(ha, io_base_addr, 0x5C80);
20303 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20304 + WRT32_IO_REG(ha, io_base_addr, 0x5C90);
20305 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20306 + WRT32_IO_REG(ha, io_base_addr, 0x5CA0);
20307 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20308 + WRT32_IO_REG(ha, io_base_addr, 0x5CB0);
20309 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20310 + WRT32_IO_REG(ha, io_base_addr, 0x5CC0);
20311 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20312 + WRT32_IO_REG(ha, io_base_addr, 0x5CD0);
20313 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20314 + WRT32_IO_REG(ha, io_base_addr, 0x5CE0);
20315 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20316 + WRT32_IO_REG(ha, io_base_addr, 0x5CF0);
20317 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20318 +
20319 + /* RQ1 Array registers. */
20320 + WRT32_IO_REG(ha, io_base_addr, 0x5D00);
20321 + bp = ql_read_regs(ha, fw->rq1_array_reg, ha->iobase + 0xC0, 16, 32);
20322 + WRT32_IO_REG(ha, io_base_addr, 0x5D10);
20323 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20324 + WRT32_IO_REG(ha, io_base_addr, 0x5D20);
20325 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20326 + WRT32_IO_REG(ha, io_base_addr, 0x5D30);
20327 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20328 + WRT32_IO_REG(ha, io_base_addr, 0x5D40);
20329 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20330 + WRT32_IO_REG(ha, io_base_addr, 0x5D50);
20331 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20332 + WRT32_IO_REG(ha, io_base_addr, 0x5D60);
20333 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20334 + WRT32_IO_REG(ha, io_base_addr, 0x5D70);
20335 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20336 + WRT32_IO_REG(ha, io_base_addr, 0x5D80);
20337 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20338 + WRT32_IO_REG(ha, io_base_addr, 0x5D90);
20339 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20340 + WRT32_IO_REG(ha, io_base_addr, 0x5DA0);
20341 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20342 + WRT32_IO_REG(ha, io_base_addr, 0x5DB0);
20343 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20344 + WRT32_IO_REG(ha, io_base_addr, 0x5DC0);
20345 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20346 + WRT32_IO_REG(ha, io_base_addr, 0x5DD0);
20347 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20348 + WRT32_IO_REG(ha, io_base_addr, 0x5DE0);
20349 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20350 + WRT32_IO_REG(ha, io_base_addr, 0x5DF0);
20351 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20352 +
20353 + /* RP0 Array registers. */
20354 + WRT32_IO_REG(ha, io_base_addr, 0x5E00);
20355 + bp = ql_read_regs(ha, fw->rp0_array_reg, ha->iobase + 0xC0, 16, 32);
20356 + WRT32_IO_REG(ha, io_base_addr, 0x5E10);
20357 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20358 + WRT32_IO_REG(ha, io_base_addr, 0x5E20);
20359 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20360 + WRT32_IO_REG(ha, io_base_addr, 0x5E30);
20361 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20362 + WRT32_IO_REG(ha, io_base_addr, 0x5E40);
20363 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20364 + WRT32_IO_REG(ha, io_base_addr, 0x5E50);
20365 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20366 + WRT32_IO_REG(ha, io_base_addr, 0x5E60);
20367 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20368 + WRT32_IO_REG(ha, io_base_addr, 0x5E70);
20369 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20370 + WRT32_IO_REG(ha, io_base_addr, 0x5E80);
20371 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20372 + WRT32_IO_REG(ha, io_base_addr, 0x5E90);
20373 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20374 + WRT32_IO_REG(ha, io_base_addr, 0x5EA0);
20375 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20376 + WRT32_IO_REG(ha, io_base_addr, 0x5EB0);
20377 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20378 + WRT32_IO_REG(ha, io_base_addr, 0x5EC0);
20379 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20380 + WRT32_IO_REG(ha, io_base_addr, 0x5ED0);
20381 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20382 + WRT32_IO_REG(ha, io_base_addr, 0x5EE0);
20383 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20384 + WRT32_IO_REG(ha, io_base_addr, 0x5EF0);
20385 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20386 +
20387 + /* RP1 Array registers. */
20388 + WRT32_IO_REG(ha, io_base_addr, 0x5F00);
20389 + bp = ql_read_regs(ha, fw->rp1_array_reg, ha->iobase + 0xC0, 16, 32);
20390 + WRT32_IO_REG(ha, io_base_addr, 0x5F10);
20391 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20392 + WRT32_IO_REG(ha, io_base_addr, 0x5F20);
20393 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20394 + WRT32_IO_REG(ha, io_base_addr, 0x5F30);
20395 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20396 + WRT32_IO_REG(ha, io_base_addr, 0x5F40);
20397 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20398 + WRT32_IO_REG(ha, io_base_addr, 0x5F50);
20399 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20400 + WRT32_IO_REG(ha, io_base_addr, 0x5F60);
20401 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20402 + WRT32_IO_REG(ha, io_base_addr, 0x5F70);
20403 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20404 + WRT32_IO_REG(ha, io_base_addr, 0x5F80);
20405 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20406 + WRT32_IO_REG(ha, io_base_addr, 0x5F90);
20407 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20408 + WRT32_IO_REG(ha, io_base_addr, 0x5FA0);
20409 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20410 + WRT32_IO_REG(ha, io_base_addr, 0x5FB0);
20411 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20412 + WRT32_IO_REG(ha, io_base_addr, 0x5FC0);
20413 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20414 + WRT32_IO_REG(ha, io_base_addr, 0x5FD0);
20415 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20416 + WRT32_IO_REG(ha, io_base_addr, 0x5FE0);
20417 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20418 + WRT32_IO_REG(ha, io_base_addr, 0x5FF0);
20419 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20420 +
20421 + /* AT0 Array Registers */
20422 + WRT32_IO_REG(ha, io_base_addr, 0x7080);
20423 + bp = ql_read_regs(ha, fw->ato_array_reg, ha->iobase + 0xC0, 16, 32);
20424 + WRT32_IO_REG(ha, io_base_addr, 0x7090);
20425 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20426 + WRT32_IO_REG(ha, io_base_addr, 0x70A0);
20427 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20428 + WRT32_IO_REG(ha, io_base_addr, 0x70B0);
20429 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20430 + WRT32_IO_REG(ha, io_base_addr, 0x70C0);
20431 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20432 + WRT32_IO_REG(ha, io_base_addr, 0x70D0);
20433 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20434 + WRT32_IO_REG(ha, io_base_addr, 0x70E0);
20435 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20436 + WRT32_IO_REG(ha, io_base_addr, 0x70F0);
20437 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20438 +
20439 + /* I/O queue control registers */
20440 +
20441 + /* Queue Control Registers. */
20442 + WRT32_IO_REG(ha, io_base_addr, 0x7800);
20443 + (void) ql_read_regs(ha, fw->queue_control_reg, ha->iobase + 0xC0,
20444 + 16, 32);
20445 +
20446 + /* Frame Buffer registers. */
20447 +
20448 + /* FB hardware */
20449 + WRT32_IO_REG(ha, io_base_addr, 0x6000);
20450 + bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0, 16, 32);
20451 + WRT32_IO_REG(ha, io_base_addr, 0x6010);
20452 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20453 + WRT32_IO_REG(ha, io_base_addr, 0x6020);
20454 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20455 + WRT32_IO_REG(ha, io_base_addr, 0x6030);
20456 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20457 + WRT32_IO_REG(ha, io_base_addr, 0x6040);
20458 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20459 + WRT32_IO_REG(ha, io_base_addr, 0x6060);
20460 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20461 + WRT32_IO_REG(ha, io_base_addr, 0x6070);
20462 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20463 + WRT32_IO_REG(ha, io_base_addr, 0x6100);
20464 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20465 + WRT32_IO_REG(ha, io_base_addr, 0x6130);
20466 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20467 + WRT32_IO_REG(ha, io_base_addr, 0x6150);
20468 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20469 + WRT32_IO_REG(ha, io_base_addr, 0x6170);
20470 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20471 + WRT32_IO_REG(ha, io_base_addr, 0x6190);
20472 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20473 + WRT32_IO_REG(ha, io_base_addr, 0x61B0);
20474 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20475 + WRT32_IO_REG(ha, io_base_addr, 0x61C0);
20476 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20477 + WRT32_IO_REG(ha, io_base_addr, 0x6530);
20478 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20479 + WRT32_IO_REG(ha, io_base_addr, 0x6540);
20480 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20481 + WRT32_IO_REG(ha, io_base_addr, 0x6550);
20482 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20483 + WRT32_IO_REG(ha, io_base_addr, 0x6560);
20484 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20485 + WRT32_IO_REG(ha, io_base_addr, 0x6570);
20486 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20487 + WRT32_IO_REG(ha, io_base_addr, 0x6580);
20488 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20489 + WRT32_IO_REG(ha, io_base_addr, 0x6590);
20490 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20491 + WRT32_IO_REG(ha, io_base_addr, 0x65A0);
20492 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20493 + WRT32_IO_REG(ha, io_base_addr, 0x65B0);
20494 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20495 + WRT32_IO_REG(ha, io_base_addr, 0x65C0);
20496 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20497 + WRT32_IO_REG(ha, io_base_addr, 0x65D0);
20498 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20499 + WRT32_IO_REG(ha, io_base_addr, 0x65E0);
20500 + bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20501 + WRT32_IO_REG(ha, io_base_addr, 0x6F00);
20502 + (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20503 +
20504 + /* Get the Queue Pointers */
20505 + dp = fw->req_rsp_ext_mem;
20506 + for (index = 0; index < ha->rsp_queues_cnt; index++) {
20507 + if (index == 0) {
20508 + *dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_in);
20509 + LITTLE_ENDIAN_32(dp);
20510 + dp++;
20511 + *dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_out);
20512 + LITTLE_ENDIAN_32(dp);
20513 + dp++;
20514 + } else if (index == 1) {
20515 + *dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_in);
20516 + LITTLE_ENDIAN_32(dp);
20517 + dp++;
20518 + *dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_out);
20519 + LITTLE_ENDIAN_32(dp);
20520 + dp++;
20521 + } else {
20522 + *dp++ = 0;
20523 + *dp++ = 0;
20524 + }
20525 + *dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_in);
20526 + LITTLE_ENDIAN_32(dp);
20527 + dp++;
20528 + *dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_out);
20529 + LITTLE_ENDIAN_32(dp);
20530 + dp++;
20531 + }
20532 +
20533 + /* Get the request queue */
20534 + (void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
20535 + DDI_DMA_SYNC_FORCPU);
20536 + w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
20537 + for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
20538 + *dp = *w32ptr++;
20539 + LITTLE_ENDIAN_32(dp);
20540 + dp++;
20541 + }
20542 + if (ha->req_q[1] != NULL) {
20543 + (void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle, 0, 0,
20544 + DDI_DMA_SYNC_FORCPU);
20545 + w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
20546 + for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
20547 + *dp = *w32ptr++;
20548 + LITTLE_ENDIAN_32(dp);
20549 + dp++;
20550 + }
20551 + }
20552 +
20553 + /* Get the response queues */
20554 + for (index = 0; index < ha->rsp_queues_cnt; index++) {
20555 + (void) ddi_dma_sync(ha->rsp_queues[index]->rsp_ring.dma_handle,
20556 + 0, 0, DDI_DMA_SYNC_FORCPU);
20557 + w32ptr = (uint32_t *)ha->rsp_queues[index]->rsp_ring.bp;
20558 + for (cnt = 0; cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
20559 + cnt++) {
20560 + *dp = *w32ptr++;
20561 + LITTLE_ENDIAN_32(dp);
20562 + dp++;
20563 + }
20564 + }
20565 +
20566 + /* Reset RISC. */
20567 + ql_reset_chip(ha);
20568 +
20569 + /* Code RAM. */
20570 + rv = ql_read_risc_ram(ha, 0x20000, sizeof (fw->code_ram) / 4,
20571 + fw->code_ram);
20572 + if (rval == QL_SUCCESS) {
20573 + rval = rv;
20574 + }
20575 + rv = ql_read_risc_ram(ha, 0x100000,
20576 + ha->fw_ext_memory_size / 4, dp);
20577 + if (rval == QL_SUCCESS) {
20578 + rval = rv;
20579 + }
20580 +
20581 + /* Get the extended trace buffer */
20582 + if (ha->fwexttracebuf.dma_handle != NULL) {
20583 + /* Sync DMA buffer. */
20584 + (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
20585 + FWEXTSIZE, DDI_DMA_SYNC_FORCPU);
20586 +
20587 + w32ptr = ha->fwexttracebuf.bp;
20588 + for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
20589 + fw->ext_trace_buf[cnt] = *w32ptr++;
20590 + }
20591 + }
20592 +
20593 + /* Get the FC event trace buffer */
20594 + if (ha->fwfcetracebuf.dma_handle != NULL) {
20595 + /* Sync DMA buffer. */
20596 + (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
20597 + FWFCESIZE, DDI_DMA_SYNC_FORCPU);
20598 +
20599 + w32ptr = ha->fwfcetracebuf.bp;
20600 + for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
20601 + fw->fce_trace_buf[cnt] = *w32ptr++;
20602 + }
20603 + }
20604 +
20605 + if (rval != QL_SUCCESS) {
20606 + EL(ha, "failed, rval = %xh\n", rval);
20607 + } else {
20608 + /*EMPTY*/
20609 + QL_PRINT_10(ha, "done\n");
20610 + }
20611 + return (QL_SUCCESS);
20612 +}
20613 +
20614 +/*
20615 + * ql_83xx_ascii_fw_dump
20616 + * Converts ISP83xx firmware binary dump to ascii.
20617 + *
20618 + * Input:
20619 + * ha = adapter state pointer.
20620 + * bptr = buffer pointer.
20621 + *
20622 + * Returns:
20623 + * Amount of data buffer used.
20624 + *
20625 + * Context:
20626 + * Kernel context.
20627 + */
20628 +static size_t
20629 +ql_83xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
20630 +{
20631 + uint32_t cnt, cnt1, len, *dp, *dp2;
20632 + caddr_t bp = bufp;
20633 + ql_83xx_fw_dump_t *fw = ha->ql_dump_ptr;
20634 +
20635 + QL_PRINT_3(ha, "started\n");
20636 +
20637 + if ((len = ha->risc_dump_size) == 0) {
20638 + QL_PRINT_10(ha, "no buffer\n");
20639 + return (0);
20640 + }
20641 + (void) snprintf(bp, len, "\nISP FW Version %d.%02d.%02d Attributes "
20642 + "%X\n", ha->fw_major_version, ha->fw_minor_version,
20643 + ha->fw_subminor_version, ha->fw_attributes);
20644 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20645 + return (strlen(bufp));
20646 + }
20647 +
20648 + (void) snprintf(bp, len, "\nHCCR Register\n%08x\n", fw->hccr);
20649 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20650 + return (strlen(bufp));
20651 + }
20652 +
20653 + (void) snprintf(bp, len, "\nR2H Status Register\n%08x\n",
20654 + fw->r2h_status);
20655 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20656 + return (strlen(bufp));
20657 + }
20658 +
20659 + (void) snprintf(bp, len,
20660 + "\nAER Uncorrectable Error Status Register\n%08x\n", fw->aer_ues);
20661 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20662 + return (strlen(bufp));
20663 + }
20664 +
20665 + (void) snprintf(bp, len, "\nHostRisc Registers");
20666 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20667 + return (strlen(bufp));
20668 + }
20669 + for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
20670 + if (cnt % 8 == 0) {
20671 + (void) snprintf(bp, len, "\n");
20672 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20673 + return (strlen(bufp));
20674 + }
20675 + }
20676 + (void) snprintf(bp, len, "%08x ", fw->hostrisc_reg[cnt]);
20677 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20678 + return (strlen(bufp));
20679 + }
20680 + }
20681 +
20682 + (void) snprintf(bp, len, "\n\nPCIe Registers");
20683 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20684 + return (strlen(bufp));
20685 + }
20686 + for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
20687 + if (cnt % 8 == 0) {
20688 + (void) snprintf(bp, len, "\n");
20689 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20690 + return (strlen(bufp));
20691 + }
20692 + }
20693 + (void) snprintf(bp, len, "%08x ", fw->pcie_reg[cnt]);
20694 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20695 + return (strlen(bufp));
20696 + }
20697 + }
20698 +
20699 + dp = fw->req_rsp_ext_mem;
20700 + for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
20701 + (void) snprintf(bp, len, "\n\nQueue Pointers #%d:\n", cnt);
20702 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20703 + return (strlen(bufp));
20704 + }
20705 + for (cnt1 = 0; cnt1 < 4; cnt1++) {
20706 + (void) snprintf(bp, len, "%08x ", *dp++);
20707 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20708 + return (strlen(bufp));
20709 + }
20710 + }
20711 + }
20712 +
20713 + (void) snprintf(bp, len, "\n\nHost Interface Registers");
20714 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20715 + return (strlen(bufp));
20716 + }
20717 + for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
20718 + if (cnt % 8 == 0) {
20719 + (void) snprintf(bp, len, "\n");
20720 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20721 + return (strlen(bufp));
20722 + }
20723 + }
20724 + (void) snprintf(bp, len, "%08x ", fw->host_reg[cnt]);
20725 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20726 + return (strlen(bufp));
20727 + }
20728 + }
20729 +
20730 + (void) snprintf(bp, len, "\n\nShadow Registers");
20731 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20732 + return (strlen(bufp));
20733 + }
20734 + for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
20735 + if (cnt % 8 == 0) {
20736 + (void) snprintf(bp, len, "\n");
20737 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20738 + return (strlen(bufp));
20739 + }
20740 + }
20741 + (void) snprintf(bp, len, "%08x ", fw->shadow_reg[cnt]);
20742 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20743 + return (strlen(bufp));
20744 + }
20745 + }
20746 +
20747 + (void) snprintf(bp, len, "\n\nRISC IO Register\n%08x", fw->risc_io);
20748 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20749 + return (strlen(bufp));
20750 + }
20751 +
20752 + (void) snprintf(bp, len, "\n\nMailbox Registers");
20753 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20754 + return (strlen(bufp));
20755 + }
20756 + for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
20757 + if (cnt % 16 == 0) {
20758 + (void) snprintf(bp, len, "\n");
20759 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20760 + return (strlen(bufp));
20761 + }
20762 + }
20763 + (void) snprintf(bp, len, "%04x ", fw->mailbox_reg[cnt]);
20764 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20765 + return (strlen(bufp));
20766 + }
20767 + }
20768 +
20769 + (void) snprintf(bp, len, "\n\nXSEQ GP Registers");
20770 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20771 + return (strlen(bufp));
20772 + }
20773 + for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
20774 + if (cnt % 8 == 0) {
20775 + (void) snprintf(bp, len, "\n");
20776 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20777 + return (strlen(bufp));
20778 + }
20779 + }
20780 + (void) snprintf(bp, len, "%08x ", fw->xseq_gp_reg[cnt]);
20781 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20782 + return (strlen(bufp));
20783 + }
20784 + }
20785 +
20786 + (void) snprintf(bp, len, "\n\nXSEQ-0 Registers");
20787 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20788 + return (strlen(bufp));
20789 + }
20790 + for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
20791 + if (cnt % 8 == 0) {
20792 + (void) snprintf(bp, len, "\n");
20793 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20794 + return (strlen(bufp));
20795 + }
20796 + }
20797 + (void) snprintf(bp, len, "%08x ", fw->xseq_0_reg[cnt]);
20798 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20799 + return (strlen(bufp));
20800 + }
20801 + }
20802 +
20803 + (void) snprintf(bp, len, "\n\nXSEQ-1 Registers");
20804 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20805 + return (strlen(bufp));
20806 + }
20807 + for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
20808 + if (cnt % 8 == 0) {
20809 + (void) snprintf(bp, len, "\n");
20810 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20811 + return (strlen(bufp));
20812 + }
20813 + }
20814 + (void) snprintf(bp, len, "%08x ", fw->xseq_1_reg[cnt]);
20815 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20816 + return (strlen(bufp));
20817 + }
20818 + }
20819 +
20820 + (void) snprintf(bp, len, "\n\nXSEQ-2 Registers");
20821 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20822 + return (strlen(bufp));
20823 + }
20824 + for (cnt = 0; cnt < sizeof (fw->xseq_2_reg) / 4; cnt++) {
20825 + if (cnt % 8 == 0) {
20826 + (void) snprintf(bp, len, "\n");
20827 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20828 + return (strlen(bufp));
20829 + }
20830 + }
20831 + (void) snprintf(bp, len, "%08x ", fw->xseq_2_reg[cnt]);
20832 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20833 + return (strlen(bufp));
20834 + }
20835 + }
20836 +
20837 + (void) snprintf(bp, len, "\n\nRSEQ GP Registers");
20838 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20839 + return (strlen(bufp));
20840 + }
20841 + for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
20842 + if (cnt % 8 == 0) {
20843 + (void) snprintf(bp, len, "\n");
20844 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20845 + return (strlen(bufp));
20846 + }
20847 + }
20848 + (void) snprintf(bp, len, "%08x ", fw->rseq_gp_reg[cnt]);
20849 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20850 + return (strlen(bufp));
20851 + }
20852 + }
20853 +
20854 + (void) snprintf(bp, len, "\n\nRSEQ-0 Registers");
20855 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20856 + return (strlen(bufp));
20857 + }
20858 + for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
20859 + if (cnt % 8 == 0) {
20860 + (void) snprintf(bp, len, "\n");
20861 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20862 + return (strlen(bufp));
20863 + }
20864 + }
20865 + (void) snprintf(bp, len, "%08x ", fw->rseq_0_reg[cnt]);
20866 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20867 + return (strlen(bufp));
20868 + }
20869 + }
20870 +
20871 + (void) snprintf(bp, len, "\n\nRSEQ-1 Registers");
20872 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20873 + return (strlen(bufp));
20874 + }
20875 + for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
20876 + if (cnt % 8 == 0) {
20877 + (void) snprintf(bp, len, "\n");
20878 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20879 + return (strlen(bufp));
20880 + }
20881 + }
20882 + (void) snprintf(bp, len, "%08x ", fw->rseq_1_reg[cnt]);
20883 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20884 + return (strlen(bufp));
20885 + }
20886 + }
20887 +
20888 + (void) snprintf(bp, len, "\n\nRSEQ-2 Registers");
20889 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20890 + return (strlen(bufp));
20891 + }
20892 + for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
20893 + if (cnt % 8 == 0) {
20894 + (void) snprintf(bp, len, "\n");
20895 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20896 + return (strlen(bufp));
20897 + }
20898 + }
20899 + (void) snprintf(bp, len, "%08x ", fw->rseq_2_reg[cnt]);
20900 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20901 + return (strlen(bufp));
20902 + }
20903 + }
20904 +
20905 + (void) snprintf(bp, len, "\n\nRSEQ-3 Registers");
20906 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20907 + return (strlen(bufp));
20908 + }
20909 + for (cnt = 0; cnt < sizeof (fw->rseq_3_reg) / 4; cnt++) {
20910 + if (cnt % 8 == 0) {
20911 + (void) snprintf(bp, len, "\n");
20912 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20913 + return (strlen(bufp));
20914 + }
20915 + }
20916 + (void) snprintf(bp, len, "%08x ", fw->rseq_3_reg[cnt]);
20917 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20918 + return (strlen(bufp));
20919 + }
20920 + }
20921 +
20922 + (void) snprintf(bp, len, "\n\nASEQ GP Registers");
20923 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20924 + return (strlen(bufp));
20925 + }
20926 + for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
20927 + if (cnt % 8 == 0) {
20928 + (void) snprintf(bp, len, "\n");
20929 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20930 + return (strlen(bufp));
20931 + }
20932 + }
20933 + (void) snprintf(bp, len, "%08x ", fw->aseq_gp_reg[cnt]);
20934 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20935 + return (strlen(bufp));
20936 + }
20937 + }
20938 +
20939 + (void) snprintf(bp, len, "\n\nASEQ-0 Registers");
20940 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20941 + return (strlen(bufp));
20942 + }
20943 + for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
20944 + if (cnt % 8 == 0) {
20945 + (void) snprintf(bp, len, "\n");
20946 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20947 + return (strlen(bufp));
20948 + }
20949 + }
20950 + (void) snprintf(bp, len, "%08x ", fw->aseq_0_reg[cnt]);
20951 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20952 + return (strlen(bufp));
20953 + }
20954 + }
20955 +
20956 + (void) snprintf(bp, len, "\n\nASEQ-1 Registers");
20957 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20958 + return (strlen(bufp));
20959 + }
20960 + for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
20961 + if (cnt % 8 == 0) {
20962 + (void) snprintf(bp, len, "\n");
20963 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20964 + return (strlen(bufp));
20965 + }
20966 + }
20967 + (void) snprintf(bp, len, "%08x ", fw->aseq_1_reg[cnt]);
20968 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20969 + return (strlen(bufp));
20970 + }
20971 + }
20972 +
20973 + (void) snprintf(bp, len, "\n\nASEQ-2 Registers");
20974 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20975 + return (strlen(bufp));
20976 + }
20977 + for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
20978 + if (cnt % 8 == 0) {
20979 + (void) snprintf(bp, len, "\n");
20980 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20981 + return (strlen(bufp));
20982 + }
20983 + }
20984 + (void) snprintf(bp, len, "%08x ", fw->aseq_2_reg[cnt]);
20985 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20986 + return (strlen(bufp));
20987 + }
20988 + }
20989 +
20990 + (void) snprintf(bp, len, "\n\nASEQ-3 Registers");
20991 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20992 + return (strlen(bufp));
20993 + }
20994 + for (cnt = 0; cnt < sizeof (fw->aseq_3_reg) / 4; cnt++) {
20995 + if (cnt % 8 == 0) {
20996 + (void) snprintf(bp, len, "\n");
20997 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20998 + return (strlen(bufp));
20999 + }
21000 + }
21001 + (void) snprintf(bp, len, "%08x ", fw->aseq_3_reg[cnt]);
21002 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21003 + return (strlen(bufp));
21004 + }
21005 + }
21006 +
21007 + (void) snprintf(bp, len, "\n\nCommand DMA Registers");
21008 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21009 + return (strlen(bufp));
21010 + }
21011 + for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
21012 + if (cnt % 8 == 0) {
21013 + (void) snprintf(bp, len, "\n");
21014 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21015 + return (strlen(bufp));
21016 + }
21017 + }
21018 + (void) snprintf(bp, len, "%08x ", fw->cmd_dma_reg[cnt]);
21019 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21020 + return (strlen(bufp));
21021 + }
21022 + }
21023 +
21024 + (void) snprintf(bp, len, "\n\nRequest0 Queue DMA Channel Registers");
21025 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21026 + return (strlen(bufp));
21027 + }
21028 + for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
21029 + if (cnt % 8 == 0) {
21030 + (void) snprintf(bp, len, "\n");
21031 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21032 + return (strlen(bufp));
21033 + }
21034 + }
21035 + (void) snprintf(bp, len, "%08x ", fw->req0_dma_reg[cnt]);
21036 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21037 + return (strlen(bufp));
21038 + }
21039 + }
21040 +
21041 + (void) snprintf(bp, len, "\n\nResponse0 Queue DMA Channel Registers");
21042 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21043 + return (strlen(bufp));
21044 + }
21045 + for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
21046 + if (cnt % 8 == 0) {
21047 + (void) snprintf(bp, len, "\n");
21048 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21049 + return (strlen(bufp));
21050 + }
21051 + }
21052 + (void) snprintf(bp, len, "%08x ", fw->resp0_dma_reg[cnt]);
21053 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21054 + return (strlen(bufp));
21055 + }
21056 + }
21057 +
21058 + (void) snprintf(bp, len, "\n\nRequest1 Queue DMA Channel Registers");
21059 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21060 + return (strlen(bufp));
21061 + }
21062 + for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
21063 + if (cnt % 8 == 0) {
21064 + (void) snprintf(bp, len, "\n");
21065 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21066 + return (strlen(bufp));
21067 + }
21068 + }
21069 + (void) snprintf(bp, len, "%08x ", fw->req1_dma_reg[cnt]);
21070 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21071 + return (strlen(bufp));
21072 + }
21073 + }
21074 +
21075 + (void) snprintf(bp, len, "\n\nXMT0 Data DMA Registers");
21076 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21077 + return (strlen(bufp));
21078 + }
21079 + for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
21080 + if (cnt % 8 == 0) {
21081 + (void) snprintf(bp, len, "\n");
21082 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21083 + return (strlen(bufp));
21084 + }
21085 + }
21086 + (void) snprintf(bp, len, "%08x ", fw->xmt0_dma_reg[cnt]);
21087 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21088 + return (strlen(bufp));
21089 + }
21090 + }
21091 +
21092 + (void) snprintf(bp, len, "\n\nXMT1 Data DMA Registers");
21093 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21094 + return (strlen(bufp));
21095 + }
21096 + for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
21097 + if (cnt % 8 == 0) {
21098 + (void) snprintf(bp, len, "\n");
21099 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21100 + return (strlen(bufp));
21101 + }
21102 + }
21103 + (void) snprintf(bp, len, "%08x ", fw->xmt1_dma_reg[cnt]);
21104 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21105 + return (strlen(bufp));
21106 + }
21107 + }
21108 +
21109 + (void) snprintf(bp, len, "\n\nXMT2 Data DMA Registers");
21110 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21111 + return (strlen(bufp));
21112 + }
21113 + for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
21114 + if (cnt % 8 == 0) {
21115 + (void) snprintf(bp, len, "\n");
21116 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21117 + return (strlen(bufp));
21118 + }
21119 + }
21120 + (void) snprintf(bp, len, "%08x ", fw->xmt2_dma_reg[cnt]);
21121 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21122 + return (strlen(bufp));
21123 + }
21124 + }
21125 +
21126 + (void) snprintf(bp, len, "\n\nXMT3 Data DMA Registers");
21127 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21128 + return (strlen(bufp));
21129 + }
21130 + for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
21131 + if (cnt % 8 == 0) {
21132 + (void) snprintf(bp, len, "\n");
21133 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21134 + return (strlen(bufp));
21135 + }
21136 + }
21137 + (void) snprintf(bp, len, "%08x ", fw->xmt3_dma_reg[cnt]);
21138 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21139 + return (strlen(bufp));
21140 + }
21141 + }
21142 +
21143 + (void) snprintf(bp, len, "\n\nXMT4 Data DMA Registers");
21144 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21145 + return (strlen(bufp));
21146 + }
21147 + for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
21148 + if (cnt % 8 == 0) {
21149 + (void) snprintf(bp, len, "\n");
21150 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21151 + return (strlen(bufp));
21152 + }
21153 + }
21154 + (void) snprintf(bp, len, "%08x ", fw->xmt4_dma_reg[cnt]);
21155 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21156 + return (strlen(bufp));
21157 + }
21158 + }
21159 +
21160 + (void) snprintf(bp, len, "\n\nXMT Data DMA Common Registers");
21161 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21162 + return (strlen(bufp));
21163 + }
21164 + for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
21165 + if (cnt % 8 == 0) {
21166 + (void) snprintf(bp, len, "\n");
21167 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21168 + return (strlen(bufp));
21169 + }
21170 + }
21171 + (void) snprintf(bp, len, "%08x ", fw->xmt_data_dma_reg[cnt]);
21172 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21173 + return (strlen(bufp));
21174 + }
21175 + }
21176 +
21177 + (void) snprintf(bp, len, "\n\nRCV Thread 0 Data DMA Registers");
21178 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21179 + return (strlen(bufp));
21180 + }
21181 + for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
21182 + if (cnt % 8 == 0) {
21183 + (void) snprintf(bp, len, "\n");
21184 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21185 + return (strlen(bufp));
21186 + }
21187 + }
21188 + (void) snprintf(bp, len, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
21189 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21190 + return (strlen(bufp));
21191 + }
21192 + }
21193 +
21194 + (void) snprintf(bp, len, "\n\nRCV Thread 1 Data DMA Registers");
21195 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21196 + return (strlen(bufp));
21197 + }
21198 + for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
21199 + if (cnt % 8 == 0) {
21200 + (void) snprintf(bp, len, "\n");
21201 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21202 + return (strlen(bufp));
21203 + }
21204 + }
21205 + (void) snprintf(bp, len, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
21206 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21207 + return (strlen(bufp));
21208 + }
21209 + }
21210 +
21211 + (void) snprintf(bp, len, "\n\nRISC GP Registers");
21212 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21213 + return (strlen(bufp));
21214 + }
21215 + for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
21216 + if (cnt % 8 == 0) {
21217 + (void) snprintf(bp, len, "\n");
21218 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21219 + return (strlen(bufp));
21220 + }
21221 + }
21222 + (void) snprintf(bp, len, "%08x ", fw->risc_gp_reg[cnt]);
21223 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21224 + return (strlen(bufp));
21225 + }
21226 + }
21227 +
21228 + (void) snprintf(bp, len, "\n\nLMC Registers");
21229 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21230 + return (strlen(bufp));
21231 + }
21232 + for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
21233 + if (cnt % 8 == 0) {
21234 + (void) snprintf(bp, len, "\n");
21235 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21236 + return (strlen(bufp));
21237 + }
21238 + }
21239 + (void) snprintf(bp, len, "%08x ", fw->lmc_reg[cnt]);
21240 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21241 + return (strlen(bufp));
21242 + }
21243 + }
21244 +
21245 + (void) snprintf(bp, len, "\n\nFPM Hardware Registers");
21246 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21247 + return (strlen(bufp));
21248 + }
21249 + cnt1 = (uint32_t)(sizeof (fw->fpm_hdw_reg));
21250 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21251 + if (cnt % 8 == 0) {
21252 + (void) snprintf(bp, len, "\n");
21253 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21254 + return (strlen(bufp));
21255 + }
21256 + }
21257 + (void) snprintf(bp, len, "%08x ", fw->fpm_hdw_reg[cnt]);
21258 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21259 + return (strlen(bufp));
21260 + }
21261 + }
21262 +
21263 + (void) snprintf(bp, len, "\n\nRQ0 Array Registers");
21264 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21265 + return (strlen(bufp));
21266 + }
21267 + cnt1 = (uint32_t)(sizeof (fw->rq0_array_reg));
21268 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21269 + if (cnt % 8 == 0) {
21270 + (void) snprintf(bp, len, "\n");
21271 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21272 + return (strlen(bufp));
21273 + }
21274 + }
21275 + (void) snprintf(bp, len, "%08x ", fw->rq0_array_reg[cnt]);
21276 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21277 + return (strlen(bufp));
21278 + }
21279 + }
21280 +
21281 + (void) snprintf(bp, len, "\n\nRQ1 Array Registers");
21282 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21283 + return (strlen(bufp));
21284 + }
21285 + cnt1 = (uint32_t)(sizeof (fw->rq1_array_reg));
21286 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21287 + if (cnt % 8 == 0) {
21288 + (void) snprintf(bp, len, "\n");
21289 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21290 + return (strlen(bufp));
21291 + }
21292 + }
21293 + (void) snprintf(bp, len, "%08x ", fw->rq1_array_reg[cnt]);
21294 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21295 + return (strlen(bufp));
21296 + }
21297 + }
21298 +
21299 + (void) snprintf(bp, len, "\n\nRP0 Array Registers");
21300 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21301 + return (strlen(bufp));
21302 + }
21303 + cnt1 = (uint32_t)(sizeof (fw->rp0_array_reg));
21304 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21305 + if (cnt % 8 == 0) {
21306 + (void) snprintf(bp, len, "\n");
21307 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21308 + return (strlen(bufp));
21309 + }
21310 + }
21311 + (void) snprintf(bp, len, "%08x ", fw->rp0_array_reg[cnt]);
21312 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21313 + return (strlen(bufp));
21314 + }
21315 + }
21316 +
21317 + (void) snprintf(bp, len, "\n\nRP1 Array Registers");
21318 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21319 + return (strlen(bufp));
21320 + }
21321 + cnt1 = (uint32_t)(sizeof (fw->rp1_array_reg));
21322 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21323 + if (cnt % 8 == 0) {
21324 + (void) snprintf(bp, len, "\n");
21325 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21326 + return (strlen(bufp));
21327 + }
21328 + }
21329 + (void) snprintf(bp, len, "%08x ", fw->rp1_array_reg[cnt]);
21330 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21331 + return (strlen(bufp));
21332 + }
21333 + }
21334 +
21335 + (void) snprintf(bp, len, "\n\nAT0 Array Registers");
21336 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21337 + return (strlen(bufp));
21338 + }
21339 + cnt1 = (uint32_t)(sizeof (fw->ato_array_reg));
21340 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21341 + if (cnt % 8 == 0) {
21342 + (void) snprintf(bp, len, "\n");
21343 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21344 + return (strlen(bufp));
21345 + }
21346 + }
21347 + (void) snprintf(bp, len, "%08x ", fw->ato_array_reg[cnt]);
21348 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21349 + return (strlen(bufp));
21350 + }
21351 + }
21352 +
21353 + (void) snprintf(bp, len, "\n\nQueue Control Registers");
21354 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21355 + return (strlen(bufp));
21356 + }
21357 + cnt1 = (uint32_t)(sizeof (fw->queue_control_reg));
21358 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21359 + if (cnt % 8 == 0) {
21360 + (void) snprintf(bp, len, "\n");
21361 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21362 + return (strlen(bufp));
21363 + }
21364 + }
21365 + (void) snprintf(bp, len, "%08x ", fw->queue_control_reg[cnt]);
21366 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21367 + return (strlen(bufp));
21368 + }
21369 + }
21370 +
21371 + (void) snprintf(bp, len, "\n\nFB Hardware Registers");
21372 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21373 + return (strlen(bufp));
21374 + }
21375 + cnt1 = (uint32_t)(sizeof (fw->fb_hdw_reg));
21376 + for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21377 + if (cnt % 8 == 0) {
21378 + (void) snprintf(bp, len, "\n");
21379 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21380 + return (strlen(bufp));
21381 + }
21382 + }
21383 + (void) snprintf(bp, len, "%08x ", fw->fb_hdw_reg[cnt]);
21384 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21385 + return (strlen(bufp));
21386 + }
21387 + }
21388 +
21389 + (void) snprintf(bp, len, "\n\nCode RAM");
21390 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21391 + return (strlen(bufp));
21392 + }
21393 + for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
21394 + if (cnt % 8 == 0) {
21395 + (void) snprintf(bp, len, "\n%08x: ", cnt + 0x20000);
21396 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21397 + return (strlen(bufp));
21398 + }
21399 + }
21400 + (void) snprintf(bp, len, "%08x ", fw->code_ram[cnt]);
21401 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21402 + return (strlen(bufp));
21403 + }
21404 + }
21405 +
21406 + (void) snprintf(bp, len, "\n\nExternal Memory");
21407 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21408 + return (strlen(bufp));
21409 + }
21410 + dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
21411 + fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
21412 + for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
21413 + if (cnt % 8 == 0) {
21414 + (void) snprintf(bp, len, "\n%08x: ", cnt + 0x100000);
21415 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21416 + return (strlen(bufp));
21417 + }
21418 + }
21419 + (void) snprintf(bp, len, "%08x ", *dp++);
21420 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21421 + return (strlen(bufp));
21422 + }
21423 + }
21424 +
21425 + (void) snprintf(bp, len, "\n\n[<==END] ISP Debug Dump");
21426 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21427 + return (strlen(bufp));
21428 + }
21429 +
21430 + dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
21431 + for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
21432 + dp2 = dp;
21433 + for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21434 + if (*dp2++) {
21435 + break;
21436 + }
21437 + }
21438 + if (cnt1 == fw->req_q_size[cnt] / 4) {
21439 + dp = dp2;
21440 + continue;
21441 + }
21442 + (void) snprintf(bp, len, "\n\nRequest Queue\nQueue %d:", cnt);
21443 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21444 + return (strlen(bufp));
21445 + }
21446 + for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21447 + if (cnt1 % 8 == 0) {
21448 + (void) snprintf(bp, len, "\n%08x: ", cnt1);
21449 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21450 + return (strlen(bufp));
21451 + }
21452 + }
21453 + (void) snprintf(bp, len, "%08x ", *dp++);
21454 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21455 + return (strlen(bufp));
21456 + }
21457 + }
21458 + }
21459 +
21460 + for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
21461 + dp2 = dp;
21462 + for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21463 + cnt1++) {
21464 + if (*dp2++) {
21465 + break;
21466 + }
21467 + }
21468 + if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
21469 + dp = dp2;
21470 + continue;
21471 + }
21472 + (void) snprintf(bp, len, "\n\nResponse Queue\nQueue %d:", cnt);
21473 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21474 + return (strlen(bufp));
21475 + }
21476 + for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21477 + cnt1++) {
21478 + if (cnt1 % 8 == 0) {
21479 + (void) snprintf(bp, len, "\n%08x: ", cnt1);
21480 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21481 + return (strlen(bufp));
21482 + }
21483 + }
21484 + (void) snprintf(bp, len, "%08x ", *dp++);
21485 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21486 + return (strlen(bufp));
21487 + }
21488 + }
21489 + }
21490 +
21491 + if (ha->fwexttracebuf.dma_handle != NULL) {
21492 + uint32_t cnt_b;
21493 + uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
21494 +
21495 + (void) snprintf(bp, len, "\n\nExtended Trace Buffer Memory");
21496 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21497 + return (strlen(bufp));
21498 + }
21499 + /* show data address as a byte address, data as long words */
21500 + for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
21501 + cnt_b = cnt * 4;
21502 + if (cnt_b % 32 == 0) {
21503 + (void) snprintf(bp, len, "\n%08x: ",
21504 + (int)(w64 + cnt_b));
21505 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21506 + return (strlen(bufp));
21507 + }
21508 + }
21509 + (void) snprintf(bp, len, "%08x ",
21510 + fw->ext_trace_buf[cnt]);
21511 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21512 + return (strlen(bufp));
21513 + }
21514 + }
21515 + }
21516 +
21517 + if (ha->fwfcetracebuf.dma_handle != NULL) {
21518 + uint32_t cnt_b;
21519 + uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
21520 +
21521 + (void) snprintf(bp, len, "\n\nFC Event Trace Buffer Memory");
21522 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21523 + return (strlen(bufp));
21524 + }
21525 + /* show data address as a byte address, data as long words */
21526 + for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
21527 + cnt_b = cnt * 4;
21528 + if (cnt_b % 32 == 0) {
21529 + (void) snprintf(bp, len, "\n%08x: ",
21530 + (int)(w64 + cnt_b));
21531 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21532 + return (strlen(bufp));
21533 + }
21534 + }
21535 + (void) snprintf(bp, len, "%08x ",
21536 + fw->fce_trace_buf[cnt]);
21537 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21538 + return (strlen(bufp));
21539 + }
21540 + }
21541 + }
21542 +
21543 + QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21544 +
21545 + return (strlen(bufp));
21546 +}
21547 +
21548 +
21549 +/*
21550 + * ql_str_ptr
21551 + * Verifies buffer is not full
21552 + *
21553 + * Input:
21554 + * ha: adapter state pointer.
21555 + * bp: string buffer pointer
21556 + * len: buffer length
21557 + *
21558 + * Returns:
21559 + * NULL = buffer full else adjusted buffer pointer
21560 + *
21561 + * Context:
21562 + * Kernel context.
21563 + */
21564 +/*ARGSUSED*/
21565 +static caddr_t
21566 +ql_str_ptr(ql_adapter_state_t *ha, caddr_t bp, uint32_t *len)
21567 +{
21568 + uint32_t i;
21569 +
21570 + i = strlen(bp);
21571 + if (i > *len || !(*len -= i)) {
21572 + QL_PRINT_10(ha, "full buffer\n");
21573 + return (NULL);
21574 + }
21575 + return (bp += i);
21576 +}
21577 +
/*
 * ql_27xx_binary_fw_dump
 *	Captures an ISP27xx firmware dump by walking the dump template:
 *	a first, sizing pass parses the template with a NULL data buffer,
 *	then a second pass fills the allocated dump buffer.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_27xx_binary_fw_dump(ql_adapter_state_t *ha)
{
	ql_dmp_template_t *template_buff;
	int rval;
	uint32_t cnt, *dp, *bp, tsize;

	QL_PRINT_10(ha, "started\n");

	/* Fetch a dump template (flash or firmware module) if not cached. */
	if (ha->dmp_template.dma_handle == NULL) {
		rval = CFG_IST(ha, CFG_LOAD_FLASH_FW) ?
		    ql_2700_get_flash_dmp_template(ha) :
		    ql_2700_get_module_dmp_template(ha);
		if (rval != QL_SUCCESS) {
			EL(ha, "no dump template, status=%xh\n", rval);
			return (QL_FUNCTION_PARAMETER_ERROR);
		}
	}
	template_buff = ha->dmp_template.bp;
	tsize = template_buff->hdr.size_of_template;

	if (ha->md_capture_size == 0) {
		/*
		 * Sizing pass: copy the template out of DMA memory and
		 * parse it with a NULL buffer to compute the dump size.
		 */
		ha->ql_dump_ptr = kmem_zalloc(tsize, KM_NOSLEEP);
		if (ha->ql_dump_ptr == NULL) {
			QL_PRINT_10(ha, "done, failed alloc\n");
			return (QL_MEMORY_ALLOC_FAILED);
		}
		cnt = (uint32_t)(tsize / sizeof (uint32_t));
		dp = (uint32_t *)ha->ql_dump_ptr;
		bp = (uint32_t *)&template_buff->hdr;
		while (cnt--) {
			*dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
		}
		ha->md_capture_size = ql_2700_dmp_parse_template(ha,
		    (ql_dt_hdr_t *)ha->ql_dump_ptr, NULL, 0);
		kmem_free(ha->ql_dump_ptr, tsize);
		ha->ql_dump_ptr = NULL;

		if (ha->md_capture_size == 0) {
			return (QL_MEMORY_ALLOC_FAILED);
		}

		/*
		 * Determine ascii dump file size
		 * 2 ascii bytes per binary byte + a space and
		 * a newline every 16 binary bytes
		 */
		ha->risc_dump_size = ha->md_capture_size << 1;
		ha->risc_dump_size += ha->md_capture_size;
		ha->risc_dump_size += ha->md_capture_size / 16 + 1;
		QL_PRINT_10(ha, "md_capture_size=%xh, "
		    "risc_dump_size=%xh\n", ha->md_capture_size,
		    ha->risc_dump_size);
	}

	ha->ql_dump_ptr = kmem_zalloc(ha->md_capture_size, KM_NOSLEEP);
	if (ha->ql_dump_ptr == NULL) {
		QL_PRINT_10(ha, "done, failed alloc\n");
		return (QL_MEMORY_ALLOC_FAILED);
	}
	ha->ql_dump_size = ha->md_capture_size;

	/* Disable ISP interrupts. */
	ql_disable_intr(ha);

	/*
	 * Capture pass: the dump begins with a copy of the template,
	 * immediately followed by the captured entry data (dp points
	 * just past the template copy).
	 */
	cnt = (uint32_t)(tsize / sizeof (uint32_t));
	dp = (uint32_t *)ha->ql_dump_ptr;
	bp = (uint32_t *)&template_buff->hdr;
	while (cnt--) {
		*dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
	}

	(void) ql_2700_dmp_parse_template(ha,
	    (ql_dt_hdr_t *)ha->ql_dump_ptr,
	    (uint8_t *)dp, ha->ql_dump_size);

#ifdef _BIG_ENDIAN
	/* Dump image is little endian; byte swap on big endian hosts. */
	cnt = (uint32_t)(tsize / sizeof (uint32_t));
	dp = (uint32_t *)ha->ql_dump_ptr;
	while (cnt--) {
		ql_chg_endian((uint8_t *)dp, 4);
		dp++;
	}
#endif
	QL_PRINT_10(ha, "done\n");
	return (QL_SUCCESS);
}
21678 +
21679 +/*
21680 + * ql_27xx_ascii_fw_dump
21681 + * Converts ISP27xx firmware binary dump to ascii.
21682 + *
21683 + * Input:
21684 + * ha: port info pointer.
21685 + * bptr: buffer pointer.
21686 + *
21687 + * Returns:
21688 + * Amount of data buffer used.
21689 + *
21690 + * Context:
21691 + * Kernel context.
21692 + */
21693 +static size_t
21694 +ql_27xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
21695 +{
21696 + uint32_t cnt, len, dsize;
21697 + uint8_t *fw;
21698 + caddr_t bp;
21699 +
21700 + QL_PRINT_10(ha, "started\n");
21701 +
21702 + if ((len = ha->risc_dump_size) == 0) {
21703 + QL_PRINT_10(ha, "no buffer\n");
21704 + return (0);
21705 + }
21706 +
21707 + dsize = ha->ql_dump_size;
21708 + fw = (uint8_t *)ha->ql_dump_ptr;
21709 + bp = bufp;
21710 +
21711 + QL_PRINT_10(ha, "fw_dump_buffer=%ph, fw_bin_dump_size=%xh\n",
21712 + (void *)ha->ql_dump_ptr, ha->ql_dump_size);
21713 +
21714 + /*
21715 + * 2 ascii bytes per binary byte + a space and
21716 + * a newline every 16 binary bytes
21717 + */
21718 + cnt = 0;
21719 + while (cnt < dsize) {
21720 + (void) snprintf(bp, len, "%02x ", *fw++);
21721 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21722 + return (strlen(bufp));
21723 + }
21724 + if (++cnt % 16 == 0) {
21725 + (void) snprintf(bp, len, "\n");
21726 + if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21727 + return (strlen(bufp));
17908 21728 }
17909 21729 }
17910 - break;
17911 - case MBA_IDC_COMPLETE:
21730 + }
21731 + if (cnt % 16 != 0) {
21732 + (void) snprintf(bp, len, "\n");
21733 + bp = ql_str_ptr(ha, bp, &len);
21734 + if (bp == NULL) {
21735 + return (strlen(bufp));
21736 + }
21737 + }
21738 +
21739 + QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21740 +
21741 + return (strlen(bufp));
21742 +}
21743 +
21744 +/* ******************************************************************* */
21745 +/* ********************* Dump Template Functions ********************* */
21746 +/* ******************************************************************* */
21747 +
21748 +/*
21749 + * ql_2700_get_module_dmp_template
21750 + * Get dump template from firmware module
21751 + *
21752 + * Input:
21753 + * ha: adapter state pointer.
21754 + *
21755 + * Returns:
21756 + * ql local function return status code.
21757 + *
21758 + * Context:
21759 + * Kernel context.
21760 + */
21761 +int
21762 +ql_2700_get_module_dmp_template(ql_adapter_state_t *ha)
21763 +{
21764 + int rval;
21765 + uint32_t word_count, cnt, *bp, *dp;
21766 +
21767 + QL_PRINT_10(ha, "started\n");
21768 +
21769 + if (ha->dmp_template.dma_handle != NULL) {
21770 + return (QL_SUCCESS);
21771 + }
21772 +
21773 + if ((word_count = ha->risc_fw[2].length) == 0) {
21774 + EL(ha, "no dump template, length=0\n");
21775 + return (QL_FUNCTION_PARAMETER_ERROR);
21776 + }
21777 +
21778 + /* Allocate template buffer. */
21779 + ha->dmp_template.size = word_count << 2;
21780 + ha->dmp_template.type = LITTLE_ENDIAN_DMA;
21781 + ha->dmp_template.max_cookie_count = 1;
21782 + ha->dmp_template.alignment = 8;
21783 + rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
21784 + if (rval != QL_SUCCESS) {
21785 + EL(ha, "unable to allocate template buffer, "
21786 + "status=%xh\n", rval);
21787 + return (rval);
21788 + }
21789 +
21790 + /* Get big endian template. */
21791 + bp = ha->dmp_template.bp;
21792 + dp = (uint32_t *)ha->risc_fw[2].code;
21793 + for (cnt = 0; cnt < word_count; cnt++) {
21794 + ddi_put32(ha->dmp_template.acc_handle, bp, *dp++);
21795 + if (cnt > 6) {
21796 + ql_chg_endian((uint8_t *)bp, 4);
21797 + }
21798 + bp++;
21799 + }
21800 +
21801 + QL_PRINT_10(ha, "done\n");
21802 + return (rval);
21803 +}
21804 +
/*
 * ql_2700_get_flash_dmp_template
 *	Get dump template from flash
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_2700_get_flash_dmp_template(ql_adapter_state_t *ha)
{
	int rval;
	uint32_t word_count, cnt, *bp;
	uint32_t faddr = ha->flash_data_addr | ha->flash_fw_addr;
	uint32_t fdata = 0;

	QL_PRINT_10(ha, "started, fw_addr=%xh\n", ha->flash_fw_addr);

	/* Discard any previously cached template. */
	if (ha->dmp_template.dma_handle != NULL) {
		ql_free_phys(ha, &ha->dmp_template);
	}

	/*
	 * The template is the third array in the flash firmware region;
	 * skip over the first two arrays using their length words.
	 * Lengths are stored big endian, hence the ql_chg_endian() calls.
	 */
	/* First array length */
	rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
	QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
	    faddr + 3, fdata);
	if (rval != QL_SUCCESS) {
		EL(ha, "2700_read_flash status=%xh\n", rval);
		return (rval);
	}
	if (fdata == 0 || fdata == 0xffffffff) {
		EL(ha, "Invalid first array length = %xh\n", fdata);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}
	ql_chg_endian((uint8_t *)&fdata, 4);
	QL_PRINT_7(ha, "First array length = %xh\n", fdata);
	faddr += fdata;

	/* Second array length */
	rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
	QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
	    faddr + 3, fdata);
	if (rval != QL_SUCCESS) {
		EL(ha, "2700_read_flash status=%xh\n", rval);
		return (rval);
	}
	if (fdata == 0 || fdata == 0xffffffff) {
		EL(ha, "Invalid second array length = %xh\n", fdata);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}
	ql_chg_endian((uint8_t *)&fdata, 4);
	QL_PRINT_7(ha, "Second array length = %xh\n", fdata);
	faddr += fdata;

	/* Third array length (dump template) */
	rval = ql_24xx_read_flash(ha, faddr + 2, &fdata);
	QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
	    faddr + 2, fdata);
	if (rval != QL_SUCCESS) {
		EL(ha, "2700_read_flash status=%xh\n", rval);
		return (rval);
	}
	if (fdata == 0 || fdata == 0xffffffff) {
		EL(ha, "Invalid third array length = %xh\n", fdata);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}
	ql_chg_endian((uint8_t *)&fdata, 4);
	QL_PRINT_7(ha, "Third array length = %xh\n", fdata);
	word_count = fdata;

	/* Allocate template buffer. */
	ha->dmp_template.size = word_count << 2;
	ha->dmp_template.type = LITTLE_ENDIAN_DMA;
	ha->dmp_template.max_cookie_count = 1;
	ha->dmp_template.alignment = 8;
	rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
	if (rval != QL_SUCCESS) {
		EL(ha, "unable to allocate template buffer, "
		    "status=%xh\n", rval);
		return (rval);
	}

	/* Get big endian template. */
	bp = ha->dmp_template.bp;
	for (cnt = 0; cnt < word_count; cnt++) {
		rval = ql_24xx_read_flash(ha, faddr++, &fdata);
		if (rval != QL_SUCCESS) {
			EL(ha, "2700_read_flash status=%xh\n", rval);
			ql_free_phys(ha, &ha->dmp_template);
			return (rval);
		}
		ddi_put32(ha->dmp_template.acc_handle, bp, fdata);
		bp++;
	}

	QL_PRINT_10(ha, "done\n");
	return (rval);
}
21908 +
/*
 * ql_2700_dmp_parse_template
 *	Walks the ISP27xx dump template and dispatches each entry to its
 *	handler.  With dump_buff == NULL this is a sizing pass; otherwise
 *	each handler captures its data into dump_buff.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	template_hdr:	dump template header pointer.
 *	dump_buff:	capture buffer pointer, NULL for size-only pass.
 *	buff_size:	capture buffer size in bytes.
 *
 * Returns:
 *	Total bytes needed/used (template size plus captured entry data),
 *	0 if the template header is invalid.
 *
 * Context:
 *	Interrupt or Kernel context.
 */
static uint32_t
ql_2700_dmp_parse_template(ql_adapter_state_t *ha, ql_dt_hdr_t *template_hdr,
    uint8_t *dump_buff, uint32_t buff_size)
{
	int e_cnt, esize, num_of_entries;
	uint32_t bsize;
	time_t time;
	uint8_t *dbuff, *dbuff_end;
	ql_dt_entry_t *entry;
	int sane_end = 0;

	dbuff = dump_buff;	/* dbuff = NULL size determination. */
	dbuff_end = dump_buff + buff_size;

	/* Stamp the running firmware version into the template header. */
	template_hdr->ver_attr[0] = ha->fw_major_version;
	template_hdr->ver_attr[1] = ha->fw_minor_version;
	template_hdr->ver_attr[2] = ha->fw_subminor_version;
	template_hdr->ver_attr[3] = ha->fw_attributes;
	template_hdr->ver_attr[4] = ha->fw_ext_attributes;

	QL_PRINT_7(ha, "started, template_hdr=%ph, dump_buff=%ph, "
	    "buff_size=%xh, buff_end=%ph\n", (void *)template_hdr,
	    (void *)dbuff, buff_size, (void *)dbuff_end);

	/* Setup parameters */
	QL_PRINT_7(ha, "type=%d, first_entry_offset=%xh, "
	    "num_of_entries=%xh ver_attr=%xh,%xh,%xh,%xh,%xh\n",
	    template_hdr->type, template_hdr->first_entry_offset,
	    template_hdr->num_of_entries, template_hdr->ver_attr[0],
	    template_hdr->ver_attr[1], template_hdr->ver_attr[2],
	    template_hdr->ver_attr[3], template_hdr->ver_attr[4]);

	if (template_hdr->type != DT_THDR) {
		EL(ha, "Template header not found\n");
		return (0);
	}
	if (dbuff != NULL) {
		(void) drv_getparm(TIME, &time);
		template_hdr->driver_timestamp = LSD(time);
	}

	num_of_entries = template_hdr->num_of_entries;
	entry = (ql_dt_entry_t *)((caddr_t)template_hdr +
	    template_hdr->first_entry_offset);

	/* Running total starts with the template itself. */
	bsize = template_hdr->size_of_template;
	for (e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
		QL_PRINT_7(ha, "e_cnt=%xh, entry=%ph, type=%d, size=%xh, "
		    "capture_flags=%xh, driver_flags=%xh, bofst=%xh\n",
		    e_cnt, (void *)entry, entry->h.type, entry->h.size,
		    entry->h.capture_flags, entry->h.driver_flags,
		    dbuff != NULL ? (uintptr_t)dbuff - (uintptr_t)template_hdr :
		    bsize);
		/*
		 * Decode the entry type and process it accordingly
		 */
		esize = 0;
		switch (entry->h.type) {
		case DT_NOP:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
			    e_cnt, entry->h.type);
			break;
		case DT_TEND:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
			    e_cnt, entry->h.type);
			/* A valid template has exactly one end entry. */
			sane_end++;
			break;
		case DT_RIOB1:
			esize = ql_2700_dt_riob1(ha, (ql_dt_riob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB1:
			ql_2700_dt_wiob1(ha, (ql_dt_wiob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RIOB2:
			esize = ql_2700_dt_riob2(ha, (ql_dt_riob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB2:
			ql_2700_dt_wiob2(ha, (ql_dt_wiob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCI:
			esize = ql_2700_dt_rpci(ha, (ql_dt_rpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WPCI:
			ql_2700_dt_wpci(ha, (ql_dt_wpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRAM:
			esize = ql_2700_dt_rram(ha, (ql_dt_rram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUE:
			esize = ql_2700_dt_gque(ha, (ql_dt_gque_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GFCE:
			esize = ql_2700_dt_gfce(ha, (ql_dt_gfce_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_PRISC:
			ql_2700_dt_prisc(ha, (ql_dt_prisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRISC:
			ql_2700_dt_rrisc(ha, (ql_dt_rrisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_DINT:
			ql_2700_dt_dint(ha, (ql_dt_dint_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GHBD:
			esize = ql_2700_dt_ghbd(ha, (ql_dt_ghbd_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_SCRA:
			esize = ql_2700_dt_scra(ha, (ql_dt_scra_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRREG:
			esize = ql_2700_dt_rrreg(ha, (ql_dt_rrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WRREG:
			ql_2700_dt_wrreg(ha, (ql_dt_wrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRRAM:
			esize = ql_2700_dt_rrram(ha, (ql_dt_rrram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCIC:
			esize = ql_2700_dt_rpcic(ha, (ql_dt_rpcic_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUES:
			esize = ql_2700_dt_gques(ha, (ql_dt_gques_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WDMP:
			esize = ql_2700_dt_wdmp(ha, (ql_dt_wdmp_t *)entry,
			    dbuff, dbuff_end);
			break;
		default:
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			EL(ha, "Entry ID=%d, type=%d unknown\n", e_cnt,
			    entry->h.type);
			break;
		}
		if (dbuff != NULL && esize) {
			QL_PRINT_7(ha, "entry=%d, esize=%xh, capture data\n",
			    entry->h.type, esize);
			QL_DUMP_3(dbuff, 8, esize);
			dbuff += esize;
		}
		bsize += esize;
		/* next entry in the template */
		entry = (ql_dt_entry_t *)((caddr_t)entry + entry->h.size);
	}
	if (sane_end > 1) {
		EL(ha, "Template configuration error. Check Template\n");
	}

	QL_PRINT_7(ha, "done, num of entries=%xh, size=%xh\n",
	    template_hdr->num_of_entries, bsize);
	return (bsize);
}
22089 +
22090 +static int
22091 +ql_2700_dt_riob1(ql_adapter_state_t *ha, ql_dt_riob1_t *entry,
22092 + uint8_t *dbuff, uint8_t *dbuff_end)
22093 +{
22094 + int esize;
22095 + uint32_t i, cnt;
22096 + uint8_t *bp = dbuff;
22097 + uint32_t addr = entry->addr;
22098 + uint8_t *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22099 +
22100 + QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
22101 + "reg_count=%x%02xh, pci_offset=%xh\n", (void *)dbuff, entry->addr,
22102 + entry->reg_size, entry->reg_count_h, entry->reg_count_l,
22103 + entry->pci_offset);
22104 +
22105 + cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
22106 + esize = cnt * 4; /* addr */
22107 + esize += cnt * entry->reg_size; /* data */
22108 +
22109 + if (dbuff == NULL) {
22110 + QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22111 + return (esize);
22112 + }
22113 + if (esize + dbuff >= dbuff_end) {
22114 + EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22115 + entry->h.driver_flags = (uint8_t)
22116 + (entry->h.driver_flags | SKIPPED_FLAG);
22117 + return (0);
22118 + }
22119 +
22120 + WRT32_IO_REG(ha, io_base_addr, addr);
22121 + while (cnt--) {
22122 + *bp++ = LSB(LSW(addr));
22123 + *bp++ = MSB(LSW(addr));
22124 + *bp++ = LSB(MSW(addr));
22125 + *bp++ = MSB(MSW(addr));
22126 + for (i = 0; i < entry->reg_size; i++) {
22127 + *bp++ = RD_REG_BYTE(ha, reg++);
22128 + }
22129 + addr++;
22130 + }
22131 +
22132 + QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22133 + return (esize);
22134 +}
22135 +
22136 +static void
22137 +ql_2700_dt_wiob1(ql_adapter_state_t *ha, ql_dt_wiob1_t *entry,
22138 + uint8_t *dbuff, uint8_t *dbuff_end)
22139 +{
22140 + uint8_t *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22141 +
22142 + QL_PRINT_7(ha, "started, addr=%xh, data=%xh, pci_offset=%xh\n",
22143 + entry->addr, entry->data, entry->pci_offset);
22144 +
22145 + if (dbuff == NULL) {
22146 + QL_PRINT_7(ha, "null buf done\n");
22147 + return;
22148 + }
22149 + if (dbuff >= dbuff_end) {
22150 + EL(ha, "skipped, no buffer space, needed=0\n");
22151 + entry->h.driver_flags = (uint8_t)
22152 + (entry->h.driver_flags | SKIPPED_FLAG);
22153 + return;
22154 + }
22155 +
22156 + WRT32_IO_REG(ha, io_base_addr, entry->addr);
22157 + WRT_REG_DWORD(ha, reg, entry->data);
22158 +
22159 + QL_PRINT_7(ha, "done\n");
22160 +}
22161 +
22162 +static int
22163 +ql_2700_dt_riob2(ql_adapter_state_t *ha, ql_dt_riob2_t *entry,
22164 + uint8_t *dbuff, uint8_t *dbuff_end)
22165 +{
22166 + int esize;
22167 + uint32_t i, cnt;
22168 + uint8_t *bp = dbuff;
22169 + uint8_t *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22170 + uint32_t addr = entry->addr;
22171 +
22172 + QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
22173 + "reg_count=%x%02xh, pci_offset=%xh, bank_sel_offset=%xh, "
22174 + "reg_bank=%xh\n", (void *)dbuff, entry->addr,
22175 + entry->reg_size, entry->reg_count_h, entry->reg_count_l,
22176 + entry->pci_offset, entry->bank_sel_offset, entry->reg_bank);
22177 +
22178 + cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
22179 + esize = cnt * 4; /* addr */
22180 + esize += cnt * entry->reg_size; /* data */
22181 +
22182 + if (dbuff == NULL) {
22183 + QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22184 + return (esize);
22185 + }
22186 + if (esize + dbuff >= dbuff_end) {
22187 + EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22188 + entry->h.driver_flags = (uint8_t)
22189 + (entry->h.driver_flags | SKIPPED_FLAG);
22190 + return (0);
22191 + }
22192 +
22193 + WRT32_IO_REG(ha, io_base_addr, addr);
22194 + WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
22195 + while (cnt--) {
22196 + *bp++ = LSB(LSW(addr));
22197 + *bp++ = MSB(LSW(addr));
22198 + *bp++ = LSB(MSW(addr));
22199 + *bp++ = MSB(MSW(addr));
22200 + for (i = 0; i < entry->reg_size; i++) {
22201 + *bp++ = RD_REG_BYTE(ha, reg++);
22202 + }
22203 + addr++;
22204 + }
22205 +
22206 + QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22207 + return (esize);
22208 +}
22209 +
22210 +static void
22211 +ql_2700_dt_wiob2(ql_adapter_state_t *ha, ql_dt_wiob2_t *entry,
22212 + uint8_t *dbuff, uint8_t *dbuff_end)
22213 +{
22214 + uint16_t data;
22215 + uint8_t *reg = (uint8_t *)ha->iobase + entry->pci_offset;
22216 +
22217 + QL_PRINT_7(ha, "started, addr=%xh, data=%x%02xh, pci_offset=%xhh, "
22218 + "bank_sel_offset=%xh, reg_bank=%xh\n", entry->addr, entry->data_h,
22219 + entry->data_l, entry->pci_offset, entry->bank_sel_offset,
22220 + entry->reg_bank);
22221 +
22222 + if (dbuff == NULL) {
22223 + QL_PRINT_7(ha, "null buf done\n");
22224 + return;
22225 + }
22226 + if (dbuff >= dbuff_end) {
22227 + EL(ha, "skipped, no buffer space, needed=0\n");
22228 + entry->h.driver_flags = (uint8_t)
22229 + (entry->h.driver_flags | SKIPPED_FLAG);
22230 + return;
22231 + }
22232 +
22233 + data = CHAR_TO_SHORT(entry->data_l, entry->data_h);
22234 +
22235 + WRT32_IO_REG(ha, io_base_addr, entry->addr);
22236 + WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
22237 + WRT_REG_WORD(ha, reg, data);
22238 +
22239 + QL_PRINT_7(ha, "done\n");
22240 +}
22241 +
22242 +static int
22243 +ql_2700_dt_rpci(ql_adapter_state_t *ha, ql_dt_rpci_t *entry, uint8_t *dbuff,
22244 + uint8_t *dbuff_end)
22245 +{
22246 + int esize;
22247 + uint32_t i;
22248 + uint8_t *bp = dbuff;
22249 + uint8_t *reg = (uint8_t *)ha->iobase + entry->addr;
22250 +
22251 + QL_PRINT_7(ha, "started, addr=%xh, reg=%ph\n", entry->addr,
22252 + (void *)reg);
22253 +
22254 + esize = 4; /* addr */
22255 + esize += 4; /* data */
22256 +
22257 + if (dbuff == NULL) {
22258 + QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22259 + return (esize);
22260 + }
22261 + if (esize + dbuff >= dbuff_end) {
22262 + EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22263 + entry->h.driver_flags = (uint8_t)
22264 + (entry->h.driver_flags | SKIPPED_FLAG);
22265 + return (0);
22266 + }
22267 +
22268 + *bp++ = LSB(LSW(entry->addr));
22269 + *bp++ = MSB(LSW(entry->addr));
22270 + *bp++ = LSB(MSW(entry->addr));
22271 + *bp++ = MSB(MSW(entry->addr));
22272 + for (i = 0; i < 4; i++) {
22273 + *bp++ = RD_REG_BYTE(ha, reg++);
22274 + }
22275 +
22276 + QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22277 + return (esize);
22278 +}
22279 +
22280 +static void
22281 +ql_2700_dt_wpci(ql_adapter_state_t *ha, ql_dt_wpci_t *entry,
22282 + uint8_t *dbuff, uint8_t *dbuff_end)
22283 +{
22284 + uint8_t *reg = (uint8_t *)ha->iobase + entry->addr;
22285 +
22286 + QL_PRINT_7(ha, "started, addr=%xh, data=%xh, reg=%ph\n",
22287 + entry->addr, entry->data, (void *)reg);
22288 +
22289 + if (dbuff == NULL) {
22290 + QL_PRINT_7(ha, "null buf done\n");
22291 + return;
22292 + }
22293 + if (dbuff >= dbuff_end) {
22294 + EL(ha, "skipped, no buffer space, needed=0\n");
22295 + entry->h.driver_flags = (uint8_t)
22296 + (entry->h.driver_flags | SKIPPED_FLAG);
22297 + return;
22298 + }
22299 +
22300 + WRT_REG_DWORD(ha, reg, entry->data);
22301 +
22302 + QL_PRINT_7(ha, "done\n");
22303 +}
22304 +
/*
 * ql_2700_dt_rram
 *	Captures a region of RISC RAM via the DUMP RAM EXTENDED mailbox
 *	command.  The address range may be overridden per RAM area.
 */
static int
ql_2700_dt_rram(ql_adapter_state_t *ha, ql_dt_rram_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize, rval;
	uint32_t start = entry->start_addr;
	uint32_t end = entry->end_addr;

	QL_PRINT_7(ha, "started, buf=%ph, ram_area=%xh, start_addr=%xh, "
	    "end_addr=%xh\n", (void *)dbuff, entry->ram_area,
	    entry->start_addr, entry->end_addr);

	/*
	 * Area 1 uses the template's range as-is; areas 2-4 substitute
	 * ranges discovered from the running firmware.
	 */
	if (entry->ram_area == 2) {
		end = ha->fw_ext_memory_end;
	} else if (entry->ram_area == 3) {
		start = ha->fw_shared_ram_start;
		end = ha->fw_shared_ram_end;
	} else if (entry->ram_area == 4) {
		start = ha->fw_ddr_ram_start;
		end = ha->fw_ddr_ram_end;
	} else if (entry->ram_area != 1) {
		EL(ha, "skipped, unknown RAM_AREA %d\n", entry->ram_area);
		start = 0;
		end = 0;
	}
	/* Range is inclusive: (end - start + 1) words of 4 bytes. */
	esize = end > start ? end - start : 0;
	if (esize) {
		esize = (esize + 1) * 4;
	}

	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize == 0 || esize + dbuff >= dbuff_end) {
		if (esize != 0) {
			EL(ha, "skipped, no buffer space, needed=%xh\n",
			    esize);
		} else {
			/*EMPTY*/
			QL_PRINT_7(ha, "skipped, no ram_area=%xh, start=%xh, "
			    "end=%xh\n", entry->ram_area, start, end);
		}
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}
	/* Record the range actually captured back into the template. */
	entry->end_addr = end;
	entry->start_addr = start;

	if ((rval = ql_2700_dump_ram(ha, MBC_DUMP_RAM_EXTENDED,
	    start, esize / 4, dbuff)) != QL_SUCCESS) {
		EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
		    "esize=0\n", rval, start, esize / 4);
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22365 +
/*
 * ql_2700_dt_gque
 *	Captures driver queue rings.  queue_type 1 = request queues,
 *	2 = response queues, 3 = ATIO queue (not supported here).  Each
 *	queue is dumped as a 16-bit queue number, 16-bit entry count,
 *	then the raw ring contents.
 */
static int
ql_2700_dt_gque(ql_adapter_state_t *ha, ql_dt_gque_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint32_t cnt, q_cnt, e_cnt, i;
	uint8_t *bp = dbuff, *dp;

	QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
	    (void *)dbuff, entry->num_queues, entry->queue_type);

	if (entry->queue_type == 1) {
		ql_request_q_t *req_q;

		/* Second request queue exists only with multiple rsp queues. */
		e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* queue entries */

		/* queue size */
		esize += ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
		if (e_cnt > 1) {
			esize += ha->req_q[1]->req_entry_cnt *
			    REQUEST_ENTRY_SIZE;
		}

		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
			e_cnt = req_q->req_entry_cnt;
			dp = req_q->req_ring.bp;
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			for (cnt = 0; cnt < e_cnt; cnt++) {
				for (i = 0; i < REQUEST_ENTRY_SIZE; i++) {
					*bp++ = *dp++;
				}
			}
		}
	} else if (entry->queue_type == 2) {
		ql_response_q_t *rsp_q;

		e_cnt = ha->rsp_queues_cnt;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* queue entries */

		/* queue size */
		for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			esize += rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
		}

		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped2, no buffer space, needed=%xh\n",
			    esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			e_cnt = rsp_q->rsp_entry_cnt;
			dp = rsp_q->rsp_ring.bp;
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			for (cnt = 0; cnt < e_cnt; cnt++) {
				for (i = 0; i < RESPONSE_ENTRY_SIZE; i++) {
					*bp++ = *dp++;
				}
			}
		}
	} else if (entry->queue_type == 3) {
		QL_PRINT_7(ha, "skipped, no ATIO queue, esize=0\n");
		if (dbuff != NULL) {
			entry->num_queues = 0;
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	} else {
		EL(ha, "skipped, unknown queue_type %d, esize=0\n",
		    entry->queue_type);
		if (dbuff != NULL) {
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22478 +
22479 +/*ARGSUSED*/
22480 +static int
22481 +ql_2700_dt_gfce(ql_adapter_state_t *ha, ql_dt_gfce_t *entry,
22482 + uint8_t *dbuff, uint8_t *dbuff_end)
22483 +{
22484 + QL_PRINT_7(ha, "started\n");
22485 +
22486 + QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22487 + if (dbuff != NULL) {
22488 + entry->h.driver_flags = (uint8_t)
22489 + (entry->h.driver_flags | SKIPPED_FLAG);
22490 + }
22491 +
22492 + return (0);
22493 +}
22494 +
/*
 * ql_2700_dt_prisc
 *	Pauses the RISC processor so that subsequent entries can capture
 *	a stable hardware state.  Polls up to ~3 seconds for the pause
 *	to take effect.
 */
static void
ql_2700_dt_prisc(ql_adapter_state_t *ha, ql_dt_prisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t timer;

	QL_PRINT_7(ha, "started\n");

	/* Sizing pass performs no hardware access. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* 30000 iterations of 100us = ~3s worst case wait. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0;
		    timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				break;
			}
		}
	}

	QL_PRINT_7(ha, "done\n");
}
22534 +
/*
 * ql_2700_dt_rrisc
 *	Handles a "reset RISC" dump template entry: shuts down DMA, resets
 *	the ISP, then waits for the RISC ROM code and the reset bit to
 *	settle.  Marks the firmware as no longer running.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL when only sizing the dump.
 *	dbuff_end:	end of data buffer.
 *
 * Context:
 *	Contributes no dump bytes; side effect only (chip reset,
 *	FIRMWARE_UP cleared).
 */
static void
ql_2700_dt_rrisc(ql_adapter_state_t *ha, ql_dt_rrisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t timer;

	QL_PRINT_7(ha, "started\n");

	/* Sizing pass: nothing to do. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Shutdown DMA. */
	WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN);

	/* Wait for DMA to stop (up to 30000 x 100us, ~3s). */
	for (timer = 0; timer < 30000; timer++) {
		if (!(RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Reset the chip. */
	WRT32_IO_REG(ha, ctrl_status, ISP_RESET);
	drv_usecwait(200);

	/*
	 * Wait for RISC to recover from reset: mailbox 0 carries the ROM
	 * status; leave the loop once it is no longer reporting busy.
	 */
	for (timer = 30000; timer; timer--) {
		ha->rom_status = RD16_IO_REG(ha, mailbox_out[0]);
		if ((ha->rom_status & MBS_ROM_STATUS_MASK) != MBS_ROM_BUSY) {
			break;
		}
		drv_usecwait(100);
	}

	/* Wait for the reset bit itself to clear. */
	for (timer = 30000; timer; timer--) {
		if (!(RD32_IO_REG(ha, ctrl_status) & ISP_RESET)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Firmware is gone after a chip reset. */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~FIRMWARE_UP;
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_7(ha, "done\n");
}
22592 +
22593 +static void
22594 +ql_2700_dt_dint(ql_adapter_state_t *ha, ql_dt_dint_t *entry,
22595 + uint8_t *dbuff, uint8_t *dbuff_end)
22596 +{
22597 + QL_PRINT_7(ha, "started, pci_offset=%xh, data=%xh\n",
22598 + entry->pci_offset, entry->data);
22599 +
22600 + if (dbuff == NULL) {
22601 + QL_PRINT_7(ha, "null buf done\n");
22602 + return;
22603 + }
22604 + if (dbuff >= dbuff_end) {
22605 + EL(ha, "skipped, no buffer space, needed=0\n");
22606 + entry->h.driver_flags = (uint8_t)
22607 + (entry->h.driver_flags | SKIPPED_FLAG);
22608 + return;
22609 + }
22610 +
22611 + ql_pci_config_put32(ha, entry->pci_offset, entry->data);
22612 +
22613 + QL_PRINT_7(ha, "done\n");
22614 +}
22615 +
22616 +/*ARGSUSED*/
22617 +static int
22618 +ql_2700_dt_ghbd(ql_adapter_state_t *ha, ql_dt_ghbd_t *entry,
22619 + uint8_t *dbuff, uint8_t *dbuff_end)
22620 +{
22621 + QL_PRINT_7(ha, "started\n");
22622 +
22623 + QL_PRINT_7(ha, "skipped, not supported\n");
22624 + if (dbuff != NULL) {
22625 + entry->h.driver_flags = (uint8_t)
22626 + (entry->h.driver_flags | SKIPPED_FLAG);
22627 + }
22628 +
22629 + return (0);
22630 +}
22631 +
22632 +/*ARGSUSED*/
22633 +static int
22634 +ql_2700_dt_scra(ql_adapter_state_t *ha, ql_dt_scra_t *entry,
22635 + uint8_t *dbuff, uint8_t *dbuff_end)
22636 +{
22637 + QL_PRINT_7(ha, "started\n");
22638 +
22639 + QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22640 + if (dbuff != NULL) {
22641 + entry->h.driver_flags = (uint8_t)
22642 + (entry->h.driver_flags | SKIPPED_FLAG);
22643 + }
22644 +
22645 + return (0);
22646 +}
22647 +
/*
 * ql_2700_dt_rrreg
 *	Handles a "read remote register" dump template entry: reads
 *	'count' 32-bit registers starting at 'addr' through the indirect
 *	access pair in register window 0x40 (address at iobase+0xc0, data
 *	at iobase+0xc4) and stores address/data pairs, little-endian, in
 *	the dump buffer.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer (addr, count).
 *	dbuff:		data buffer pointer, NULL when only sizing the dump.
 *	dbuff_end:	end of data buffer.
 *
 * Returns:
 *	Number of bytes the entry contributes to the dump (0 if skipped).
 */
static int
ql_2700_dt_rrreg(ql_adapter_state_t *ha, ql_dt_rrreg_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint32_t i;
	uint8_t *bp = dbuff;
	/* Indirect-access data register within window 0x40. */
	uint8_t *reg = (uint8_t *)ha->iobase + 0xc4;
	uint32_t addr = entry->addr;
	uint32_t cnt = entry->count;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
	    (void *)dbuff, entry->addr, entry->count);

	/* Each register emits 4 address bytes followed by 4 data bytes. */
	esize = cnt * 4;	/* addr */
	esize += cnt * 4;	/* data */

	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select the indirect register access window. */
	WRT32_IO_REG(ha, io_base_addr, 0x40);
	while (cnt--) {
		/*
		 * NOTE(review): bit 31 OR'd into the address presumably
		 * latches a read strobe for the indirect access --
		 * confirm against the chip register documentation.
		 */
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, addr | 0x80000000);
		/* Record the target address, little-endian. */
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		/* Then the 4 data bytes from the data register. */
		for (i = 0; i < 4; i++) {
			*bp++ = RD_REG_BYTE(ha, reg + i);
		}
		addr += 4;
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22692 +
22693 +static void
22694 +ql_2700_dt_wrreg(ql_adapter_state_t *ha, ql_dt_wrreg_t *entry,
22695 + uint8_t *dbuff, uint8_t *dbuff_end)
22696 +{
22697 + QL_PRINT_7(ha, "started, addr=%xh, data=%xh\n", entry->addr,
22698 + entry->data);
22699 +
22700 + if (dbuff == NULL) {
22701 + QL_PRINT_7(ha, "null buf done\n");
22702 + return;
22703 + }
22704 + if (dbuff >= dbuff_end) {
22705 + EL(ha, "skipped, no buffer space, needed=0\n");
22706 + entry->h.driver_flags = (uint8_t)
22707 + (entry->h.driver_flags | SKIPPED_FLAG);
22708 + return;
22709 + }
22710 +
22711 + WRT32_IO_REG(ha, io_base_addr, 0x40);
22712 + WRT_REG_DWORD(ha, ha->iobase + 0xc4, entry->data);
22713 + WRT_REG_DWORD(ha, ha->iobase + 0xc0, entry->addr);
22714 +
22715 + QL_PRINT_7(ha, "done\n");
22716 +}
22717 +
22718 +static int
22719 +ql_2700_dt_rrram(ql_adapter_state_t *ha, ql_dt_rrram_t *entry,
22720 + uint8_t *dbuff, uint8_t *dbuff_end)
22721 +{
22722 + int rval, esize;
22723 +
22724 + QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
22725 + (void *)dbuff, entry->addr, entry->count);
22726 +
22727 + esize = entry->count * 4; /* data */
22728 +
22729 + if (dbuff == NULL) {
22730 + QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22731 + return (esize);
22732 + }
22733 + if (esize + dbuff >= dbuff_end) {
22734 + EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22735 + entry->h.driver_flags = (uint8_t)
22736 + (entry->h.driver_flags | SKIPPED_FLAG);
22737 + return (0);
22738 + }
22739 +
22740 + if ((rval = ql_2700_dump_ram(ha, MBC_MPI_RAM, entry->addr,
22741 + entry->count, dbuff)) != QL_SUCCESS) {
22742 + EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
22743 + "esize=0\n", rval, entry->addr, entry->count);
22744 + return (0);
22745 + }
22746 +
22747 + QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22748 + return (esize);
22749 +}
22750 +
22751 +static int
22752 +ql_2700_dt_rpcic(ql_adapter_state_t *ha, ql_dt_rpcic_t *entry,
22753 + uint8_t *dbuff, uint8_t *dbuff_end)
22754 +{
22755 + int esize;
22756 + uint32_t i;
22757 + uint8_t *bp = dbuff;
22758 + uint32_t addr = entry->addr;
22759 + uint32_t cnt = entry->count;
22760 +
22761 + QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
22762 + (void *)dbuff, entry->addr, entry->count);
22763 +
22764 + esize = cnt * 4; /* addr */
22765 + esize += cnt * 4; /* data */
22766 +
22767 + if (dbuff == NULL) {
22768 + QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22769 + return (esize);
22770 + }
22771 + if (esize + dbuff >= dbuff_end) {
22772 + EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22773 + entry->h.driver_flags = (uint8_t)
22774 + (entry->h.driver_flags | SKIPPED_FLAG);
22775 + return (0);
22776 + }
22777 +
22778 + while (cnt--) {
22779 + *bp++ = LSB(LSW(addr));
22780 + *bp++ = MSB(LSW(addr));
22781 + *bp++ = LSB(MSW(addr));
22782 + *bp++ = MSB(MSW(addr));
22783 + for (i = 0; i < 4; i++) {
22784 + *bp++ = ql_pci_config_get8(ha, addr++);
22785 + }
22786 + }
22787 +
22788 + QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22789 + return (esize);
22790 +}
22791 +
/*
 * ql_2700_dt_gques
 *	Handles a "get queue shadow" dump template entry: records the
 *	request-out (queue_type 1) or response-in (queue_type 2) shadow
 *	values, one queue#/entry-count/value record per queue.  ATIO
 *	queues (queue_type 3) are not used by this driver.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer (num_queues, queue_type).
 *	dbuff:		data buffer pointer, NULL when only sizing the dump.
 *	dbuff_end:	end of data buffer.
 *
 * Returns:
 *	Number of bytes the entry contributes to the dump (0 if skipped).
 */
static int
ql_2700_dt_gques(ql_adapter_state_t *ha, ql_dt_gques_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint32_t q_cnt, e_cnt, data;
	uint8_t *bp = dbuff;

	QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
	    (void *)dbuff, entry->num_queues, entry->queue_type);

	if (entry->queue_type == 1) {
		/* Request queue out-pointer shadows. */
		ql_request_q_t *req_q;

		/*
		 * NOTE(review): assumes a second request queue (req_q[1])
		 * exists whenever more than one response queue is
		 * configured -- confirm against the queue setup code.
		 */
		e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* shadow entries */

		/* shadow size */
		esize += SHADOW_ENTRY_SIZE;
		if (e_cnt > 1) {
			esize += SHADOW_ENTRY_SIZE;
		}
		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
			/* e_cnt is reused here as the per-record count (1). */
			e_cnt = 1;
			/* Current out-pointer shadow from DMA memory. */
			data = ddi_get32(req_q->req_ring.acc_handle,
			    req_q->req_out_shadow_ptr);
			/* Record: queue#, entry count, 32-bit value (LE). */
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			*bp++ = LSB(LSW(data));
			*bp++ = MSB(LSW(data));
			*bp++ = LSB(MSW(data));
			*bp++ = MSB(MSW(data));
		}
	} else if (entry->queue_type == 2) {
		/* Response queue in-pointer shadows, one per queue. */
		ql_response_q_t *rsp_q;

		e_cnt = ha->rsp_queues_cnt;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* shadow entries */

		/* shadow size */
		for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
			esize += SHADOW_ENTRY_SIZE;
		}

		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped2, no buffer space, needed=%xh\n",
			    esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			/* e_cnt is reused here as the per-record count (1). */
			e_cnt = 1;
			data = ddi_get32(rsp_q->rsp_ring.acc_handle,
			    rsp_q->rsp_in_shadow_ptr);
			/* Record: queue#, entry count, 32-bit value (LE). */
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			*bp++ = LSB(LSW(data));
			*bp++ = MSB(LSW(data));
			*bp++ = LSB(MSW(data));
			*bp++ = MSB(MSW(data));
		}
	} else if (entry->queue_type == 3) {
		/* No ATIO queue in this driver; skip the entry. */
		EL(ha, "skipped, no ATIO queue, esize=0\n");
		if (dbuff != NULL) {
			entry->num_queues = 0;
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	} else {
		EL(ha, "skipped, unknown queue_type %d, esize=0\n",
		    entry->queue_type);
		if (dbuff != NULL) {
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22901 +
/*
 * ql_2700_dt_wdmp
 *	Handles a "write dump entry" template entry: copies the words
 *	supplied in entry->data into the dump buffer, little-endian.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer (length, data[]).
 *	dbuff:		data buffer pointer, NULL when only sizing the dump.
 *	dbuff_end:	end of data buffer.
 *
 * Returns:
 *	Entry size reported to the caller (see NOTE below).
 */
static int
ql_2700_dt_wdmp(ql_adapter_state_t *ha, ql_dt_wdmp_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint8_t *bp = dbuff;
	uint32_t data, cnt = entry->length, *dp = entry->data;

	QL_PRINT_7(ha, "started, buf=%ph, length=%xh\n",
	    (void *)dbuff, entry->length);

	/*
	 * NOTE(review): esize is entry->length, but the loop below emits
	 * 4 bytes per iteration over entry->length iterations, i.e.
	 * 4*esize bytes -- unless entry->length is already a byte count
	 * and the loop should run length/4 times.  As written the space
	 * check may undercount; verify against the dump template spec.
	 */
	esize = cnt;
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Emit each template word little-endian. */
	while (cnt--) {
		data = *dp++;
		*bp++ = LSB(LSW(data));
		*bp++ = MSB(LSW(data));
		*bp++ = LSB(MSW(data));
		*bp++ = MSB(MSW(data));
	}
	/*
	 * NOTE(review): printing the buffer with %s is only safe if the
	 * template data is NUL-terminated ASCII (which WDMP entries
	 * appear intended to carry) -- confirm; otherwise this debug
	 * trace reads past the written bytes.
	 */
	QL_PRINT_7(ha, "%s\n", dbuff);

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22937 +
/*
 * ql_2700_dump_ram
 *	Dumps RAM.
 *	Risc interrupts must be disabled when this routine is called.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	MBC_DUMP_RAM_EXTENDED/MBC_MPI_RAM.
 *	risc_address:	RISC code start address.
 *	len:	Number of words.
 *	bp:	buffer pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_2700_dump_ram(ql_adapter_state_t *ha, uint16_t cmd, uint32_t risc_address,
    uint32_t len, uint8_t *bp)
{
	dma_mem_t mem;
	uint32_t i, stat, timer;
	uint8_t *dp;
	int rval = QL_SUCCESS;

	QL_PRINT_7(ha, "started, cmd=%xh, risc_address=%xh, len=%xh, "
	    "bp=%ph\n", cmd, risc_address, len, (void *)bp);

	/*
	 * Stage the transfer through a DMA-able bounce buffer.
	 * NOTE(review): only size/type/max_cookie_count/alignment are set
	 * before ql_alloc_phys(); remaining dma_mem_t fields are
	 * presumably initialized by the allocator -- confirm.
	 */
	mem.size = len * 4;
	mem.type = LITTLE_ENDIAN_DMA;
	mem.max_cookie_count = 1;
	mem.alignment = 8;
	if ((rval = ql_alloc_phys(ha, &mem, KM_SLEEP)) != QL_SUCCESS) {
		EL(ha, "alloc status=%xh\n", rval);
		return (rval);
	}

	/*
	 * Build the mailbox command by writing registers directly; the
	 * normal mailbox path is not usable in this context.
	 */
	WRT16_IO_REG(ha, mailbox_in[0], cmd);
	WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
	WRT16_IO_REG(ha, mailbox_in[2], MSW(LSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[3], LSW(LSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[4], MSW(len));
	WRT16_IO_REG(ha, mailbox_in[5], LSW(len));
	WRT16_IO_REG(ha, mailbox_in[6], MSW(MSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[7], LSW(MSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
	if (cmd == MBC_MPI_RAM) {
		/* MPI RAM dumps need BIT_0 set in mailbox 9. */
		WRT16_IO_REG(ha, mailbox_in[9], BIT_0);
	}

	/* Ring the doorbell and poll for completion (~30s at 5us). */
	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
	for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
		stat = RD32_IO_REG(ha, risc2host);
		if (stat & RH_RISC_INT) {
			stat &= 0xff;
			/* 1/0x10: command complete. */
			if ((stat == 1) || (stat == 0x10)) {
				break;
			} else if ((stat == 2) || (stat == 0x11)) {
				/* 2/0x11: failure; mailbox 0 has the code. */
				rval = RD16_IO_REG(ha, mailbox_out[0]);
				break;
			}
			/* Unrelated interrupt status; clear and keep polling. */
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		}
		drv_usecwait(5);
	}
	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);

	if (timer == 0) {
		QL_PRINT_7(ha, "timeout addr=%xh\n", risc_address);
		rval = QL_FUNCTION_TIMEOUT;
	} else {
		/* Sync the DMA buffer for CPU access, then copy it out. */
		(void) ddi_dma_sync(mem.dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
		dp = mem.bp;
		for (i = 0; i < mem.size; i++) {
			*bp++ = *dp++;
		}
	}

	ql_free_phys(ha, &mem);

	QL_PRINT_7(ha, "done\n");
	return (rval);
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX